a|b|c) -> (.+)
- pattern = altFieldsGroupRe.ReplaceAllStringFunc(pattern, func(s string) string {
- return altFieldsRe.ReplaceAllString(s, ".+")
- })
-
- // Initialize paths with the original pattern, or with the two
- // halves of an alternation if the pattern contains one.
- matches := altRe.FindAllStringSubmatch(pattern, -1)
- if len(matches) > 0 {
- paths = []string{matches[0][1], matches[0][2]}
- } else {
- paths = []string{pattern}
- }
-
- // Expand all optional regex elements into two paths. This approach is really only useful up to 2 optional
- // groups, but we probably don't want to deal with the exponential increase beyond that anyway.
- for i := 0; i < len(paths); i++ {
- p := paths[i]
-
- // match is a 2-element slice that will have a start and end index
- // for the left-most match of a regex of form: (lease/)?
- match := optRe.FindStringIndex(p)
-
- if match != nil {
- // create a path that includes the optional element but without
- // parenthesis or the '?' character.
- paths[i] = p[:match[0]] + p[match[0]+1:match[1]-2] + p[match[1]:]
-
- // create a path that excludes the optional element.
- paths = append(paths, p[:match[0]]+p[match[1]:])
- i--
- }
- }
-
- // Replace named parameters (?P<foo>) with {foo}
- var replacedPaths []string
-
- for _, path := range paths {
- result := reqdRe.FindAllStringSubmatch(path, -1)
- if result != nil {
- for _, p := range result {
- par := p[1]
- path = strings.Replace(path, p[0], fmt.Sprintf("{%s}", par), 1)
- }
- }
- // Final cleanup
- path = cleanSuffixRe.ReplaceAllString(path, "")
- path = cleanCharsRe.ReplaceAllString(path, "")
- replacedPaths = append(replacedPaths, path)
- }
-
- return replacedPaths
-}
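
For reference, the optional-group expansion implemented above can be reproduced in isolation. Below is a minimal runnable sketch, assuming the same `(foo/)?` syntax; it omits the alternation and named-capture handling of the full expandPattern:

package main

import (
	"fmt"
	"regexp"
)

// optRe matches an optional group such as "(leases/)?".
var optRe = regexp.MustCompile(`\(([^(]+)\)\?`)

func expandOptional(pattern string) []string {
	paths := []string{pattern}
	for i := 0; i < len(paths); i++ {
		p := paths[i]
		if m := optRe.FindStringIndex(p); m != nil {
			// Variant that keeps the optional text, minus "(" and ")?".
			paths[i] = p[:m[0]] + p[m[0]+1:m[1]-2] + p[m[1]:]
			// Variant that drops the optional element entirely.
			paths = append(paths, p[:m[0]]+p[m[1]:])
			i--
		}
	}
	return paths
}

func main() {
	fmt.Println(expandOptional("sys/(leases/)?renew"))
	// Output: [sys/leases/renew sys/renew]
}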
-
-// schemaType is a subset of the JSON Schema elements used as a target
-// for conversions from Vault's standard FieldTypes.
-type schemaType struct {
- baseType string
- items string
- format string
- pattern string
-}
-
-// convertType translates a FieldType into an OpenAPI type.
-// In the case of arrays, a subtype is returned as well.
-func convertType(t FieldType) schemaType {
- ret := schemaType{}
-
- switch t {
- case TypeString, TypeHeader:
- ret.baseType = "string"
- case TypeNameString:
- ret.baseType = "string"
- ret.pattern = `\w([\w-.]*\w)?`
- case TypeLowerCaseString:
- ret.baseType = "string"
- ret.format = "lowercase"
- case TypeInt:
- ret.baseType = "number"
- case TypeDurationSecond:
- ret.baseType = "number"
- ret.format = "seconds"
- case TypeBool:
- ret.baseType = "boolean"
- case TypeMap:
- ret.baseType = "object"
- ret.format = "map"
- case TypeKVPairs:
- ret.baseType = "object"
- ret.format = "kvpairs"
- case TypeSlice:
- ret.baseType = "array"
- ret.items = "object"
- case TypeStringSlice, TypeCommaStringSlice:
- ret.baseType = "array"
- ret.items = "string"
- case TypeCommaIntSlice:
- ret.baseType = "array"
- ret.items = "number"
- default:
- log.L().Warn("error parsing field type", "type", t)
- ret.format = "unknown"
- }
-
- return ret
-}
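
A sketch of how convertType's result might be assembled into an OpenAPI property. The map layout is illustrative rather than the package's actual schema type, and the fragment assumes it runs inside this package (convertType is unexported):

// TypeCommaStringSlice converts to an array of strings.
st := convertType(TypeCommaStringSlice)
prop := map[string]interface{}{"type": st.baseType} // "array"
if st.items != "" {
	prop["items"] = map[string]interface{}{"type": st.items} // "string"
}
if st.format != "" {
	prop["format"] = st.format
}
if st.pattern != "" {
	prop["pattern"] = st.pattern
}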
-
-// cleanString prepares s for inclusion in the output
-func cleanString(s string) string {
- // clean leading/trailing whitespace, and collapse whitespace runs into a single space
- s = strings.TrimSpace(s)
- s = wsRe.ReplaceAllString(s, " ")
- return s
-}
-
-// splitFields partitions fields into path and body groups.
-// The input pattern is expected to have been run through expandPattern,
-// with path parameters denoted in {braces}.
-func splitFields(allFields map[string]*FieldSchema, pattern string) (pathFields, bodyFields map[string]*FieldSchema) {
- pathFields = make(map[string]*FieldSchema)
- bodyFields = make(map[string]*FieldSchema)
-
- for _, match := range pathFieldsRe.FindAllStringSubmatch(pattern, -1) {
- name := match[1]
- pathFields[name] = allFields[name]
- }
-
- for name, field := range allFields {
- if _, ok := pathFields[name]; !ok {
- // Header fields are in "parameters" with other path fields
- if field.Type == TypeHeader {
- pathFields[name] = field
- } else {
- bodyFields[name] = field
- }
- }
- }
-
- return pathFields, bodyFields
-}
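
To make the contract concrete, a small in-package sketch (the helper is unexported): given an expanded pattern with one {name} parameter, "name" lands in pathFields and everything else in bodyFields:

fields := map[string]*FieldSchema{
	"name": {Type: TypeString},
	"ttl":  {Type: TypeDurationSecond},
}
pathFields, bodyFields := splitFields(fields, "roles/{name}")
// pathFields == {"name": ...}, bodyFields == {"ttl": ...}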
-
-// cleanedResponse is identical to logical.Response but with nulls
-// removed from the JSON encoding
-type cleanedResponse struct {
- Secret *logical.Secret `json:"secret,omitempty"`
- Auth *logical.Auth `json:"auth,omitempty"`
- Data map[string]interface{} `json:"data,omitempty"`
- Redirect string `json:"redirect,omitempty"`
- Warnings []string `json:"warnings,omitempty"`
- WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info,omitempty"`
-}
-
-func cleanResponse(resp *logical.Response) (*cleanedResponse, error) {
- var r cleanedResponse
-
- if err := mapstructure.Decode(resp, &r); err != nil {
- return nil, err
- }
-
- return &r, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path.go b/vendor/github.com/hashicorp/vault/logical/framework/path.go
deleted file mode 100644
index 4093caa0..00000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/path.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package framework
-
-import (
- "context"
- "fmt"
- "sort"
- "strings"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/license"
- "github.com/hashicorp/vault/logical"
-)
-
-// GenericNameRegex returns a generic regex string for creating endpoint
-// patterns that are identified by the given name in the backends.
-func GenericNameRegex(name string) string {
- return fmt.Sprintf("(?P<%s>\\w(([\\w-.]+)?\\w)?)", name)
-}
-
-// GenericNameWithAtRegex returns a generic regex that allows alphanumeric
-// characters along with -, . and @.
-func GenericNameWithAtRegex(name string) string {
- return fmt.Sprintf("(?P<%s>\\w(([\\w-.@]+)?\\w)?)", name)
-}
-
-// OptionalParamRegex returns a regex string for optionally accepting a
-// field from the API URL.
-func OptionalParamRegex(name string) string {
- return fmt.Sprintf("(/(?P<%s>.+))?", name)
-}
-
-// MatchAllRegex returns a regex string for capturing an entire endpoint
-// path as the given name.
-func MatchAllRegex(name string) string {
- return fmt.Sprintf(`(?P<%s>.*)`, name)
-}
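
For reference, a small program showing the concrete strings these helpers produce (this follows directly from the Sprintf calls above):

package main

import (
	"fmt"

	"github.com/hashicorp/vault/logical/framework"
)

func main() {
	fmt.Println(framework.GenericNameRegex("role"))
	// (?P<role>\w(([\w-.]+)?\w)?)
	fmt.Println(framework.OptionalParamRegex("version"))
	// (/(?P<version>.+))?

	// A typical composed endpoint pattern:
	fmt.Println("creds/" + framework.GenericNameRegex("role"))
}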
-
-// PathAppend is a helper for appending lists of paths into a single
-// list.
-func PathAppend(paths ...[]*Path) []*Path {
- result := make([]*Path, 0, 10)
- for _, ps := range paths {
- result = append(result, ps...)
- }
-
- return result
-}
-
-// Path is a single path that the backend responds to.
-type Path struct {
- // Pattern is the pattern of the URL that matches this path.
- //
- // This should be a valid regular expression. Named captures will be
- // exposed as fields that should map to a schema in Fields. If a named
- // capture is not a field in the Fields map, then it will be ignored.
- Pattern string
-
- // Fields is the mapping of data fields to a schema describing that
- // field. Named captures in the Pattern also map to fields. If a named
- // capture name matches a PUT body name, the named capture takes
- // priority.
- //
- // Note that only named capture fields are available in every operation,
- // whereas all fields are available in the Write operation.
- Fields map[string]*FieldSchema
-
- // Operations is the set of operations supported and the associated OperationHandler.
- //
- // If both Create and Update operations are present, documentation and examples from
- // the Update definition will be used. Similarly if both Read and List are present,
- // Read will be used for documentation.
- Operations map[logical.Operation]OperationHandler
-
- // Callbacks are the set of callbacks that are called for a given
- // operation. If a callback for a specific operation is not present,
- // then logical.ErrUnsupportedOperation is automatically generated.
- //
- // The help operation is the only operation that the Path will
- // automatically handle if help text is set. If help text is set
- // and a callback is also registered here, then the callback will
- // be called.
- //
- // Deprecated: Operations should be used instead and will take priority if present.
- Callbacks map[logical.Operation]OperationFunc
-
- // ExistenceCheck, if implemented, is used to query whether a given
- // resource exists or not. This is used for ACL purposes: if an Update
- // action is specified, and the existence check returns false, the action
- // is not allowed since the resource must first be created. The reverse is
- // also true. If not specified, the Update action is forced and the user
- // must have UpdateCapability on the path.
- ExistenceCheck ExistenceFunc
-
- // FeatureRequired, if implemented, will validate if the given feature is
- // enabled for the set of paths
- FeatureRequired license.Features
-
- // Deprecated denotes that this path is considered deprecated. This may
- // be reflected in help and documentation.
- Deprecated bool
-
- // HelpSynopsis and HelpDescription are text describing how to use this
- // path. They are used to auto-generate the help operation. The Path will
- // automatically generate a parameter listing and URL structure based on
- // the regular expression, so the help text should just contain a
- // description of what happens.
- //
- // HelpSynopsis is a one-sentence description of the path. This will
- // be automatically line-wrapped at 80 characters.
- //
- // HelpDescription is a long-form description of the path. This will
- // be automatically line-wrapped at 80 characters.
- HelpSynopsis string
- HelpDescription string
-}
-
-// OperationHandler defines and describes a specific operation handler.
-type OperationHandler interface {
- Handler() OperationFunc
- Properties() OperationProperties
-}
-
-// OperationProperties describes an operation for documentation, help text,
-// and other clients. A Summary should always be provided, whereas other
-// fields can be populated as needed.
-type OperationProperties struct {
- // Summary is a brief (usually one line) description of the operation.
- Summary string
-
- // Description is extended documentation of the operation and may contain
- // Markdown-formatted text markup.
- Description string
-
- // Examples provides samples of the expected request data. The most
- // relevant example should be first in the list, as it will be shown in
- // documentation that supports only a single example.
- Examples []RequestExample
-
- // Responses provides a list of response descriptions for a given response
- // code. The most relevant response should be first in the list, as it will
- // be shown in documentation that only allows a single example.
- Responses map[int][]Response
-
- // Unpublished indicates that this operation should not appear in public
- // documentation or help text. The operation may still have documentation
- // attached that can be used internally.
- Unpublished bool
-
- // Deprecated indicates that this operation should be avoided.
- Deprecated bool
-}
-
-// RequestExample is an example of request data.
-type RequestExample struct {
- Description string // optional description of the request
- Data map[string]interface{} // map version of sample JSON request data
-
- // Optional example response to the sample request. This approach is considered
- // provisional for now, and this field may be changed or removed.
- Response *Response
-}
-
-// Response describes and optionally demonstrates an operation response.
-type Response struct {
- Description string // summary of the response; should always be provided
- MediaType string // media type of the response, defaulting to "application/json" if empty
- Example *logical.Response // example response data
-}
-
-// PathOperation is a concrete implementation of OperationHandler.
-type PathOperation struct {
- Callback OperationFunc
- Summary string
- Description string
- Examples []RequestExample
- Responses map[int][]Response
- Unpublished bool
- Deprecated bool
-}
-
-func (p *PathOperation) Handler() OperationFunc {
- return p.Callback
-}
-
-func (p *PathOperation) Properties() OperationProperties {
- return OperationProperties{
- Summary: strings.TrimSpace(p.Summary),
- Description: strings.TrimSpace(p.Description),
- Responses: p.Responses,
- Examples: p.Examples,
- Unpublished: p.Unpublished,
- Deprecated: p.Deprecated,
- }
-}
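
A sketch of wiring a Path with the Operations map via PathOperation. The backend type and the handler methods (pathRoleRead, pathRoleWrite) are assumptions for illustration; they would have the usual OperationFunc signature:

func rolePaths(b *backend) []*framework.Path {
	return []*framework.Path{
		{
			Pattern: "roles/" + framework.GenericNameRegex("name"),
			Fields: map[string]*framework.FieldSchema{
				"name": {Type: framework.TypeString, Description: "Role name"},
				"ttl":  {Type: framework.TypeDurationSecond, Description: "Lease TTL"},
			},
			Operations: map[logical.Operation]framework.OperationHandler{
				logical.ReadOperation: &framework.PathOperation{
					Callback: b.pathRoleRead,
					Summary:  "Read a role by name.",
				},
				logical.UpdateOperation: &framework.PathOperation{
					Callback: b.pathRoleWrite,
					Summary:  "Create or update a role.",
				},
			},
			HelpSynopsis: "Manage roles.",
		},
	}
}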
-
-func (p *Path) helpCallback(b *Backend) OperationFunc {
- return func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) {
- var tplData pathTemplateData
- tplData.Request = req.Path
- tplData.RoutePattern = p.Pattern
- tplData.Synopsis = strings.TrimSpace(p.HelpSynopsis)
- if tplData.Synopsis == "" {
- tplData.Synopsis = "<no synopsis>"
- }
- tplData.Description = strings.TrimSpace(p.HelpDescription)
- if tplData.Description == "" {
- tplData.Description = "<no description>"
- }
-
- // Alphabetize the fields
- fieldKeys := make([]string, 0, len(p.Fields))
- for k := range p.Fields {
- fieldKeys = append(fieldKeys, k)
- }
- sort.Strings(fieldKeys)
-
- // Build the field help
- tplData.Fields = make([]pathTemplateFieldData, len(fieldKeys))
- for i, k := range fieldKeys {
- schema := p.Fields[k]
- description := strings.TrimSpace(schema.Description)
- if description == "" {
- description = "<no description>"
- }
-
- tplData.Fields[i] = pathTemplateFieldData{
- Key: k,
- Type: schema.Type.String(),
- Description: description,
- }
- }
-
- help, err := executeTemplate(pathHelpTemplate, &tplData)
- if err != nil {
- return nil, errwrap.Wrapf("error executing template: {{err}}", err)
- }
-
- // Build OpenAPI response for this path
- doc := NewOASDocument()
- if err := documentPath(p, b.SpecialPaths(), b.BackendType, doc); err != nil {
- b.Logger().Warn("error generating OpenAPI", "error", err)
- }
-
- return logical.HelpResponse(help, nil, doc), nil
- }
-}
-
-type pathTemplateData struct {
- Request string
- RoutePattern string
- Synopsis string
- Description string
- Fields []pathTemplateFieldData
-}
-
-type pathTemplateFieldData struct {
- Key string
- Type string
- Description string
- URL bool
-}
-
-const pathHelpTemplate = `
-Request: {{.Request}}
-Matching Route: {{.RoutePattern}}
-
-{{.Synopsis}}
-
-{{ if .Fields -}}
-## PARAMETERS
-{{range .Fields}}
-{{indent 4 .Key}} ({{.Type}})
-{{indent 8 .Description}}
-{{end}}{{end}}
-## DESCRIPTION
-
-{{.Description}}
-`
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go b/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
deleted file mode 100644
index 83aa0baf..00000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/path_map.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package framework
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
-
- saltpkg "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-// PathMap can be used to generate a path that stores mappings in the
-// storage. It is a structure that also exports functions for querying the
-// mappings.
-//
-// The primary use case for this is for credential providers to do their
-// mapping to policies.
-type PathMap struct {
- Prefix string
- Name string
- Schema map[string]*FieldSchema
- CaseSensitive bool
- Salt *saltpkg.Salt
- SaltFunc func(context.Context) (*saltpkg.Salt, error)
-
- once sync.Once
-}
-
-func (p *PathMap) init() {
- if p.Prefix == "" {
- p.Prefix = "map"
- }
-
- if p.Schema == nil {
- p.Schema = map[string]*FieldSchema{
- "value": &FieldSchema{
- Type: TypeString,
- Description: fmt.Sprintf("Value for %s mapping", p.Name),
- },
- }
- }
-}
-
-// pathStruct returns the underlying PathStruct for this mapping
-func (p *PathMap) pathStruct(ctx context.Context, s logical.Storage, k string) (*PathStruct, error) {
- p.once.Do(p.init)
-
- // If we don't care about casing, store everything lowercase
- if !p.CaseSensitive {
- k = strings.ToLower(k)
- }
-
- // The original key before any salting
- origKey := k
-
- // If we have a salt, apply it before lookup
- salt := p.Salt
- var err error
- if p.SaltFunc != nil {
- salt, err = p.SaltFunc(ctx)
- if err != nil {
- return nil, err
- }
- }
- if salt != nil {
- k = "s" + salt.SaltIDHashFunc(k, saltpkg.SHA256Hash)
- }
-
- finalName := fmt.Sprintf("map/%s/%s", p.Name, k)
- ps := &PathStruct{
- Name: finalName,
- Schema: p.Schema,
- }
-
- if !strings.HasPrefix(origKey, "s") && k != origKey {
- // Ensure that no matter what happens what is returned is the final
- // path
- defer func() {
- ps.Name = finalName
- }()
-
- //
- // Check for unsalted version and upgrade if so
- //
-
- // Generate the unsalted name
- unsaltedName := fmt.Sprintf("map/%s/%s", p.Name, origKey)
- // Set the path struct to use the unsalted name
- ps.Name = unsaltedName
-
- val, err := ps.Get(ctx, s)
- if err != nil {
- return nil, err
- }
- // If not nil, we have an unsalted entry -- upgrade it
- if val != nil {
- // Set the path struct to use the desired final name
- ps.Name = finalName
- err = ps.Put(ctx, s, val)
- if err != nil {
- return nil, err
- }
- // Set it back to the old path and delete
- ps.Name = unsaltedName
- err = ps.Delete(ctx, s)
- if err != nil {
- return nil, err
- }
- // We'll set this in the deferred function but doesn't hurt here
- ps.Name = finalName
- }
-
- //
- // Check for SHA1 hashed version and upgrade if so
- //
-
- // Generate the SHA1 hash suffixed path name
- sha1SuffixedName := fmt.Sprintf("map/%s/%s", p.Name, salt.SaltID(origKey))
-
- // Set the path struct to use the SHA1 hash suffixed path name
- ps.Name = sha1SuffixedName
-
- val, err = ps.Get(ctx, s)
- if err != nil {
- return nil, err
- }
- // If not nil, we have an SHA1 hash suffixed entry -- upgrade it
- if val != nil {
- // Set the path struct to use the desired final name
- ps.Name = finalName
- err = ps.Put(ctx, s, val)
- if err != nil {
- return nil, err
- }
- // Set it back to the old path and delete
- ps.Name = sha1SuffixedName
- err = ps.Delete(ctx, s)
- if err != nil {
- return nil, err
- }
- // We'll set this in the deferred function but doesn't hurt here
- ps.Name = finalName
- }
- }
-
- return ps, nil
-}
-
-// Get reads a value out of the mapping
-func (p *PathMap) Get(ctx context.Context, s logical.Storage, k string) (map[string]interface{}, error) {
- ps, err := p.pathStruct(ctx, s, k)
- if err != nil {
- return nil, err
- }
- return ps.Get(ctx, s)
-}
-
-// Put writes a value into the mapping
-func (p *PathMap) Put(ctx context.Context, s logical.Storage, k string, v map[string]interface{}) error {
- ps, err := p.pathStruct(ctx, s, k)
- if err != nil {
- return err
- }
- return ps.Put(ctx, s, v)
-}
-
-// Delete removes a value from the mapping
-func (p *PathMap) Delete(ctx context.Context, s logical.Storage, k string) error {
- ps, err := p.pathStruct(ctx, s, k)
- if err != nil {
- return err
- }
- return ps.Delete(ctx, s)
-}
-
-// List reads the keys under a given path
-func (p *PathMap) List(ctx context.Context, s logical.Storage, prefix string) ([]string, error) {
- stripPrefix := fmt.Sprintf("struct/map/%s/", p.Name)
- fullPrefix := fmt.Sprintf("%s%s", stripPrefix, prefix)
- out, err := s.List(ctx, fullPrefix)
- if err != nil {
- return nil, err
- }
- stripped := make([]string, len(out))
- for idx, k := range out {
- stripped[idx] = strings.TrimPrefix(k, stripPrefix)
- }
- return stripped, nil
-}
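
A sketch of the PathMap API from a backend's point of view; the map name and key are illustrative, and ctx/storage would come from the request:

var teams = &framework.PathMap{Name: "teams"}

func demo(ctx context.Context, s logical.Storage) error {
	if err := teams.Put(ctx, s, "dev", map[string]interface{}{
		"value": "default,dev-policy",
	}); err != nil {
		return err
	}
	v, err := teams.Get(ctx, s, "dev")
	if err != nil {
		return err
	}
	_ = v // map[string]interface{}{"value": "default,dev-policy"}

	keys, err := teams.List(ctx, s, "")
	_ = keys // ["dev"] with no salt configured; salted keys otherwise
	return err
}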
-
-// Paths are the paths to append to the Backend paths.
-func (p *PathMap) Paths() []*Path {
- p.once.Do(p.init)
-
- // Build the schema by simply adding the "key"
- schema := make(map[string]*FieldSchema)
- for k, v := range p.Schema {
- schema[k] = v
- }
- schema["key"] = &FieldSchema{
- Type: TypeString,
- Description: fmt.Sprintf("Key for the %s mapping", p.Name),
- }
-
- return []*Path{
- &Path{
- Pattern: fmt.Sprintf("%s/%s/?$", p.Prefix, p.Name),
-
- Callbacks: map[logical.Operation]OperationFunc{
- logical.ListOperation: p.pathList(),
- logical.ReadOperation: p.pathList(),
- },
-
- HelpSynopsis: fmt.Sprintf("Read mappings for %s", p.Name),
- },
-
- &Path{
- Pattern: fmt.Sprintf(`%s/%s/(?P<key>[-\w]+)`, p.Prefix, p.Name),
-
- Fields: schema,
-
- Callbacks: map[logical.Operation]OperationFunc{
- logical.CreateOperation: p.pathSingleWrite(),
- logical.ReadOperation: p.pathSingleRead(),
- logical.UpdateOperation: p.pathSingleWrite(),
- logical.DeleteOperation: p.pathSingleDelete(),
- },
-
- HelpSynopsis: fmt.Sprintf("Read/write/delete a single %s mapping", p.Name),
-
- ExistenceCheck: p.pathSingleExistenceCheck(),
- },
- }
-}
-
-func (p *PathMap) pathList() OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
- keys, err := p.List(ctx, req.Storage, "")
- if err != nil {
- return nil, err
- }
-
- return logical.ListResponse(keys), nil
- }
-}
-
-func (p *PathMap) pathSingleRead() OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
- v, err := p.Get(ctx, req.Storage, d.Get("key").(string))
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Data: v,
- }, nil
- }
-}
-
-func (p *PathMap) pathSingleWrite() OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
- err := p.Put(ctx, req.Storage, d.Get("key").(string), d.Raw)
- return nil, err
- }
-}
-
-func (p *PathMap) pathSingleDelete() OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
- err := p.Delete(ctx, req.Storage, d.Get("key").(string))
- return nil, err
- }
-}
-
-func (p *PathMap) pathSingleExistenceCheck() ExistenceFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (bool, error) {
- v, err := p.Get(ctx, req.Storage, d.Get("key").(string))
- if err != nil {
- return false, err
- }
- return v != nil, nil
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go b/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go
deleted file mode 100644
index beaed52d..00000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/path_struct.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package framework
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// PathStruct can be used to generate a path that stores a struct
-// in the storage. The stored value is a map[string]interface{}, with
-// types set according to the schema in this structure.
-type PathStruct struct {
- Name string
- Path string
- Schema map[string]*FieldSchema
- HelpSynopsis string
- HelpDescription string
-
- Read bool
-}
-
-// Get reads the structure.
-func (p *PathStruct) Get(ctx context.Context, s logical.Storage) (map[string]interface{}, error) {
- entry, err := s.Get(ctx, fmt.Sprintf("struct/%s", p.Name))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result map[string]interface{}
- if err := jsonutil.DecodeJSON(entry.Value, &result); err != nil {
- return nil, err
- }
-
- return result, nil
-}
-
-// Put writes the structure.
-func (p *PathStruct) Put(ctx context.Context, s logical.Storage, v map[string]interface{}) error {
- bytes, err := json.Marshal(v)
- if err != nil {
- return err
- }
-
- return s.Put(ctx, &logical.StorageEntry{
- Key: fmt.Sprintf("struct/%s", p.Name),
- Value: bytes,
- })
-}
-
-// Delete removes the structure.
-func (p *PathStruct) Delete(ctx context.Context, s logical.Storage) error {
- return s.Delete(ctx, fmt.Sprintf("struct/%s", p.Name))
-}
-
-// Paths are the paths to append to the Backend paths.
-func (p *PathStruct) Paths() []*Path {
- // The single path we support to read/write this config
- path := &Path{
- Pattern: p.Path,
- Fields: p.Schema,
-
- Callbacks: map[logical.Operation]OperationFunc{
- logical.CreateOperation: p.pathWrite(),
- logical.UpdateOperation: p.pathWrite(),
- logical.DeleteOperation: p.pathDelete(),
- },
-
- ExistenceCheck: p.pathExistenceCheck(),
-
- HelpSynopsis: p.HelpSynopsis,
- HelpDescription: p.HelpDescription,
- }
-
- // If we support reads, add that
- if p.Read {
- path.Callbacks[logical.ReadOperation] = p.pathRead()
- }
-
- return []*Path{path}
-}
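
A sketch of exposing a single stored object through PathStruct; the field set here is an illustrative assumption:

var configStruct = &framework.PathStruct{
	Name: "config",
	Path: "config",
	Schema: map[string]*framework.FieldSchema{
		"api_key": {Type: framework.TypeString, Description: "Upstream API key"},
	},
	Read:         true, // also register a ReadOperation
	HelpSynopsis: "Configure the backend.",
}

// In the backend definition:
//   Paths: framework.PathAppend(configStruct.Paths(), otherPaths)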
-
-func (p *PathStruct) pathRead() OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
- v, err := p.Get(ctx, req.Storage)
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Data: v,
- }, nil
- }
-}
-
-func (p *PathStruct) pathWrite() OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
- err := p.Put(ctx, req.Storage, d.Raw)
- return nil, err
- }
-}
-
-func (p *PathStruct) pathDelete() OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
- err := p.Delete(ctx, req.Storage)
- return nil, err
- }
-}
-
-func (p *PathStruct) pathExistenceCheck() ExistenceFunc {
- return func(ctx context.Context, req *logical.Request, d *FieldData) (bool, error) {
- v, err := p.Get(ctx, req.Storage)
- if err != nil {
- return false, err
- }
-
- return v != nil, nil
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go b/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go
deleted file mode 100644
index 089cf7f2..00000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/policy_map.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package framework
-
-import (
- "context"
- "sort"
- "strings"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// PolicyMap is a specialization of PathMap that expects the values to
-// be lists of policies. This assists in querying and loading policies
-// from the PathMap.
-type PolicyMap struct {
- PathMap
-
- DefaultKey string
- PolicyKey string
-}
-
-func (p *PolicyMap) Policies(ctx context.Context, s logical.Storage, names ...string) ([]string, error) {
- policyKey := "value"
- if p.PolicyKey != "" {
- policyKey = p.PolicyKey
- }
-
- if p.DefaultKey != "" {
- newNames := make([]string, len(names)+1)
- newNames[0] = p.DefaultKey
- copy(newNames[1:], names)
- names = newNames
- }
-
- set := make(map[string]struct{})
- for _, name := range names {
- v, err := p.Get(ctx, s, name)
- if err != nil {
- return nil, err
- }
-
- valuesRaw, ok := v[policyKey]
- if !ok {
- continue
- }
-
- values, ok := valuesRaw.(string)
- if !ok {
- continue
- }
-
- for _, p := range strings.Split(values, ",") {
- if p = strings.TrimSpace(p); p != "" {
- set[p] = struct{}{}
- }
- }
- }
-
- list := make([]string, 0, len(set))
- for k := range set {
- list = append(list, k)
- }
- sort.Strings(list)
-
- return list, nil
-}
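
A sketch of resolving policies at login time with a PolicyMap; the map name and DefaultKey are illustrative:

var teamPolicies = &framework.PolicyMap{
	PathMap:    framework.PathMap{Name: "teams"},
	DefaultKey: "default",
}

// Policies merges the "default" entry with the team's entry, splits
// each value on commas, de-duplicates, and returns a sorted list.
func policiesFor(ctx context.Context, s logical.Storage, team string) ([]string, error) {
	return teamPolicies.Policies(ctx, s, team)
}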
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/secret.go b/vendor/github.com/hashicorp/vault/logical/framework/secret.go
deleted file mode 100644
index 616a055c..00000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/secret.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package framework
-
-import (
- "context"
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// Secret is a type of secret that can be returned from a backend.
-type Secret struct {
- // Type is the name of this secret type. This is used to set up the
- // vault ID and to look up the proper secret structure when revocation/
- // renewal happens. Once this is set this should not be changed.
- //
- // The format of this must match (case insensitive): ^[a-z0-9_]+$
- Type string
-
- // Fields is the mapping of data fields and schema that comprise
- // the structure of this secret.
- Fields map[string]*FieldSchema
-
- // DefaultDuration is the default value for the duration of the lease for
- // this secret. This can be manually overwritten with the result of
- // Response().
- //
- // If these aren't set, Vault core will set a default lease period which
- // may come from a mount tuning.
- DefaultDuration time.Duration
-
- // Renew is the callback called to renew this secret. If Renew is
- // not specified then renewable is set to false in the secret.
- // See lease.go for helpers for this value.
- Renew OperationFunc
-
- // Revoke is the callback called to revoke this secret. This is required.
- Revoke OperationFunc
-}
-
-func (s *Secret) Renewable() bool {
- return s.Renew != nil
-}
-
-func (s *Secret) Response(
- data, internal map[string]interface{}) *logical.Response {
- internalData := make(map[string]interface{})
- for k, v := range internal {
- internalData[k] = v
- }
- internalData["secret_type"] = s.Type
-
- return &logical.Response{
- Secret: &logical.Secret{
- LeaseOptions: logical.LeaseOptions{
- TTL: s.DefaultDuration,
- Renewable: s.Renewable(),
- },
- InternalData: internalData,
- },
-
- Data: data,
- }
-}
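
A sketch of declaring a secret type and issuing it from a handler; this is a fragment assuming the usual imports, and the backend type with its renew/revoke methods are assumptions:

func secretAccessToken(b *backend) *framework.Secret {
	return &framework.Secret{
		Type:            "access_token",
		DefaultDuration: time.Hour,
		Renew:           b.secretTokenRenew,
		Revoke:          b.secretTokenRevoke, // required
	}
}

// In a path handler:
//   return secretAccessToken(b).Response(
//       map[string]interface{}{"token": tok},   // data returned to the caller
//       map[string]interface{}{"token_id": id}, // kept in InternalData for revoke
//   ), nil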
-
-// HandleRenew is the request handler for renewing this secret.
-func (s *Secret) HandleRenew(ctx context.Context, req *logical.Request) (*logical.Response, error) {
- if !s.Renewable() {
- return nil, logical.ErrUnsupportedOperation
- }
-
- data := &FieldData{
- Raw: req.Data,
- Schema: s.Fields,
- }
-
- return s.Renew(ctx, req, data)
-}
-
-// HandleRevoke is the request handler for revoking this secret.
-func (s *Secret) HandleRevoke(ctx context.Context, req *logical.Request) (*logical.Response, error) {
- data := &FieldData{
- Raw: req.Data,
- Schema: s.Fields,
- }
-
- if s.Revoke != nil {
- return s.Revoke(ctx, req, data)
- }
-
- return nil, logical.ErrUnsupportedOperation
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/template.go b/vendor/github.com/hashicorp/vault/logical/framework/template.go
deleted file mode 100644
index 3abdd624..00000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/template.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package framework
-
-import (
- "bufio"
- "bytes"
- "strings"
- "text/template"
-
- "github.com/hashicorp/errwrap"
-)
-
-func executeTemplate(tpl string, data interface{}) (string, error) {
- // Define the functions
- funcs := map[string]interface{}{
- "indent": funcIndent,
- }
-
- // Parse the help template
- t, err := template.New("root").Funcs(funcs).Parse(tpl)
- if err != nil {
- return "", errwrap.Wrapf("error parsing template: {{err}}", err)
- }
-
- // Execute the template and store the output
- var buf bytes.Buffer
- if err := t.Execute(&buf, data); err != nil {
- return "", errwrap.Wrapf("error executing template: {{err}}", err)
- }
-
- return strings.TrimSpace(buf.String()), nil
-}
-
-func funcIndent(count int, text string) string {
- var buf bytes.Buffer
- prefix := strings.Repeat(" ", count)
- scan := bufio.NewScanner(strings.NewReader(text))
- for scan.Scan() {
- buf.WriteString(prefix + scan.Text() + "\n")
- }
-
- return strings.TrimRight(buf.String(), "\n")
-}
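
An in-package sketch of the helper pair above (both are unexported), showing the indent function in action:

out, err := executeTemplate("Keys:\n{{indent 4 .}}", "a\nb")
if err != nil {
	// handle error
}
// out == "Keys:\n    a\n    b"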
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/testing.go b/vendor/github.com/hashicorp/vault/logical/framework/testing.go
deleted file mode 100644
index a00a3241..00000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/testing.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package framework
-
-import (
- "testing"
-)
-
-// TestBackendRoutes is a helper to test that all the given routes will
-// route properly in the backend.
-func TestBackendRoutes(t *testing.T, b *Backend, rs []string) {
- for _, r := range rs {
- if b.Route(r) == nil {
- t.Fatalf("bad route: %s", r)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/framework/wal.go b/vendor/github.com/hashicorp/vault/logical/framework/wal.go
deleted file mode 100644
index c8fa3b87..00000000
--- a/vendor/github.com/hashicorp/vault/logical/framework/wal.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package framework
-
-import (
- "context"
- "encoding/json"
- "strings"
- "time"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// WALPrefix is the prefix within Storage where WAL entries will be written.
-const WALPrefix = "wal/"
-
-type WALEntry struct {
- ID string `json:"-"`
- Kind string `json:"type"`
- Data interface{} `json:"data"`
- CreatedAt int64 `json:"created_at"`
-}
-
-// PutWAL writes some data to the WAL.
-//
-// The kind parameter is used by the framework to allow users to store
-// multiple kinds of WAL data and to easily disambiguate what data they're
-// expecting.
-//
-// Data within the WAL that is uncommitted (DeleteWAL hasn't been called)
-// will be given to the rollback callback when a rollback operation is
-// received, allowing the backend to clean up partial states.
-//
-// The data must be JSON encodable.
-//
-// This returns a unique ID that can be used to reference this WAL data.
-// WAL data cannot be modified. You can only add to the WAL and commit existing
-// WAL entries.
-func PutWAL(ctx context.Context, s logical.Storage, kind string, data interface{}) (string, error) {
- value, err := json.Marshal(&WALEntry{
- Kind: kind,
- Data: data,
- CreatedAt: time.Now().UTC().Unix(),
- })
- if err != nil {
- return "", err
- }
-
- id, err := uuid.GenerateUUID()
- if err != nil {
- return "", err
- }
-
- return id, s.Put(ctx, &logical.StorageEntry{
- Key: WALPrefix + id,
- Value: value,
- })
-}
-
-// GetWAL reads a specific entry from the WAL. If the entry doesn't exist,
-// then a nil value is returned.
-//
-// The decoded WALEntry (with its ID populated) and any error are returned.
-func GetWAL(ctx context.Context, s logical.Storage, id string) (*WALEntry, error) {
- entry, err := s.Get(ctx, WALPrefix+id)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var raw WALEntry
- if err := jsonutil.DecodeJSON(entry.Value, &raw); err != nil {
- return nil, err
- }
- raw.ID = id
-
- return &raw, nil
-}
-
-// DeleteWAL commits the WAL entry with the given ID. Once committed,
-// it is assumed that the operation was a success and doesn't need to
-// be rolled back.
-func DeleteWAL(ctx context.Context, s logical.Storage, id string) error {
- return s.Delete(ctx, WALPrefix+id)
-}
-
-// ListWAL lists all the entries in the WAL.
-func ListWAL(ctx context.Context, s logical.Storage) ([]string, error) {
- keys, err := s.List(ctx, WALPrefix)
- if err != nil {
- return nil, err
- }
-
- for i, k := range keys {
- keys[i] = strings.TrimPrefix(k, WALPrefix)
- }
-
- return keys, nil
-}
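
A sketch of the intended write-ahead pattern around a multi-step operation; provisionUser is a hypothetical side effect standing in for a call to an external system:

func createUpstreamUser(ctx context.Context, s logical.Storage, name string) error {
	// 1. Record intent before touching the external system.
	walID, err := framework.PutWAL(ctx, s, "user", map[string]interface{}{"name": name})
	if err != nil {
		return err
	}

	// 2. Perform the side effect. On failure the WAL entry remains,
	// so the backend's rollback handler can clean up later.
	if err := provisionUser(name); err != nil {
		return err
	}

	// 3. Success: "commit" by deleting the WAL entry.
	return framework.DeleteWAL(ctx, s, walID)
}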
diff --git a/vendor/github.com/hashicorp/vault/logical/identity.pb.go b/vendor/github.com/hashicorp/vault/logical/identity.pb.go
deleted file mode 100644
index cd196522..00000000
--- a/vendor/github.com/hashicorp/vault/logical/identity.pb.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: logical/identity.proto
-
-package logical
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Entity struct {
- // ID is the unique identifier for the entity
- ID string `sentinel:"" protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // Name is the human-friendly unique identifier for the entity
- Name string `sentinel:"" protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
- // Aliases contains the alias mappings for the given entity
- Aliases []*Alias `sentinel:"" protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"`
- // Metadata represents the custom data tied to this entity
- Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Entity) Reset() { *m = Entity{} }
-func (m *Entity) String() string { return proto.CompactTextString(m) }
-func (*Entity) ProtoMessage() {}
-func (*Entity) Descriptor() ([]byte, []int) {
- return fileDescriptor_04442ca37d5e30be, []int{0}
-}
-
-func (m *Entity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Entity.Unmarshal(m, b)
-}
-func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Entity.Marshal(b, m, deterministic)
-}
-func (m *Entity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Entity.Merge(m, src)
-}
-func (m *Entity) XXX_Size() int {
- return xxx_messageInfo_Entity.Size(m)
-}
-func (m *Entity) XXX_DiscardUnknown() {
- xxx_messageInfo_Entity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Entity proto.InternalMessageInfo
-
-func (m *Entity) GetID() string {
- if m != nil {
- return m.ID
- }
- return ""
-}
-
-func (m *Entity) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Entity) GetAliases() []*Alias {
- if m != nil {
- return m.Aliases
- }
- return nil
-}
-
-func (m *Entity) GetMetadata() map[string]string {
- if m != nil {
- return m.Metadata
- }
- return nil
-}
-
-type Alias struct {
- // MountType is the backend mount's type to which this identity belongs
- MountType string `sentinel:"" protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"`
- // MountAccessor is the identifier of the mount entry to which this
- // identity belongs
- MountAccessor string `sentinel:"" protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"`
- // Name is the identifier of this identity in its authentication source
- Name string `sentinel:"" protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
- // Metadata represents the custom data tied to this alias
- Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Alias) Reset() { *m = Alias{} }
-func (m *Alias) String() string { return proto.CompactTextString(m) }
-func (*Alias) ProtoMessage() {}
-func (*Alias) Descriptor() ([]byte, []int) {
- return fileDescriptor_04442ca37d5e30be, []int{1}
-}
-
-func (m *Alias) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Alias.Unmarshal(m, b)
-}
-func (m *Alias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Alias.Marshal(b, m, deterministic)
-}
-func (m *Alias) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Alias.Merge(m, src)
-}
-func (m *Alias) XXX_Size() int {
- return xxx_messageInfo_Alias.Size(m)
-}
-func (m *Alias) XXX_DiscardUnknown() {
- xxx_messageInfo_Alias.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Alias proto.InternalMessageInfo
-
-func (m *Alias) GetMountType() string {
- if m != nil {
- return m.MountType
- }
- return ""
-}
-
-func (m *Alias) GetMountAccessor() string {
- if m != nil {
- return m.MountAccessor
- }
- return ""
-}
-
-func (m *Alias) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Alias) GetMetadata() map[string]string {
- if m != nil {
- return m.Metadata
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Entity)(nil), "logical.Entity")
- proto.RegisterMapType((map[string]string)(nil), "logical.Entity.MetadataEntry")
- proto.RegisterType((*Alias)(nil), "logical.Alias")
- proto.RegisterMapType((map[string]string)(nil), "logical.Alias.MetadataEntry")
-}
-
-func init() { proto.RegisterFile("logical/identity.proto", fileDescriptor_04442ca37d5e30be) }
-
-var fileDescriptor_04442ca37d5e30be = []byte{
- // 287 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
- 0x10, 0xc5, 0x49, 0xd2, 0x3f, 0x76, 0xa4, 0x45, 0x06, 0x91, 0x20, 0x16, 0x4a, 0x50, 0xc8, 0x29,
- 0x01, 0xbd, 0x54, 0x3d, 0x55, 0xda, 0x43, 0x0f, 0x5e, 0x82, 0x27, 0x2f, 0x32, 0x4d, 0x97, 0x66,
- 0x31, 0xc9, 0x86, 0x64, 0x52, 0xc8, 0x97, 0xf4, 0xec, 0xc7, 0x91, 0x6e, 0xb6, 0xc1, 0xe2, 0xd9,
- 0xdb, 0xec, 0xef, 0xcd, 0xce, 0xbe, 0x79, 0x0b, 0x57, 0xa9, 0xda, 0xc9, 0x98, 0xd2, 0x50, 0x6e,
- 0x45, 0xce, 0x92, 0x9b, 0xa0, 0x28, 0x15, 0x2b, 0x1c, 0x1a, 0xee, 0x7d, 0x59, 0x30, 0x58, 0x69,
- 0x05, 0x27, 0x60, 0xaf, 0x97, 0xae, 0x35, 0xb3, 0xfc, 0x51, 0x64, 0xaf, 0x97, 0x88, 0xd0, 0xcb,
- 0x29, 0x13, 0xae, 0xad, 0x89, 0xae, 0xd1, 0x87, 0x21, 0xa5, 0x92, 0x2a, 0x51, 0xb9, 0xce, 0xcc,
- 0xf1, 0xcf, 0xef, 0x27, 0x81, 0x99, 0x14, 0x2c, 0x0e, 0x3c, 0x3a, 0xca, 0xf8, 0x08, 0x67, 0x99,
- 0x60, 0xda, 0x12, 0x93, 0xdb, 0xd3, 0xad, 0xd3, 0xae, 0xb5, 0x7d, 0x30, 0x78, 0x35, 0xfa, 0x2a,
- 0xe7, 0xb2, 0x89, 0xba, 0xf6, 0xeb, 0x67, 0x18, 0x9f, 0x48, 0x78, 0x01, 0xce, 0xa7, 0x68, 0x8c,
- 0xb5, 0x43, 0x89, 0x97, 0xd0, 0xdf, 0x53, 0x5a, 0x1f, 0xcd, 0xb5, 0x87, 0x27, 0x7b, 0x6e, 0x79,
- 0xdf, 0x16, 0xf4, 0xb5, 0x15, 0x9c, 0x02, 0x64, 0xaa, 0xce, 0xf9, 0x83, 0x9b, 0x42, 0x98, 0xcb,
- 0x23, 0x4d, 0xde, 0x9a, 0x42, 0xe0, 0x1d, 0x4c, 0x5a, 0x99, 0xe2, 0x58, 0x54, 0x95, 0x2a, 0xcd,
- 0xac, 0xb1, 0xa6, 0x0b, 0x03, 0xbb, 0x14, 0x9c, 0x5f, 0x29, 0xcc, 0xff, 0xec, 0x76, 0x73, 0x1a,
- 0xc3, 0xbf, 0xac, 0xf6, 0x72, 0xfb, 0xee, 0xed, 0x24, 0x27, 0xf5, 0x26, 0x88, 0x55, 0x16, 0x26,
- 0x54, 0x25, 0x32, 0x56, 0x65, 0x11, 0xee, 0xa9, 0x4e, 0x39, 0x34, 0x06, 0x36, 0x03, 0xfd, 0xc3,
- 0x0f, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xfb, 0x6f, 0x8c, 0xfb, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/identity.proto b/vendor/github.com/hashicorp/vault/logical/identity.proto
deleted file mode 100644
index b9c56713..00000000
--- a/vendor/github.com/hashicorp/vault/logical/identity.proto
+++ /dev/null
@@ -1,34 +0,0 @@
-syntax = "proto3";
-
-option go_package = "github.com/hashicorp/vault/logical";
-
-package logical;
-
-message Entity {
- // ID is the unique identifier for the entity
- string ID = 1;
-
- // Name is the human-friendly unique identifier for the entity
- string name = 2;
-
- // Aliases contains the alias mappings for the given entity
- repeated Alias aliases = 3;
-
- // Metadata represents the custom data tied to this entity
- map<string, string> metadata = 4;
-}
-
-message Alias {
- // MountType is the backend mount's type to which this identity belongs
- string mount_type = 1;
-
- // MountAccessor is the identifier of the mount entry to which this
- // identity belongs
- string mount_accessor = 2;
-
- // Name is the identifier of this identity in its authentication source
- string name = 3;
-
- // Metadata represents the custom data tied to this alias
- map<string, string> metadata = 4;
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/lease.go b/vendor/github.com/hashicorp/vault/logical/lease.go
deleted file mode 100644
index 97bbe4f6..00000000
--- a/vendor/github.com/hashicorp/vault/logical/lease.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package logical
-
-import (
- "time"
-)
-
-// LeaseOptions is an embeddable struct to capture common lease
-// settings between a Secret and Auth
-type LeaseOptions struct {
- // TTL is the duration that this secret is valid for. Vault
- // will automatically revoke it after the duration.
- TTL time.Duration `json:"lease"`
-
- // MaxTTL is the maximum duration that this secret is valid for.
- MaxTTL time.Duration `json:"max_ttl"`
-
- // Renewable, if true, means that this secret can be renewed.
- Renewable bool `json:"renewable"`
-
- // Increment will be the lease increment that the user requested.
- // This is only available on a Renew operation and has no effect
- // when returning a response.
- Increment time.Duration `json:"-"`
-
- // IssueTime is the time of issue for the original lease. This is
- // only available on Renew and Revoke operations and has no effect when returning
- // a response. It can be used to enforce maximum lease periods by
- // a logical backend.
- IssueTime time.Time `json:"-"`
-}
-
-// LeaseEnabled checks if leasing is enabled
-func (l *LeaseOptions) LeaseEnabled() bool {
- return l.TTL > 0
-}
-
-// LeaseTotal is the lease duration with a guard against a negative TTL
-func (l *LeaseOptions) LeaseTotal() time.Duration {
- if l.TTL <= 0 {
- return 0
- }
-
- return l.TTL
-}
-
-// ExpirationTime computes the time at which the lease expires
-func (l *LeaseOptions) ExpirationTime() time.Time {
- var expireTime time.Time
- if l.LeaseEnabled() {
- expireTime = time.Now().Add(l.LeaseTotal())
- }
- return expireTime
-}
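
A worked example of the helpers above (values illustrative; fragment assumes the usual imports):

opts := logical.LeaseOptions{TTL: 30 * time.Minute, Renewable: true}
opts.LeaseEnabled()   // true
opts.LeaseTotal()     // 30m0s
opts.ExpirationTime() // roughly time.Now().Add(30 * time.Minute)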
diff --git a/vendor/github.com/hashicorp/vault/logical/logical.go b/vendor/github.com/hashicorp/vault/logical/logical.go
deleted file mode 100644
index a3456e96..00000000
--- a/vendor/github.com/hashicorp/vault/logical/logical.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package logical
-
-import (
- "context"
-
- log "github.com/hashicorp/go-hclog"
-)
-
-// BackendType is the type of backend that is being implemented
-type BackendType uint32
-
-// These are the types of backends that can be derived from
-// logical.Backend
-const (
- TypeUnknown BackendType = 0 // This is also the zero-value for BackendType
- TypeLogical BackendType = 1
- TypeCredential BackendType = 2
-)
-
-// Stringer implementation
-func (b BackendType) String() string {
- switch b {
- case TypeLogical:
- return "secret"
- case TypeCredential:
- return "auth"
- }
-
- return "unknown"
-}
-
-// Backend interface must be implemented to be "mountable" at
-// a given path. Requests flow through a router which has various mount
-// points that flow to a logical backend. The logic of each backend is flexible,
-// and this is what allows materialized keys to function. There can be specialized
-// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can
-// interact with remote APIs to generate keys dynamically. This interface also
-// allows for a "procfs" like interaction, as internal state can be exposed by
-// acting like a logical backend and being mounted.
-type Backend interface {
- // HandleRequest is used to handle a request and generate a response.
- // The backends must check the operation type and handle appropriately.
- HandleRequest(context.Context, *Request) (*Response, error)
-
- // SpecialPaths is a list of paths that are special in some way.
- // See PathType for the types of special paths. The key is the type
- // of the special path, and the value is a list of paths for this type.
- // This is not a regular expression but is an exact match. If the path
- // ends in '*' then it is a prefix-based match. The '*' can only appear
- // at the end.
- SpecialPaths() *Paths
-
- // System provides an interface to access certain system configuration
- // information, such as globally configured default and max lease TTLs.
- System() SystemView
-
- // Logger provides an interface to access the underlying logger. This
- // is useful when a struct embeds a Backend-implemented struct that
- // contains a private instance of logger.
- Logger() log.Logger
-
- // HandleExistenceCheck is used to handle a request and generate a response
- // indicating whether the given path exists or not; this is used to
- // understand whether the request must have a Create or Update capability
- // ACL applied. The first bool indicates whether an existence check
- // function was found for the backend; the second indicates whether, if an
- // existence check function was found, the item exists or not.
- HandleExistenceCheck(context.Context, *Request) (bool, bool, error)
-
- // Cleanup is invoked during an unmount of a backend to allow it to
- // handle any cleanup like connection closing or releasing of file handles.
- Cleanup(context.Context)
-
- // InvalidateKey may be invoked when an object is modified that belongs
- // to the backend. The backend can use this to clear any caches or reset
- // internal state as needed.
- InvalidateKey(context.Context, string)
-
- // Setup is used to set up the backend based on the provided backend
- // configuration.
- Setup(context.Context, *BackendConfig) error
-
- // Type returns the BackendType for the particular backend
- Type() BackendType
-}
-
-// BackendConfig is provided to the factory to initialize the backend
-type BackendConfig struct {
- // View should not be stored, and should only be used for initialization
- StorageView Storage
-
- // The backend should use this logger. The log should not contain any secrets.
- Logger log.Logger
-
- // System provides a view into a subset of safe system information that
- // is useful for backends, such as the default/max lease TTLs
- System SystemView
-
- // BackendUUID is a unique identifier provided to this backend. It's useful
- // when a backend needs a consistent and unique string without using storage.
- BackendUUID string
-
- // Config is the opaque user configuration provided when mounting
- Config map[string]string
-}
-
-// Factory is the factory function to create a logical backend.
-type Factory func(context.Context, *BackendConfig) (Backend, error)
-
-// Paths is the structure of special paths that is used for SpecialPaths.
-type Paths struct {
- // Root are the paths that require a root token to access
- Root []string
-
- // Unauthenticated are the paths that can be accessed without any auth.
- Unauthenticated []string
-
- // LocalStorage are paths (prefixes) that are local to this instance; this
- // indicates that these paths should not be replicated
- LocalStorage []string
-
- // SealWrapStorage are storage paths that, when using a capable seal,
- // should be seal wrapped with extra encryption. It is exact matching
- // unless it ends with '/' in which case it will be treated as a prefix.
- SealWrapStorage []string
-}
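
A sketch of a backend's SpecialPaths declaration; the concrete paths are illustrative:

func (b *backend) SpecialPaths() *logical.Paths {
	return &logical.Paths{
		Root:            []string{"config"}, // requires a root token
		Unauthenticated: []string{"login"},  // reachable without auth
		LocalStorage:    []string{"local/"}, // never replicated
		SealWrapStorage: []string{"keys/"},  // extra encryption; trailing '/' makes it a prefix
	}
}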
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin.pb.go b/vendor/github.com/hashicorp/vault/logical/plugin.pb.go
deleted file mode 100644
index b66bea54..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin.pb.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: logical/plugin.proto
-
-package logical
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type PluginEnvironment struct {
- // VaultVersion is the version of the Vault server
- VaultVersion string `protobuf:"bytes,1,opt,name=vault_version,json=vaultVersion,proto3" json:"vault_version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PluginEnvironment) Reset() { *m = PluginEnvironment{} }
-func (m *PluginEnvironment) String() string { return proto.CompactTextString(m) }
-func (*PluginEnvironment) ProtoMessage() {}
-func (*PluginEnvironment) Descriptor() ([]byte, []int) {
- return fileDescriptor_0f04cd6a1a3a5255, []int{0}
-}
-
-func (m *PluginEnvironment) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PluginEnvironment.Unmarshal(m, b)
-}
-func (m *PluginEnvironment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PluginEnvironment.Marshal(b, m, deterministic)
-}
-func (m *PluginEnvironment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PluginEnvironment.Merge(m, src)
-}
-func (m *PluginEnvironment) XXX_Size() int {
- return xxx_messageInfo_PluginEnvironment.Size(m)
-}
-func (m *PluginEnvironment) XXX_DiscardUnknown() {
- xxx_messageInfo_PluginEnvironment.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PluginEnvironment proto.InternalMessageInfo
-
-func (m *PluginEnvironment) GetVaultVersion() string {
- if m != nil {
- return m.VaultVersion
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*PluginEnvironment)(nil), "logical.PluginEnvironment")
-}
-
-func init() { proto.RegisterFile("logical/plugin.proto", fileDescriptor_0f04cd6a1a3a5255) }
-
-var fileDescriptor_0f04cd6a1a3a5255 = []byte{
- // 133 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc9, 0xc9, 0x4f, 0xcf,
- 0x4c, 0x4e, 0xcc, 0xd1, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9,
- 0x17, 0x62, 0x87, 0x8a, 0x2a, 0x59, 0x70, 0x09, 0x06, 0x80, 0x25, 0x5c, 0xf3, 0xca, 0x32, 0x8b,
- 0xf2, 0xf3, 0x72, 0x53, 0xf3, 0x4a, 0x84, 0x94, 0xb9, 0x78, 0xcb, 0x12, 0x4b, 0x73, 0x4a, 0xe2,
- 0xcb, 0x52, 0x8b, 0x8a, 0x33, 0xf3, 0xf3, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x78, 0xc0,
- 0x82, 0x61, 0x10, 0x31, 0x27, 0x95, 0x28, 0xa5, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4,
- 0xfc, 0x5c, 0xfd, 0x8c, 0xc4, 0xe2, 0x8c, 0xcc, 0xe4, 0xfc, 0xa2, 0x02, 0x7d, 0xb0, 0x22, 0x7d,
- 0xa8, 0xf9, 0x49, 0x6c, 0x60, 0xfb, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa3, 0xff, 0x48,
- 0xa9, 0x87, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin.proto b/vendor/github.com/hashicorp/vault/logical/plugin.proto
deleted file mode 100644
index ec849347..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin.proto
+++ /dev/null
@@ -1,10 +0,0 @@
-syntax = "proto3";
-
-option go_package = "github.com/hashicorp/vault/logical";
-
-package logical;
-
-message PluginEnvironment {
- // VaultVersion is the version of the Vault server
- string vault_version = 1;
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend.go
deleted file mode 100644
index ac367c16..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/backend.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package plugin
-
-import (
- "context"
- "net/rpc"
- "sync/atomic"
-
- "google.golang.org/grpc"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin/pb"
-)
-
-var _ plugin.Plugin = (*BackendPlugin)(nil)
-var _ plugin.GRPCPlugin = (*BackendPlugin)(nil)
-var _ plugin.Plugin = (*GRPCBackendPlugin)(nil)
-var _ plugin.GRPCPlugin = (*GRPCBackendPlugin)(nil)
-
-// BackendPlugin is the plugin.Plugin implementation
-type BackendPlugin struct {
- *GRPCBackendPlugin
-}
-
-// GRPCBackendPlugin is the plugin.Plugin implementation that only supports GRPC
-// transport
-type GRPCBackendPlugin struct {
- Factory logical.Factory
- MetadataMode bool
- Logger log.Logger
-
- // Embedding this will disable the netRPC protocol
- plugin.NetRPCUnsupportedPlugin
-}
-
-// Server gets called when on plugin.Serve()
-func (b *BackendPlugin) Server(broker *plugin.MuxBroker) (interface{}, error) {
- return &backendPluginServer{
- factory: b.Factory,
- broker: broker,
- // We pass the logger down into the backend so go-plugin will forward
- // logs for us.
- logger: b.Logger,
- }, nil
-}
-
-// Client gets called on plugin.NewClient()
-func (b BackendPlugin) Client(broker *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
- return &backendPluginClient{
- client: c,
- broker: broker,
- metadataMode: b.MetadataMode,
- }, nil
-}
-
-func (b GRPCBackendPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
- pb.RegisterBackendServer(s, &backendGRPCPluginServer{
- broker: broker,
- factory: b.Factory,
- // We pass the logger down into the backend so go-plugin will forward
- // logs for us.
- logger: b.Logger,
- })
- return nil
-}
-
-func (b *GRPCBackendPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
- ret := &backendGRPCPluginClient{
- client: pb.NewBackendClient(c),
- clientConn: c,
- broker: broker,
- cleanupCh: make(chan struct{}),
- doneCtx: ctx,
- metadataMode: b.MetadataMode,
- }
-
- // Create the value and set the type
- ret.server = new(atomic.Value)
- ret.server.Store((*grpc.Server)(nil))
-
- return ret, nil
-}
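
A hypothetical sketch of a plugin binary's entry point using this package's Serve helper (defined in serve.go, not shown in this diff). Here example.com/mybackend and its Factory are assumptions, and the TLS wiring a production plugin needs is omitted:

package main

import (
	"os"

	"github.com/hashicorp/vault/logical/plugin"

	"example.com/mybackend" // assumption: provides a logical.Factory
)

func main() {
	if err := plugin.Serve(&plugin.ServeOpts{
		BackendFactoryFunc: mybackend.Factory,
		// TLSProviderFunc omitted here; real plugins set it up.
	}); err != nil {
		os.Exit(1)
	}
}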
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go
deleted file mode 100644
index 43a442f4..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/backend_client.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package plugin
-
-import (
- "context"
- "errors"
- "net/rpc"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- ErrClientInMetadataMode = errors.New("plugin client can not perform action while in metadata mode")
-)
-
-// backendPluginClient implements logical.Backend and is the
-// go-plugin client.
-type backendPluginClient struct {
- broker *plugin.MuxBroker
- client *rpc.Client
- metadataMode bool
-
- system logical.SystemView
- logger log.Logger
-}
-
-// HandleRequestArgs is the args for HandleRequest method.
-type HandleRequestArgs struct {
- StorageID uint32
- Request *logical.Request
-}
-
-// HandleRequestReply is the reply for HandleRequest method.
-type HandleRequestReply struct {
- Response *logical.Response
- Error error
-}
-
-// SpecialPathsReply is the reply for SpecialPaths method.
-type SpecialPathsReply struct {
- Paths *logical.Paths
-}
-
-// SystemReply is the reply for System method.
-type SystemReply struct {
- SystemView logical.SystemView
- Error error
-}
-
-// HandleExistenceCheckArgs is the args for HandleExistenceCheck method.
-type HandleExistenceCheckArgs struct {
- StorageID uint32
- Request *logical.Request
-}
-
-// HandleExistenceCheckReply is the reply for HandleExistenceCheck method.
-type HandleExistenceCheckReply struct {
- CheckFound bool
- Exists bool
- Error error
-}
-
-// SetupArgs is the args for Setup method.
-type SetupArgs struct {
- StorageID uint32
- LoggerID uint32
- SysViewID uint32
- Config map[string]string
- BackendUUID string
-}
-
-// SetupReply is the reply for Setup method.
-type SetupReply struct {
- Error error
-}
-
-// TypeReply is the reply for the Type method.
-type TypeReply struct {
- Type logical.BackendType
-}
-
-func (b *backendPluginClient) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
- if b.metadataMode {
- return nil, ErrClientInMetadataMode
- }
-
- // Do not send the storage, since go-plugin cannot serialize
- // interfaces. The server will pick up the storage from the shim.
- req.Storage = nil
- args := &HandleRequestArgs{
- Request: req,
- }
- var reply HandleRequestReply
-
- if req.Connection != nil {
- oldConnState := req.Connection.ConnState
- req.Connection.ConnState = nil
- defer func() {
- req.Connection.ConnState = oldConnState
- }()
- }
-
- err := b.client.Call("Plugin.HandleRequest", args, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- if reply.Error.Error() == logical.ErrUnsupportedOperation.Error() {
- return nil, logical.ErrUnsupportedOperation
- }
-
- return reply.Response, reply.Error
- }
-
- return reply.Response, nil
-}
-
-func (b *backendPluginClient) SpecialPaths() *logical.Paths {
- var reply SpecialPathsReply
- err := b.client.Call("Plugin.SpecialPaths", new(interface{}), &reply)
- if err != nil {
- return nil
- }
-
- return reply.Paths
-}
-
-// System returns vault's system view. The backend client stores the view during
-// Setup, so there is no need to shim the system just to get it back.
-func (b *backendPluginClient) System() logical.SystemView {
- return b.system
-}
-
-// Logger returns vault's logger. The backend client stores the logger during
-// Setup, so there is no need to shim the logger just to get it back.
-func (b *backendPluginClient) Logger() log.Logger {
- return b.logger
-}
-
-func (b *backendPluginClient) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) {
- if b.metadataMode {
- return false, false, ErrClientInMetadataMode
- }
-
- // Do not send the storage, since go-plugin cannot serialize
- // interfaces. The server will pick up the storage from the shim.
- req.Storage = nil
- args := &HandleExistenceCheckArgs{
- Request: req,
- }
- var reply HandleExistenceCheckReply
-
- if req.Connection != nil {
- oldConnState := req.Connection.ConnState
- req.Connection.ConnState = nil
- defer func() {
- req.Connection.ConnState = oldConnState
- }()
- }
-
- err := b.client.Call("Plugin.HandleExistenceCheck", args, &reply)
- if err != nil {
- return false, false, err
- }
- if reply.Error != nil {
-		// THINKING: Should this be a switch on all error types?
- if reply.Error.Error() == logical.ErrUnsupportedPath.Error() {
- return false, false, logical.ErrUnsupportedPath
- }
- return false, false, reply.Error
- }
-
- return reply.CheckFound, reply.Exists, nil
-}
-
-func (b *backendPluginClient) Cleanup(ctx context.Context) {
- b.client.Call("Plugin.Cleanup", new(interface{}), &struct{}{})
-}
-
-func (b *backendPluginClient) Initialize(ctx context.Context) error {
- if b.metadataMode {
- return ErrClientInMetadataMode
- }
- err := b.client.Call("Plugin.Initialize", new(interface{}), &struct{}{})
- return err
-}
-
-func (b *backendPluginClient) InvalidateKey(ctx context.Context, key string) {
- if b.metadataMode {
- return
- }
- b.client.Call("Plugin.InvalidateKey", key, &struct{}{})
-}
-
-func (b *backendPluginClient) Setup(ctx context.Context, config *logical.BackendConfig) error {
- // Shim logical.Storage
- storageImpl := config.StorageView
- if b.metadataMode {
- storageImpl = &NOOPStorage{}
- }
- storageID := b.broker.NextId()
- go b.broker.AcceptAndServe(storageID, &StorageServer{
- impl: storageImpl,
- })
-
- // Shim logical.SystemView
- sysViewImpl := config.System
- if b.metadataMode {
- sysViewImpl = &logical.StaticSystemView{}
- }
- sysViewID := b.broker.NextId()
- go b.broker.AcceptAndServe(sysViewID, &SystemViewServer{
- impl: sysViewImpl,
- })
-
- args := &SetupArgs{
- StorageID: storageID,
- SysViewID: sysViewID,
- Config: config.Config,
- BackendUUID: config.BackendUUID,
- }
- var reply SetupReply
-
- err := b.client.Call("Plugin.Setup", args, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
-
- // Set system and logger for getter methods
- b.system = config.System
- b.logger = config.Logger
-
- return nil
-}
-
-func (b *backendPluginClient) Type() logical.BackendType {
- var reply TypeReply
- err := b.client.Call("Plugin.Type", new(interface{}), &reply)
- if err != nil {
- return logical.TypeUnknown
- }
-
- return logical.BackendType(reply.Type)
-}
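The Setup method above carries the central trick of the net/rpc transport: interfaces such as logical.Storage cannot be gob-encoded across the process boundary, so only a MuxBroker stream ID crosses the wire, and each side serves or dials a shim on that ID. Reduced to its shape (a sketch; storageImpl stands in for any concrete logical.Storage):

// Client side: allocate a stream ID and serve the shim on it.
id := broker.NextId()
go broker.AcceptAndServe(id, &StorageServer{impl: storageImpl})
// ...only the numeric id travels to the plugin, inside SetupArgs...

// Plugin side: dial the same ID back to reach the client's shim.
conn, err := broker.Dial(id)
if err != nil {
	return err
}
storage := &StorageClient{client: rpc.NewClient(conn)}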
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go b/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go
deleted file mode 100644
index a03e089f..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/backend_server.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package plugin
-
-import (
- "context"
- "errors"
- "net/rpc"
-
- "github.com/hashicorp/go-hclog"
-
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode")
-)
-
-// backendPluginServer is the RPC server that backendPluginClient talks to;
-// its methods conform to the net/rpc requirements.
-type backendPluginServer struct {
- broker *plugin.MuxBroker
- backend logical.Backend
- factory logical.Factory
-
- logger hclog.Logger
- sysViewClient *rpc.Client
- storageClient *rpc.Client
-}
-
-func (b *backendPluginServer) HandleRequest(args *HandleRequestArgs, reply *HandleRequestReply) error {
- if pluginutil.InMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- storage := &StorageClient{client: b.storageClient}
- args.Request.Storage = storage
-
- resp, err := b.backend.HandleRequest(context.Background(), args.Request)
- *reply = HandleRequestReply{
- Response: resp,
- Error: wrapError(err),
- }
-
- return nil
-}
-
-func (b *backendPluginServer) SpecialPaths(_ interface{}, reply *SpecialPathsReply) error {
- *reply = SpecialPathsReply{
- Paths: b.backend.SpecialPaths(),
- }
- return nil
-}
-
-func (b *backendPluginServer) HandleExistenceCheck(args *HandleExistenceCheckArgs, reply *HandleExistenceCheckReply) error {
- if pluginutil.InMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- storage := &StorageClient{client: b.storageClient}
- args.Request.Storage = storage
-
- checkFound, exists, err := b.backend.HandleExistenceCheck(context.TODO(), args.Request)
- *reply = HandleExistenceCheckReply{
- CheckFound: checkFound,
- Exists: exists,
- Error: wrapError(err),
- }
-
- return nil
-}
-
-func (b *backendPluginServer) Cleanup(_ interface{}, _ *struct{}) error {
- b.backend.Cleanup(context.Background())
-
- // Close rpc clients
- b.sysViewClient.Close()
- b.storageClient.Close()
- return nil
-}
-
-func (b *backendPluginServer) InvalidateKey(args string, _ *struct{}) error {
- if pluginutil.InMetadataMode() {
- return ErrServerInMetadataMode
- }
-
- b.backend.InvalidateKey(context.Background(), args)
- return nil
-}
-
-// Setup dials into the plugin's broker to get a shimmed storage, logger, and
-// system view of the backend. This method also instantiates the underlying
-// backend through its factory func for the server side of the plugin.
-func (b *backendPluginServer) Setup(args *SetupArgs, reply *SetupReply) error {
- // Dial for storage
- storageConn, err := b.broker.Dial(args.StorageID)
- if err != nil {
- *reply = SetupReply{
- Error: wrapError(err),
- }
- return nil
- }
- rawStorageClient := rpc.NewClient(storageConn)
- b.storageClient = rawStorageClient
-
- storage := &StorageClient{client: rawStorageClient}
-
- // Dial for sys view
- sysViewConn, err := b.broker.Dial(args.SysViewID)
- if err != nil {
- *reply = SetupReply{
- Error: wrapError(err),
- }
- return nil
- }
- rawSysViewClient := rpc.NewClient(sysViewConn)
- b.sysViewClient = rawSysViewClient
-
- sysView := &SystemViewClient{client: rawSysViewClient}
-
- config := &logical.BackendConfig{
- StorageView: storage,
- Logger: b.logger,
- System: sysView,
- Config: args.Config,
- BackendUUID: args.BackendUUID,
- }
-
- // Call the underlying backend factory after shims have been created
- // to set b.backend
- backend, err := b.factory(context.Background(), config)
- if err != nil {
- *reply = SetupReply{
- Error: wrapError(err),
- }
- }
- b.backend = backend
-
- return nil
-}
-
-func (b *backendPluginServer) Type(_ interface{}, reply *TypeReply) error {
- *reply = TypeReply{
- Type: b.backend.Type(),
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend.go b/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend.go
deleted file mode 100644
index a65eeebe..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package plugin
-
-import (
- "math"
-
- "google.golang.org/grpc"
-)
-
-var largeMsgGRPCCallOpts []grpc.CallOption = []grpc.CallOption{
- grpc.MaxCallSendMsgSize(math.MaxInt32),
- grpc.MaxCallRecvMsgSize(math.MaxInt32),
-}
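These options exist because grpc-go caps message sizes per call (the receive limit defaults to 4 MiB), which large responses such as long LIST results can exceed. They are threaded into individual RPCs rather than set on the connection; a call-site sketch matching the clients later in this diff:

// args is any request message for the generated pb.BackendClient.
reply, err := client.HandleRequest(ctx, args, largeMsgGRPCCallOpts...)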
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend_client.go b/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend_client.go
deleted file mode 100644
index 60ef1828..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend_client.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package plugin
-
-import (
- "context"
- "errors"
- "math"
- "sync/atomic"
-
- "google.golang.org/grpc"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin/pb"
-)
-
-var ErrPluginShutdown = errors.New("plugin is shut down")
-
-// Validate backendGRPCPluginClient satisfies the logical.Backend interface
-var _ logical.Backend = &backendGRPCPluginClient{}
-
-// backendGRPCPluginClient implements logical.Backend and is the
-// gRPC go-plugin client.
-type backendGRPCPluginClient struct {
- broker *plugin.GRPCBroker
- client pb.BackendClient
- metadataMode bool
-
- system logical.SystemView
- logger log.Logger
-
- // This is used to signal to the Cleanup function that it can proceed
- // because we have a defined server
- cleanupCh chan struct{}
-
- // server is the grpc server used for serving storage and sysview requests.
- server *atomic.Value
-
-	// clientConn is the underlying grpc connection to the server; we store it
-	// so it can be cleaned up.
- clientConn *grpc.ClientConn
- doneCtx context.Context
-}
-
-func (b *backendGRPCPluginClient) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
- if b.metadataMode {
- return nil, ErrClientInMetadataMode
- }
-
- ctx, cancel := context.WithCancel(ctx)
- quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx)
- defer close(quitCh)
- defer cancel()
-
- protoReq, err := pb.LogicalRequestToProtoRequest(req)
- if err != nil {
- return nil, err
- }
-
- reply, err := b.client.HandleRequest(ctx, &pb.HandleRequestArgs{
- Request: protoReq,
- }, largeMsgGRPCCallOpts...)
- if err != nil {
- if b.doneCtx.Err() != nil {
- return nil, ErrPluginShutdown
- }
-
- return nil, err
- }
- resp, err := pb.ProtoResponseToLogicalResponse(reply.Response)
- if err != nil {
- return nil, err
- }
- if reply.Err != nil {
- return resp, pb.ProtoErrToErr(reply.Err)
- }
-
- return resp, nil
-}
-
-func (b *backendGRPCPluginClient) SpecialPaths() *logical.Paths {
- reply, err := b.client.SpecialPaths(b.doneCtx, &pb.Empty{})
- if err != nil {
- return nil
- }
-
- if reply.Paths == nil {
- return nil
- }
-
- return &logical.Paths{
- Root: reply.Paths.Root,
- Unauthenticated: reply.Paths.Unauthenticated,
- LocalStorage: reply.Paths.LocalStorage,
- SealWrapStorage: reply.Paths.SealWrapStorage,
- }
-}
-
-// System returns vault's system view. The backend client stores the view during
-// Setup, so there is no need to shim the system just to get it back.
-func (b *backendGRPCPluginClient) System() logical.SystemView {
- return b.system
-}
-
-// Logger returns vault's logger. The backend client stores the logger during
-// Setup, so there is no need to shim the logger just to get it back.
-func (b *backendGRPCPluginClient) Logger() log.Logger {
- return b.logger
-}
-
-func (b *backendGRPCPluginClient) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) {
- if b.metadataMode {
- return false, false, ErrClientInMetadataMode
- }
-
- protoReq, err := pb.LogicalRequestToProtoRequest(req)
- if err != nil {
- return false, false, err
- }
-
- ctx, cancel := context.WithCancel(ctx)
- quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx)
- defer close(quitCh)
- defer cancel()
- reply, err := b.client.HandleExistenceCheck(ctx, &pb.HandleExistenceCheckArgs{
- Request: protoReq,
- }, largeMsgGRPCCallOpts...)
- if err != nil {
- if b.doneCtx.Err() != nil {
- return false, false, ErrPluginShutdown
- }
- return false, false, err
- }
- if reply.Err != nil {
- return false, false, pb.ProtoErrToErr(reply.Err)
- }
-
- return reply.CheckFound, reply.Exists, nil
-}
-
-func (b *backendGRPCPluginClient) Cleanup(ctx context.Context) {
- ctx, cancel := context.WithCancel(ctx)
- quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx)
- defer close(quitCh)
- defer cancel()
-
- b.client.Cleanup(ctx, &pb.Empty{})
-
- // This will block until Setup has run the function to create a new server
- // in b.server. If we stop here before it has a chance to actually start
-	// listening, when it starts listening it will immediately error out and
- // exit, which is fine. Overall this ensures that we do not miss stopping
- // the server if it ends up being created after Cleanup is called.
- <-b.cleanupCh
- server := b.server.Load()
- if server != nil {
- server.(*grpc.Server).GracefulStop()
- }
- b.clientConn.Close()
-}
-
-func (b *backendGRPCPluginClient) InvalidateKey(ctx context.Context, key string) {
- if b.metadataMode {
- return
- }
-
- ctx, cancel := context.WithCancel(ctx)
- quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx)
- defer close(quitCh)
- defer cancel()
-
- b.client.InvalidateKey(ctx, &pb.InvalidateKeyArgs{
- Key: key,
- })
-}
-
-func (b *backendGRPCPluginClient) Setup(ctx context.Context, config *logical.BackendConfig) error {
- // Shim logical.Storage
- storageImpl := config.StorageView
- if b.metadataMode {
- storageImpl = &NOOPStorage{}
- }
- storage := &GRPCStorageServer{
- impl: storageImpl,
- }
-
- // Shim logical.SystemView
- sysViewImpl := config.System
- if b.metadataMode {
- sysViewImpl = &logical.StaticSystemView{}
- }
- sysView := &gRPCSystemViewServer{
- impl: sysViewImpl,
- }
-
- // Register the server in this closure.
- serverFunc := func(opts []grpc.ServerOption) *grpc.Server {
- opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32))
- opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32))
-
- s := grpc.NewServer(opts...)
- pb.RegisterSystemViewServer(s, sysView)
- pb.RegisterStorageServer(s, storage)
- b.server.Store(s)
- close(b.cleanupCh)
- return s
- }
- brokerID := b.broker.NextId()
- go b.broker.AcceptAndServe(brokerID, serverFunc)
-
- args := &pb.SetupArgs{
- BrokerID: brokerID,
- Config: config.Config,
- BackendUUID: config.BackendUUID,
- }
-
- ctx, cancel := context.WithCancel(ctx)
- quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx)
- defer close(quitCh)
- defer cancel()
-
- reply, err := b.client.Setup(ctx, args)
- if err != nil {
- return err
- }
- if reply.Err != "" {
- return errors.New(reply.Err)
- }
-
- // Set system and logger for getter methods
- b.system = config.System
- b.logger = config.Logger
-
- return nil
-}
-
-func (b *backendGRPCPluginClient) Type() logical.BackendType {
- reply, err := b.client.Type(b.doneCtx, &pb.Empty{})
- if err != nil {
- return logical.TypeUnknown
- }
-
- return logical.BackendType(reply.Type)
-}
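The interplay between Setup and Cleanup above is subtle: Cleanup must not race a server that Setup's closure has not yet created, so cleanupCh gates the read and an atomic.Value carries the pointer. The pattern in isolation (a sketch; the pointer check here is a slightly tightened form of the interface-nil check above):

var server atomic.Value
ready := make(chan struct{})
server.Store((*grpc.Server)(nil)) // typed nil keeps the later type assertion valid

// Writer, inside the broker's server closure:
s := grpc.NewServer()
server.Store(s)
close(ready) // only now may the reader proceed

// Reader, inside Cleanup:
<-ready
if s := server.Load().(*grpc.Server); s != nil {
	s.GracefulStop()
}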
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend_server.go b/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend_server.go
deleted file mode 100644
index 7869a70b..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_backend_server.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package plugin
-
-import (
- "context"
-
- log "github.com/hashicorp/go-hclog"
- plugin "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin/pb"
- "google.golang.org/grpc"
-)
-
-type backendGRPCPluginServer struct {
- broker *plugin.GRPCBroker
- backend logical.Backend
-
- factory logical.Factory
-
- brokeredClient *grpc.ClientConn
-
- logger log.Logger
-}
-
-// Setup dials into the plugin's broker to get a shimmed storage, logger, and
-// system view of the backend. This method also instantiates the underlying
-// backend through its factory func for the server side of the plugin.
-func (b *backendGRPCPluginServer) Setup(ctx context.Context, args *pb.SetupArgs) (*pb.SetupReply, error) {
- // Dial for storage
- brokeredClient, err := b.broker.Dial(args.BrokerID)
- if err != nil {
- return &pb.SetupReply{}, err
- }
- b.brokeredClient = brokeredClient
- storage := newGRPCStorageClient(brokeredClient)
- sysView := newGRPCSystemView(brokeredClient)
-
- config := &logical.BackendConfig{
- StorageView: storage,
- Logger: b.logger,
- System: sysView,
- Config: args.Config,
- BackendUUID: args.BackendUUID,
- }
-
- // Call the underlying backend factory after shims have been created
- // to set b.backend
- backend, err := b.factory(ctx, config)
- if err != nil {
- return &pb.SetupReply{
- Err: pb.ErrToString(err),
- }, nil
- }
- b.backend = backend
-
- return &pb.SetupReply{}, nil
-}
-
-func (b *backendGRPCPluginServer) HandleRequest(ctx context.Context, args *pb.HandleRequestArgs) (*pb.HandleRequestReply, error) {
- if pluginutil.InMetadataMode() {
- return &pb.HandleRequestReply{}, ErrServerInMetadataMode
- }
-
- logicalReq, err := pb.ProtoRequestToLogicalRequest(args.Request)
- if err != nil {
- return &pb.HandleRequestReply{}, err
- }
-
- logicalReq.Storage = newGRPCStorageClient(b.brokeredClient)
-
- resp, respErr := b.backend.HandleRequest(ctx, logicalReq)
-
- pbResp, err := pb.LogicalResponseToProtoResponse(resp)
- if err != nil {
- return &pb.HandleRequestReply{}, err
- }
-
- return &pb.HandleRequestReply{
- Response: pbResp,
- Err: pb.ErrToProtoErr(respErr),
- }, nil
-}
-
-func (b *backendGRPCPluginServer) SpecialPaths(ctx context.Context, args *pb.Empty) (*pb.SpecialPathsReply, error) {
- paths := b.backend.SpecialPaths()
- if paths == nil {
- return &pb.SpecialPathsReply{
- Paths: nil,
- }, nil
- }
-
- return &pb.SpecialPathsReply{
- Paths: &pb.Paths{
- Root: paths.Root,
- Unauthenticated: paths.Unauthenticated,
- LocalStorage: paths.LocalStorage,
- SealWrapStorage: paths.SealWrapStorage,
- },
- }, nil
-}
-
-func (b *backendGRPCPluginServer) HandleExistenceCheck(ctx context.Context, args *pb.HandleExistenceCheckArgs) (*pb.HandleExistenceCheckReply, error) {
- if pluginutil.InMetadataMode() {
- return &pb.HandleExistenceCheckReply{}, ErrServerInMetadataMode
- }
-
- logicalReq, err := pb.ProtoRequestToLogicalRequest(args.Request)
- if err != nil {
- return &pb.HandleExistenceCheckReply{}, err
- }
- logicalReq.Storage = newGRPCStorageClient(b.brokeredClient)
-
- checkFound, exists, err := b.backend.HandleExistenceCheck(ctx, logicalReq)
- return &pb.HandleExistenceCheckReply{
- CheckFound: checkFound,
- Exists: exists,
- Err: pb.ErrToProtoErr(err),
- }, nil
-}
-
-func (b *backendGRPCPluginServer) Cleanup(ctx context.Context, _ *pb.Empty) (*pb.Empty, error) {
- b.backend.Cleanup(ctx)
-
- // Close rpc clients
- b.brokeredClient.Close()
- return &pb.Empty{}, nil
-}
-
-func (b *backendGRPCPluginServer) InvalidateKey(ctx context.Context, args *pb.InvalidateKeyArgs) (*pb.Empty, error) {
- if pluginutil.InMetadataMode() {
- return &pb.Empty{}, ErrServerInMetadataMode
- }
-
- b.backend.InvalidateKey(ctx, args.Key)
- return &pb.Empty{}, nil
-}
-
-func (b *backendGRPCPluginServer) Type(ctx context.Context, _ *pb.Empty) (*pb.TypeReply, error) {
- return &pb.TypeReply{
- Type: uint32(b.backend.Type()),
- }, nil
-}
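A convention runs through all of these handlers: the gRPC error return is reserved for transport and encoding failures, while backend-level errors are serialized into the reply (pb.ErrToProtoErr, pb.ErrToString) so the client can reconstruct them. Stripped to its two halves:

// Server side (above): backend errors ride inside the reply message.
return &pb.HandleRequestReply{
	Response: pbResp,
	Err:      pb.ErrToProtoErr(respErr),
}, nil

// Client side (earlier in this diff): rebuild the error after decoding.
if reply.Err != nil {
	return resp, pb.ProtoErrToErr(reply.Err)
}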
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_storage.go b/vendor/github.com/hashicorp/vault/logical/plugin/grpc_storage.go
deleted file mode 100644
index ffe13390..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_storage.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package plugin
-
-import (
- "context"
- "errors"
-
- "google.golang.org/grpc"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin/pb"
-)
-
-func newGRPCStorageClient(conn *grpc.ClientConn) *GRPCStorageClient {
- return &GRPCStorageClient{
- client: pb.NewStorageClient(conn),
- }
-}
-
-// GRPCStorageClient is an implementation of logical.Storage that communicates
-// over RPC.
-type GRPCStorageClient struct {
- client pb.StorageClient
-}
-
-func (s *GRPCStorageClient) List(ctx context.Context, prefix string) ([]string, error) {
- reply, err := s.client.List(ctx, &pb.StorageListArgs{
- Prefix: prefix,
- }, largeMsgGRPCCallOpts...)
- if err != nil {
- return []string{}, err
- }
- if reply.Err != "" {
- return reply.Keys, errors.New(reply.Err)
- }
- return reply.Keys, nil
-}
-
-func (s *GRPCStorageClient) Get(ctx context.Context, key string) (*logical.StorageEntry, error) {
- reply, err := s.client.Get(ctx, &pb.StorageGetArgs{
- Key: key,
- }, largeMsgGRPCCallOpts...)
- if err != nil {
- return nil, err
- }
- if reply.Err != "" {
- return nil, errors.New(reply.Err)
- }
- return pb.ProtoStorageEntryToLogicalStorageEntry(reply.Entry), nil
-}
-
-func (s *GRPCStorageClient) Put(ctx context.Context, entry *logical.StorageEntry) error {
- reply, err := s.client.Put(ctx, &pb.StoragePutArgs{
- Entry: pb.LogicalStorageEntryToProtoStorageEntry(entry),
- }, largeMsgGRPCCallOpts...)
- if err != nil {
- return err
- }
- if reply.Err != "" {
- return errors.New(reply.Err)
- }
- return nil
-}
-
-func (s *GRPCStorageClient) Delete(ctx context.Context, key string) error {
- reply, err := s.client.Delete(ctx, &pb.StorageDeleteArgs{
- Key: key,
- })
- if err != nil {
- return err
- }
- if reply.Err != "" {
- return errors.New(reply.Err)
- }
- return nil
-}
-
-// GRPCStorageServer is the gRPC server-side implementation of logical.Storage.
-type GRPCStorageServer struct {
- impl logical.Storage
-}
-
-func (s *GRPCStorageServer) List(ctx context.Context, args *pb.StorageListArgs) (*pb.StorageListReply, error) {
- keys, err := s.impl.List(ctx, args.Prefix)
- return &pb.StorageListReply{
- Keys: keys,
- Err: pb.ErrToString(err),
- }, nil
-}
-
-func (s *GRPCStorageServer) Get(ctx context.Context, args *pb.StorageGetArgs) (*pb.StorageGetReply, error) {
- storageEntry, err := s.impl.Get(ctx, args.Key)
- return &pb.StorageGetReply{
- Entry: pb.LogicalStorageEntryToProtoStorageEntry(storageEntry),
- Err: pb.ErrToString(err),
- }, nil
-}
-
-func (s *GRPCStorageServer) Put(ctx context.Context, args *pb.StoragePutArgs) (*pb.StoragePutReply, error) {
- err := s.impl.Put(ctx, pb.ProtoStorageEntryToLogicalStorageEntry(args.Entry))
- return &pb.StoragePutReply{
- Err: pb.ErrToString(err),
- }, nil
-}
-
-func (s *GRPCStorageServer) Delete(ctx context.Context, args *pb.StorageDeleteArgs) (*pb.StorageDeleteReply, error) {
- err := s.impl.Delete(ctx, args.Key)
- return &pb.StorageDeleteReply{
- Err: pb.ErrToString(err),
- }, nil
-}
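Once newGRPCStorageClient wraps the brokered connection, backend code is oblivious to the transport and sees an ordinary logical.Storage. A usage sketch (the key and value are illustrative):

storage := newGRPCStorageClient(brokeredConn)
if err := storage.Put(ctx, &logical.StorageEntry{
	Key:   "config/settings",
	Value: []byte(`{"max_ttl": "1h"}`),
}); err != nil {
	return err
}
entry, err := storage.Get(ctx, "config/settings")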
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_system.go b/vendor/github.com/hashicorp/vault/logical/plugin/grpc_system.go
deleted file mode 100644
index 5b7a5824..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/grpc_system.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package plugin
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/license"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/plugin/pb"
- "google.golang.org/grpc"
-)
-
-func newGRPCSystemView(conn *grpc.ClientConn) *gRPCSystemViewClient {
- return &gRPCSystemViewClient{
- client: pb.NewSystemViewClient(conn),
- }
-}
-
-type gRPCSystemViewClient struct {
- client pb.SystemViewClient
-}
-
-func (s *gRPCSystemViewClient) DefaultLeaseTTL() time.Duration {
- reply, err := s.client.DefaultLeaseTTL(context.Background(), &pb.Empty{})
- if err != nil {
- return 0
- }
-
- return time.Duration(reply.TTL)
-}
-
-func (s *gRPCSystemViewClient) MaxLeaseTTL() time.Duration {
- reply, err := s.client.MaxLeaseTTL(context.Background(), &pb.Empty{})
- if err != nil {
- return 0
- }
-
- return time.Duration(reply.TTL)
-}
-
-func (s *gRPCSystemViewClient) SudoPrivilege(ctx context.Context, path string, token string) bool {
- reply, err := s.client.SudoPrivilege(ctx, &pb.SudoPrivilegeArgs{
- Path: path,
- Token: token,
- })
- if err != nil {
- return false
- }
-
- return reply.Sudo
-}
-
-func (s *gRPCSystemViewClient) Tainted() bool {
- reply, err := s.client.Tainted(context.Background(), &pb.Empty{})
- if err != nil {
- return false
- }
-
- return reply.Tainted
-}
-
-func (s *gRPCSystemViewClient) CachingDisabled() bool {
- reply, err := s.client.CachingDisabled(context.Background(), &pb.Empty{})
- if err != nil {
- return false
- }
-
- return reply.Disabled
-}
-
-func (s *gRPCSystemViewClient) ReplicationState() consts.ReplicationState {
- reply, err := s.client.ReplicationState(context.Background(), &pb.Empty{})
- if err != nil {
- return consts.ReplicationUnknown
- }
-
- return consts.ReplicationState(reply.State)
-}
-
-func (s *gRPCSystemViewClient) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
- buf, err := json.Marshal(data)
- if err != nil {
- return nil, err
- }
-
- reply, err := s.client.ResponseWrapData(ctx, &pb.ResponseWrapDataArgs{
- Data: string(buf[:]),
- TTL: int64(ttl),
- JWT: false,
- })
- if err != nil {
- return nil, err
- }
- if reply.Err != "" {
- return nil, errors.New(reply.Err)
- }
-
- info, err := pb.ProtoResponseWrapInfoToLogicalResponseWrapInfo(reply.WrapInfo)
- if err != nil {
- return nil, err
- }
-
- return info, nil
-}
-
-func (s *gRPCSystemViewClient) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) {
- return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend")
-}
-
-func (s *gRPCSystemViewClient) MlockEnabled() bool {
- reply, err := s.client.MlockEnabled(context.Background(), &pb.Empty{})
- if err != nil {
- return false
- }
-
- return reply.Enabled
-}
-
-func (s *gRPCSystemViewClient) HasFeature(feature license.Features) bool {
- // Not implemented
- return false
-}
-
-func (s *gRPCSystemViewClient) LocalMount() bool {
- reply, err := s.client.LocalMount(context.Background(), &pb.Empty{})
- if err != nil {
- return false
- }
-
- return reply.Local
-}
-
-func (s *gRPCSystemViewClient) EntityInfo(entityID string) (*logical.Entity, error) {
- reply, err := s.client.EntityInfo(context.Background(), &pb.EntityInfoArgs{
- EntityID: entityID,
- })
- if err != nil {
- return nil, err
- }
- if reply.Err != "" {
- return nil, errors.New(reply.Err)
- }
-
- return reply.Entity, nil
-}
-
-func (s *gRPCSystemViewClient) PluginEnv(ctx context.Context) (*logical.PluginEnvironment, error) {
- reply, err := s.client.PluginEnv(ctx, &pb.Empty{})
- if err != nil {
- return nil, err
- }
-
- return reply.PluginEnvironment, nil
-}
-
-type gRPCSystemViewServer struct {
- impl logical.SystemView
-}
-
-func (s *gRPCSystemViewServer) DefaultLeaseTTL(ctx context.Context, _ *pb.Empty) (*pb.TTLReply, error) {
- ttl := s.impl.DefaultLeaseTTL()
- return &pb.TTLReply{
- TTL: int64(ttl),
- }, nil
-}
-
-func (s *gRPCSystemViewServer) MaxLeaseTTL(ctx context.Context, _ *pb.Empty) (*pb.TTLReply, error) {
- ttl := s.impl.MaxLeaseTTL()
- return &pb.TTLReply{
- TTL: int64(ttl),
- }, nil
-}
-
-func (s *gRPCSystemViewServer) SudoPrivilege(ctx context.Context, args *pb.SudoPrivilegeArgs) (*pb.SudoPrivilegeReply, error) {
- sudo := s.impl.SudoPrivilege(ctx, args.Path, args.Token)
- return &pb.SudoPrivilegeReply{
- Sudo: sudo,
- }, nil
-}
-
-func (s *gRPCSystemViewServer) Tainted(ctx context.Context, _ *pb.Empty) (*pb.TaintedReply, error) {
- tainted := s.impl.Tainted()
- return &pb.TaintedReply{
- Tainted: tainted,
- }, nil
-}
-
-func (s *gRPCSystemViewServer) CachingDisabled(ctx context.Context, _ *pb.Empty) (*pb.CachingDisabledReply, error) {
- cachingDisabled := s.impl.CachingDisabled()
- return &pb.CachingDisabledReply{
- Disabled: cachingDisabled,
- }, nil
-}
-
-func (s *gRPCSystemViewServer) ReplicationState(ctx context.Context, _ *pb.Empty) (*pb.ReplicationStateReply, error) {
- replicationState := s.impl.ReplicationState()
- return &pb.ReplicationStateReply{
- State: int32(replicationState),
- }, nil
-}
-
-func (s *gRPCSystemViewServer) ResponseWrapData(ctx context.Context, args *pb.ResponseWrapDataArgs) (*pb.ResponseWrapDataReply, error) {
- data := map[string]interface{}{}
- err := json.Unmarshal([]byte(args.Data), &data)
- if err != nil {
- return &pb.ResponseWrapDataReply{}, err
- }
-
- // Do not allow JWTs to be returned
- info, err := s.impl.ResponseWrapData(ctx, data, time.Duration(args.TTL), false)
- if err != nil {
- return &pb.ResponseWrapDataReply{
- Err: pb.ErrToString(err),
- }, nil
- }
-
- pbInfo, err := pb.LogicalResponseWrapInfoToProtoResponseWrapInfo(info)
- if err != nil {
- return &pb.ResponseWrapDataReply{}, err
- }
-
- return &pb.ResponseWrapDataReply{
- WrapInfo: pbInfo,
- }, nil
-}
-
-func (s *gRPCSystemViewServer) MlockEnabled(ctx context.Context, _ *pb.Empty) (*pb.MlockEnabledReply, error) {
- enabled := s.impl.MlockEnabled()
- return &pb.MlockEnabledReply{
- Enabled: enabled,
- }, nil
-}
-
-func (s *gRPCSystemViewServer) LocalMount(ctx context.Context, _ *pb.Empty) (*pb.LocalMountReply, error) {
- local := s.impl.LocalMount()
- return &pb.LocalMountReply{
- Local: local,
- }, nil
-}
-
-func (s *gRPCSystemViewServer) EntityInfo(ctx context.Context, args *pb.EntityInfoArgs) (*pb.EntityInfoReply, error) {
- entity, err := s.impl.EntityInfo(args.EntityID)
- if err != nil {
- return &pb.EntityInfoReply{
- Err: pb.ErrToString(err),
- }, nil
- }
- return &pb.EntityInfoReply{
- Entity: entity,
- }, nil
-}
-
-func (s *gRPCSystemViewServer) PluginEnv(ctx context.Context, _ *pb.Empty) (*pb.PluginEnvReply, error) {
- pluginEnv, err := s.impl.PluginEnv(ctx)
- if err != nil {
- return &pb.PluginEnvReply{
- Err: pb.ErrToString(err),
- }, nil
- }
- return &pb.PluginEnvReply{
- PluginEnvironment: pluginEnv,
- }, nil
-}
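One quirk of this shim is ResponseWrapData: its data argument is an arbitrary map[string]interface{}, which the proto schema has no field type for, so both sides round-trip it through a JSON string, and JWT wrapping is forced off across the boundary. The encoding step in isolation (values illustrative):

buf, err := json.Marshal(map[string]interface{}{"secret": "value"})
if err != nil {
	return nil, err
}
args := &pb.ResponseWrapDataArgs{
	Data: string(buf),        // JSON stands in for the untyped map
	TTL:  int64(time.Minute), // durations travel as int64 nanoseconds
	JWT:  false,              // JWT wrapping is never requested over the shim
}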
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/logger.go b/vendor/github.com/hashicorp/vault/logical/plugin/logger.go
deleted file mode 100644
index a59a8a3d..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/logger.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package plugin
-
-import hclog "github.com/hashicorp/go-hclog"
-
-type LoggerServer struct {
- logger hclog.Logger
-}
-
-func (l *LoggerServer) Trace(args *LoggerArgs, _ *struct{}) error {
- l.logger.Trace(args.Msg, args.Args...)
- return nil
-}
-
-func (l *LoggerServer) Debug(args *LoggerArgs, _ *struct{}) error {
- l.logger.Debug(args.Msg, args.Args...)
- return nil
-}
-
-func (l *LoggerServer) Info(args *LoggerArgs, _ *struct{}) error {
- l.logger.Info(args.Msg, args.Args...)
- return nil
-}
-
-func (l *LoggerServer) Warn(args *LoggerArgs, reply *LoggerReply) error {
- l.logger.Warn(args.Msg, args.Args...)
- return nil
-}
-
-func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error {
- l.logger.Error(args.Msg, args.Args...)
- return nil
-}
-
-func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error {
-
- switch translateLevel(args.Level) {
-
- case hclog.Trace:
- l.logger.Trace(args.Msg, args.Args...)
-
- case hclog.Debug:
- l.logger.Debug(args.Msg, args.Args...)
-
- case hclog.Info:
- l.logger.Info(args.Msg, args.Args...)
-
- case hclog.Warn:
- l.logger.Warn(args.Msg, args.Args...)
-
- case hclog.Error:
- l.logger.Error(args.Msg, args.Args...)
-
- case hclog.NoLevel:
- }
- return nil
-}
-
-func (l *LoggerServer) SetLevel(args int, _ *struct{}) error {
- level := translateLevel(args)
- l.logger = hclog.New(&hclog.LoggerOptions{Level: level})
- return nil
-}
-
-func (l *LoggerServer) IsTrace(args interface{}, reply *LoggerReply) error {
- result := l.logger.IsTrace()
- *reply = LoggerReply{
- IsTrue: result,
- }
- return nil
-}
-
-func (l *LoggerServer) IsDebug(args interface{}, reply *LoggerReply) error {
- result := l.logger.IsDebug()
- *reply = LoggerReply{
- IsTrue: result,
- }
- return nil
-}
-
-func (l *LoggerServer) IsInfo(args interface{}, reply *LoggerReply) error {
- result := l.logger.IsInfo()
- *reply = LoggerReply{
- IsTrue: result,
- }
- return nil
-}
-
-func (l *LoggerServer) IsWarn(args interface{}, reply *LoggerReply) error {
- result := l.logger.IsWarn()
- *reply = LoggerReply{
- IsTrue: result,
- }
- return nil
-}
-
-type LoggerArgs struct {
- Level int
- Msg string
- Args []interface{}
-}
-
-// LoggerReply contains the RPC reply. Not all fields may be used
-// for a particular RPC call.
-type LoggerReply struct {
- IsTrue bool
- Error error
-}
-
-func translateLevel(logxiLevel int) hclog.Level {
-
- switch logxiLevel {
-
- case 1000, 10:
- // logxi.LevelAll, logxi.LevelTrace:
- return hclog.Trace
-
- case 7:
- // logxi.LevelDebug:
- return hclog.Debug
-
- case 6, 5:
- // logxi.LevelInfo, logxi.LevelNotice:
- return hclog.Info
-
- case 4:
- // logxi.LevelWarn:
- return hclog.Warn
-
- case 3, 2, 1, -1:
- // logxi.LevelError, logxi.LevelFatal, logxi.LevelAlert, logxi.LevelEmergency:
- return hclog.Error
- }
- return hclog.NoLevel
-}
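The numeric cases in translateLevel are the legacy logxi level constants named in the comments. A quick sanity check of the mapping (a hypothetical snippet, with the inputs assumed from those comments):

levels := map[int]hclog.Level{
	10: hclog.Trace,   // logxi.LevelTrace
	7:  hclog.Debug,   // logxi.LevelDebug
	5:  hclog.Info,    // logxi.LevelNotice folds into Info
	4:  hclog.Warn,    // logxi.LevelWarn
	-1: hclog.Error,   // logxi.LevelEmergency
	0:  hclog.NoLevel, // anything unmapped
}
for in, want := range levels {
	if got := translateLevel(in); got != want {
		panic(fmt.Sprintf("translateLevel(%d) = %v, want %v", in, got, want))
	}
}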
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/middleware.go b/vendor/github.com/hashicorp/vault/logical/plugin/middleware.go
deleted file mode 100644
index d9aeed0f..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/middleware.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package plugin
-
-import (
- "context"
- "time"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/vault/logical"
-)
-
-// backendTracingMiddleware wraps a logical.Backend and emits trace logs
-// around each backend call.
-type backendTracingMiddleware struct {
- logger log.Logger
-
- next logical.Backend
-}
-
-// Validate that backendTracingMiddleware satisfies the logical.Backend interface
-var _ logical.Backend = &backendTracingMiddleware{}
-
-func (b *backendTracingMiddleware) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) {
- defer func(then time.Time) {
- b.logger.Trace("handle request", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then))
- }(time.Now())
-
- b.logger.Trace("handle request", "path", req.Path, "status", "started")
- return b.next.HandleRequest(ctx, req)
-}
-
-func (b *backendTracingMiddleware) SpecialPaths() *logical.Paths {
- defer func(then time.Time) {
- b.logger.Trace("special paths", "status", "finished", "took", time.Since(then))
- }(time.Now())
-
- b.logger.Trace("special paths", "status", "started")
- return b.next.SpecialPaths()
-}
-
-func (b *backendTracingMiddleware) System() logical.SystemView {
- return b.next.System()
-}
-
-func (b *backendTracingMiddleware) Logger() log.Logger {
- return b.next.Logger()
-}
-
-func (b *backendTracingMiddleware) HandleExistenceCheck(ctx context.Context, req *logical.Request) (found bool, exists bool, err error) {
- defer func(then time.Time) {
- b.logger.Trace("handle existence check", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then))
- }(time.Now())
-
- b.logger.Trace("handle existence check", "path", req.Path, "status", "started")
- return b.next.HandleExistenceCheck(ctx, req)
-}
-
-func (b *backendTracingMiddleware) Cleanup(ctx context.Context) {
- defer func(then time.Time) {
- b.logger.Trace("cleanup", "status", "finished", "took", time.Since(then))
- }(time.Now())
-
- b.logger.Trace("cleanup", "status", "started")
- b.next.Cleanup(ctx)
-}
-
-func (b *backendTracingMiddleware) InvalidateKey(ctx context.Context, key string) {
- defer func(then time.Time) {
- b.logger.Trace("invalidate key", "key", key, "status", "finished", "took", time.Since(then))
- }(time.Now())
-
- b.logger.Trace("invalidate key", "key", key, "status", "started")
- b.next.InvalidateKey(ctx, key)
-}
-
-func (b *backendTracingMiddleware) Setup(ctx context.Context, config *logical.BackendConfig) (err error) {
- defer func(then time.Time) {
- b.logger.Trace("setup", "status", "finished", "err", err, "took", time.Since(then))
- }(time.Now())
-
- b.logger.Trace("setup", "status", "started")
- return b.next.Setup(ctx, config)
-}
-
-func (b *backendTracingMiddleware) Type() logical.BackendType {
- defer func(then time.Time) {
- b.logger.Trace("type", "status", "finished", "took", time.Since(then))
- }(time.Now())
-
- b.logger.Trace("type", "status", "started")
- return b.next.Type()
-}
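Every method above leans on the same defer trick: the start time is evaluated as the deferred function's argument at defer time, so "took" measures exactly the wrapped call, and the named result lets the finished log line see the eventual error. Generalized (an illustrative reduction, not part of the deleted file):

func timed(logger log.Logger, name string, fn func() error) (err error) {
	defer func(then time.Time) {
		logger.Trace(name, "status", "finished", "err", err, "took", time.Since(then))
	}(time.Now())

	logger.Trace(name, "status", "started")
	return fn()
}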
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/pb/backend.pb.go b/vendor/github.com/hashicorp/vault/logical/plugin/pb/backend.pb.go
deleted file mode 100644
index 911bb497..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/pb/backend.pb.go
+++ /dev/null
@@ -1,3791 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: logical/plugin/pb/backend.proto
-
-package pb
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- logical "github.com/hashicorp/vault/logical"
- math "math"
-)
-
-import (
- context "golang.org/x/net/context"
- grpc "google.golang.org/grpc"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Empty struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
-func (*Empty) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{0}
-}
-
-func (m *Empty) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Empty.Unmarshal(m, b)
-}
-func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
-}
-func (m *Empty) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Empty.Merge(m, src)
-}
-func (m *Empty) XXX_Size() int {
- return xxx_messageInfo_Empty.Size(m)
-}
-func (m *Empty) XXX_DiscardUnknown() {
- xxx_messageInfo_Empty.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Empty proto.InternalMessageInfo
-
-type Header struct {
- Header []string `sentinel:"" protobuf:"bytes,1,rep,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Header) Reset() { *m = Header{} }
-func (m *Header) String() string { return proto.CompactTextString(m) }
-func (*Header) ProtoMessage() {}
-func (*Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{1}
-}
-
-func (m *Header) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Header.Unmarshal(m, b)
-}
-func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Header.Marshal(b, m, deterministic)
-}
-func (m *Header) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Header.Merge(m, src)
-}
-func (m *Header) XXX_Size() int {
- return xxx_messageInfo_Header.Size(m)
-}
-func (m *Header) XXX_DiscardUnknown() {
- xxx_messageInfo_Header.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Header proto.InternalMessageInfo
-
-func (m *Header) GetHeader() []string {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type ProtoError struct {
- // Error type can be one of:
- // ErrTypeUnknown uint32 = iota
- // ErrTypeUserError
- // ErrTypeInternalError
- // ErrTypeCodedError
- // ErrTypeStatusBadRequest
- // ErrTypeUnsupportedOperation
- // ErrTypeUnsupportedPath
- // ErrTypeInvalidRequest
- // ErrTypePermissionDenied
- // ErrTypeMultiAuthzPending
- ErrType uint32 `sentinel:"" protobuf:"varint,1,opt,name=err_type,json=errType,proto3" json:"err_type,omitempty"`
- ErrMsg string `sentinel:"" protobuf:"bytes,2,opt,name=err_msg,json=errMsg,proto3" json:"err_msg,omitempty"`
- ErrCode int64 `sentinel:"" protobuf:"varint,3,opt,name=err_code,json=errCode,proto3" json:"err_code,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ProtoError) Reset() { *m = ProtoError{} }
-func (m *ProtoError) String() string { return proto.CompactTextString(m) }
-func (*ProtoError) ProtoMessage() {}
-func (*ProtoError) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{2}
-}
-
-func (m *ProtoError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ProtoError.Unmarshal(m, b)
-}
-func (m *ProtoError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ProtoError.Marshal(b, m, deterministic)
-}
-func (m *ProtoError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProtoError.Merge(m, src)
-}
-func (m *ProtoError) XXX_Size() int {
- return xxx_messageInfo_ProtoError.Size(m)
-}
-func (m *ProtoError) XXX_DiscardUnknown() {
- xxx_messageInfo_ProtoError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProtoError proto.InternalMessageInfo
-
-func (m *ProtoError) GetErrType() uint32 {
- if m != nil {
- return m.ErrType
- }
- return 0
-}
-
-func (m *ProtoError) GetErrMsg() string {
- if m != nil {
- return m.ErrMsg
- }
- return ""
-}
-
-func (m *ProtoError) GetErrCode() int64 {
- if m != nil {
- return m.ErrCode
- }
- return 0
-}
-
-// Paths is the structure of special paths that is used for SpecialPaths.
-type Paths struct {
- // Root are the paths that require a root token to access
- Root []string `sentinel:"" protobuf:"bytes,1,rep,name=root,proto3" json:"root,omitempty"`
- // Unauthenticated are the paths that can be accessed without any auth.
- Unauthenticated []string `sentinel:"" protobuf:"bytes,2,rep,name=unauthenticated,proto3" json:"unauthenticated,omitempty"`
- // LocalStorage are paths (prefixes) that are local to this instance; this
- // indicates that these paths should not be replicated
- LocalStorage []string `sentinel:"" protobuf:"bytes,3,rep,name=local_storage,json=localStorage,proto3" json:"local_storage,omitempty"`
- // SealWrapStorage are storage paths that, when using a capable seal,
- // should be seal wrapped with extra encryption. It is exact matching
- // unless it ends with '/' in which case it will be treated as a prefix.
- SealWrapStorage []string `sentinel:"" protobuf:"bytes,4,rep,name=seal_wrap_storage,json=sealWrapStorage,proto3" json:"seal_wrap_storage,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Paths) Reset() { *m = Paths{} }
-func (m *Paths) String() string { return proto.CompactTextString(m) }
-func (*Paths) ProtoMessage() {}
-func (*Paths) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{3}
-}
-
-func (m *Paths) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Paths.Unmarshal(m, b)
-}
-func (m *Paths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Paths.Marshal(b, m, deterministic)
-}
-func (m *Paths) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Paths.Merge(m, src)
-}
-func (m *Paths) XXX_Size() int {
- return xxx_messageInfo_Paths.Size(m)
-}
-func (m *Paths) XXX_DiscardUnknown() {
- xxx_messageInfo_Paths.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Paths proto.InternalMessageInfo
-
-func (m *Paths) GetRoot() []string {
- if m != nil {
- return m.Root
- }
- return nil
-}
-
-func (m *Paths) GetUnauthenticated() []string {
- if m != nil {
- return m.Unauthenticated
- }
- return nil
-}
-
-func (m *Paths) GetLocalStorage() []string {
- if m != nil {
- return m.LocalStorage
- }
- return nil
-}
-
-func (m *Paths) GetSealWrapStorage() []string {
- if m != nil {
- return m.SealWrapStorage
- }
- return nil
-}
-
-type Request struct {
- // ID is the uuid associated with each request
- ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- // If set, the name given to the replication secondary where this request
- // originated
- ReplicationCluster string `sentinel:"" protobuf:"bytes,2,opt,name=ReplicationCluster,proto3" json:"ReplicationCluster,omitempty"`
- // Operation is the requested operation type
- Operation string `sentinel:"" protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"`
- // Path is the part of the request path not consumed by the
- // routing. As an example, if the original request path is "prod/aws/foo"
- // and the AWS logical backend is mounted at "prod/aws/", then the
- // final path is "foo" since the mount prefix is trimmed.
- Path string `sentinel:"" protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"`
- // Request data is a JSON object that must have keys with string type.
- Data string `sentinel:"" protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"`
- // Secret will be non-nil only for Revoke and Renew operations
- // to represent the secret that was returned prior.
- Secret *Secret `sentinel:"" protobuf:"bytes,6,opt,name=secret,proto3" json:"secret,omitempty"`
- // Auth will be non-nil only for Renew operations
- // to represent the auth that was returned prior.
- Auth *Auth `sentinel:"" protobuf:"bytes,7,opt,name=auth,proto3" json:"auth,omitempty"`
- // Headers will contain the http headers from the request. This value will
- // be used in the audit broker to ensure we are auditing only the allowed
- // headers.
- Headers map[string]*Header `sentinel:"" protobuf:"bytes,8,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // ClientToken is provided to the core so that the identity
- // can be verified and ACLs applied. This value is passed
- // through to the logical backends but after being salted and
- // hashed.
- ClientToken string `sentinel:"" protobuf:"bytes,9,opt,name=client_token,json=clientToken,proto3" json:"client_token,omitempty"`
-	// ClientTokenAccessor is provided to the core so that it can get
- // logged as part of request audit logging.
- ClientTokenAccessor string `sentinel:"" protobuf:"bytes,10,opt,name=client_token_accessor,json=clientTokenAccessor,proto3" json:"client_token_accessor,omitempty"`
- // DisplayName is provided to the logical backend to help associate
- // dynamic secrets with the source entity. This is not a sensitive
- // name, but is useful for operators.
- DisplayName string `sentinel:"" protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
- // MountPoint is provided so that a logical backend can generate
- // paths relative to itself. The `Path` is effectively the client
- // request path with the MountPoint trimmed off.
- MountPoint string `sentinel:"" protobuf:"bytes,12,opt,name=mount_point,json=mountPoint,proto3" json:"mount_point,omitempty"`
- // MountType is provided so that a logical backend can make decisions
- // based on the specific mount type (e.g., if a mount type has different
- // aliases, generating different defaults depending on the alias)
- MountType string `sentinel:"" protobuf:"bytes,13,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"`
- // MountAccessor is provided so that identities returned by the authentication
- // backends can be tied to the mount it belongs to.
- MountAccessor string `sentinel:"" protobuf:"bytes,14,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"`
- // WrapInfo contains requested response wrapping parameters
- WrapInfo *RequestWrapInfo `sentinel:"" protobuf:"bytes,15,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"`
- // ClientTokenRemainingUses represents the allowed number of uses left on the
- // token supplied
- ClientTokenRemainingUses int64 `sentinel:"" protobuf:"varint,16,opt,name=client_token_remaining_uses,json=clientTokenRemainingUses,proto3" json:"client_token_remaining_uses,omitempty"`
- // EntityID is the identity of the caller extracted out of the token used
- // to make this request
- EntityID string `sentinel:"" protobuf:"bytes,17,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
- // PolicyOverride indicates that the requestor wishes to override
- // soft-mandatory Sentinel policies
- PolicyOverride bool `sentinel:"" protobuf:"varint,18,opt,name=policy_override,json=policyOverride,proto3" json:"policy_override,omitempty"`
- // Whether the request is unauthenticated, as in, had no client token
- // attached. Useful in some situations where the client token is not made
- // accessible.
- Unauthenticated bool `sentinel:"" protobuf:"varint,19,opt,name=unauthenticated,proto3" json:"unauthenticated,omitempty"`
- // Connection will be non-nil only for credential providers to
- // inspect the connection information and potentially use it for
- // authentication/protection.
- Connection *Connection `sentinel:"" protobuf:"bytes,20,opt,name=connection,proto3" json:"connection,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{4}
-}
-
-func (m *Request) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Request.Unmarshal(m, b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Request.Marshal(b, m, deterministic)
-}
-func (m *Request) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Request.Merge(m, src)
-}
-func (m *Request) XXX_Size() int {
- return xxx_messageInfo_Request.Size(m)
-}
-func (m *Request) XXX_DiscardUnknown() {
- xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-func (m *Request) GetID() string {
- if m != nil {
- return m.ID
- }
- return ""
-}
-
-func (m *Request) GetReplicationCluster() string {
- if m != nil {
- return m.ReplicationCluster
- }
- return ""
-}
-
-func (m *Request) GetOperation() string {
- if m != nil {
- return m.Operation
- }
- return ""
-}
-
-func (m *Request) GetPath() string {
- if m != nil {
- return m.Path
- }
- return ""
-}
-
-func (m *Request) GetData() string {
- if m != nil {
- return m.Data
- }
- return ""
-}
-
-func (m *Request) GetSecret() *Secret {
- if m != nil {
- return m.Secret
- }
- return nil
-}
-
-func (m *Request) GetAuth() *Auth {
- if m != nil {
- return m.Auth
- }
- return nil
-}
-
-func (m *Request) GetHeaders() map[string]*Header {
- if m != nil {
- return m.Headers
- }
- return nil
-}
-
-func (m *Request) GetClientToken() string {
- if m != nil {
- return m.ClientToken
- }
- return ""
-}
-
-func (m *Request) GetClientTokenAccessor() string {
- if m != nil {
- return m.ClientTokenAccessor
- }
- return ""
-}
-
-func (m *Request) GetDisplayName() string {
- if m != nil {
- return m.DisplayName
- }
- return ""
-}
-
-func (m *Request) GetMountPoint() string {
- if m != nil {
- return m.MountPoint
- }
- return ""
-}
-
-func (m *Request) GetMountType() string {
- if m != nil {
- return m.MountType
- }
- return ""
-}
-
-func (m *Request) GetMountAccessor() string {
- if m != nil {
- return m.MountAccessor
- }
- return ""
-}
-
-func (m *Request) GetWrapInfo() *RequestWrapInfo {
- if m != nil {
- return m.WrapInfo
- }
- return nil
-}
-
-func (m *Request) GetClientTokenRemainingUses() int64 {
- if m != nil {
- return m.ClientTokenRemainingUses
- }
- return 0
-}
-
-func (m *Request) GetEntityID() string {
- if m != nil {
- return m.EntityID
- }
- return ""
-}
-
-func (m *Request) GetPolicyOverride() bool {
- if m != nil {
- return m.PolicyOverride
- }
- return false
-}
-
-func (m *Request) GetUnauthenticated() bool {
- if m != nil {
- return m.Unauthenticated
- }
- return false
-}
-
-func (m *Request) GetConnection() *Connection {
- if m != nil {
- return m.Connection
- }
- return nil
-}
-
-type Auth struct {
- LeaseOptions *LeaseOptions `sentinel:"" protobuf:"bytes,1,opt,name=lease_options,json=leaseOptions,proto3" json:"lease_options,omitempty"`
- // InternalData is a JSON object that is stored with the auth struct.
- // This will be sent back during a Renew/Revoke for storing internal data
- // used for those operations.
- InternalData string `sentinel:"" protobuf:"bytes,2,opt,name=internal_data,json=internalData,proto3" json:"internal_data,omitempty"`
- // DisplayName is a non-security sensitive identifier that is
- // applicable to this Auth. It is used for logging and prefixing
- // of dynamic secrets. For example, DisplayName may be "armon" for
- // the github credential backend. If the client token is used to
- // generate a SQL credential, the user may be "github-armon-uuid".
- // This is to help identify the source without using audit tables.
- DisplayName string `sentinel:"" protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
- // Policies is the list of policies that the authenticated user
- // is associated with.
- Policies []string `sentinel:"" protobuf:"bytes,4,rep,name=policies,proto3" json:"policies,omitempty"`
- // Metadata is used to attach arbitrary string-type metadata to
- // an authenticated user. This metadata will be outputted into the
- // audit log.
- Metadata map[string]string `sentinel:"" protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // ClientToken is the token that is generated for the authentication.
- // This will be filled in by Vault core when an auth structure is
- // returned. Setting this manually will have no effect.
- ClientToken string `sentinel:"" protobuf:"bytes,6,opt,name=client_token,json=clientToken,proto3" json:"client_token,omitempty"`
- // Accessor is the identifier for the ClientToken. This can be used
- // to perform management functionalities (especially revocation) when
- // ClientToken in the audit logs are obfuscated. Accessor can be used
- // to revoke a ClientToken and to lookup the capabilities of the ClientToken,
- // both without actually knowing the ClientToken.
- Accessor string `sentinel:"" protobuf:"bytes,7,opt,name=accessor,proto3" json:"accessor,omitempty"`
- // Period indicates that the token generated using this Auth object
- // should never expire. The token should be renewed within the duration
- // specified by this period.
- Period int64 `sentinel:"" protobuf:"varint,8,opt,name=period,proto3" json:"period,omitempty"`
- // Number of allowed uses of the issued token
- NumUses int64 `sentinel:"" protobuf:"varint,9,opt,name=num_uses,json=numUses,proto3" json:"num_uses,omitempty"`
- // EntityID is the identifier of the entity in identity store to which the
- // identity of the authenticating client belongs to.
- EntityID string `sentinel:"" protobuf:"bytes,10,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
- // Alias is the information about the authenticated client returned by
- // the auth backend
- Alias *logical.Alias `sentinel:"" protobuf:"bytes,11,opt,name=alias,proto3" json:"alias,omitempty"`
- // GroupAliases are the informational mappings of external groups to
- // which an authenticated user belongs. This is used to check whether
- // there are mapped groups in the identity store for the group aliases.
- // For all the matching groups, the entity ID of the user will be added.
- GroupAliases []*logical.Alias `sentinel:"" protobuf:"bytes,12,rep,name=group_aliases,json=groupAliases,proto3" json:"group_aliases,omitempty"`
- // If set, restricts usage of the issued token to client IPs falling
- // within the range of the specified CIDR(s).
- BoundCIDRs []string `sentinel:"" protobuf:"bytes,13,rep,name=bound_cidrs,json=boundCidrs,proto3" json:"bound_cidrs,omitempty"`
- // TokenPolicies and IdentityPolicies break down the list in Policies to
- // help determine where a policy was sourced
- TokenPolicies []string `sentinel:"" protobuf:"bytes,14,rep,name=token_policies,json=tokenPolicies,proto3" json:"token_policies,omitempty"`
- IdentityPolicies []string `sentinel:"" protobuf:"bytes,15,rep,name=identity_policies,json=identityPolicies,proto3" json:"identity_policies,omitempty"`
- // Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum
- // TTL is a hard limit that cannot be exceeded; this also applies to
- // periodic tokens.
- ExplicitMaxTTL int64 `sentinel:"" protobuf:"varint,16,opt,name=explicit_max_ttl,json=explicitMaxTtl,proto3" json:"explicit_max_ttl,omitempty"`
- // TokenType is the type of token being requested
- TokenType uint32 `sentinel:"" protobuf:"varint,17,opt,name=token_type,json=tokenType,proto3" json:"token_type,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Auth) Reset() { *m = Auth{} }
-func (m *Auth) String() string { return proto.CompactTextString(m) }
-func (*Auth) ProtoMessage() {}
-func (*Auth) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{5}
-}
-
-func (m *Auth) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Auth.Unmarshal(m, b)
-}
-func (m *Auth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Auth.Marshal(b, m, deterministic)
-}
-func (m *Auth) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Auth.Merge(m, src)
-}
-func (m *Auth) XXX_Size() int {
- return xxx_messageInfo_Auth.Size(m)
-}
-func (m *Auth) XXX_DiscardUnknown() {
- xxx_messageInfo_Auth.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Auth proto.InternalMessageInfo
-
-func (m *Auth) GetLeaseOptions() *LeaseOptions {
- if m != nil {
- return m.LeaseOptions
- }
- return nil
-}
-
-func (m *Auth) GetInternalData() string {
- if m != nil {
- return m.InternalData
- }
- return ""
-}
-
-func (m *Auth) GetDisplayName() string {
- if m != nil {
- return m.DisplayName
- }
- return ""
-}
-
-func (m *Auth) GetPolicies() []string {
- if m != nil {
- return m.Policies
- }
- return nil
-}
-
-func (m *Auth) GetMetadata() map[string]string {
- if m != nil {
- return m.Metadata
- }
- return nil
-}
-
-func (m *Auth) GetClientToken() string {
- if m != nil {
- return m.ClientToken
- }
- return ""
-}
-
-func (m *Auth) GetAccessor() string {
- if m != nil {
- return m.Accessor
- }
- return ""
-}
-
-func (m *Auth) GetPeriod() int64 {
- if m != nil {
- return m.Period
- }
- return 0
-}
-
-func (m *Auth) GetNumUses() int64 {
- if m != nil {
- return m.NumUses
- }
- return 0
-}
-
-func (m *Auth) GetEntityID() string {
- if m != nil {
- return m.EntityID
- }
- return ""
-}
-
-func (m *Auth) GetAlias() *logical.Alias {
- if m != nil {
- return m.Alias
- }
- return nil
-}
-
-func (m *Auth) GetGroupAliases() []*logical.Alias {
- if m != nil {
- return m.GroupAliases
- }
- return nil
-}
-
-func (m *Auth) GetBoundCIDRs() []string {
- if m != nil {
- return m.BoundCIDRs
- }
- return nil
-}
-
-func (m *Auth) GetTokenPolicies() []string {
- if m != nil {
- return m.TokenPolicies
- }
- return nil
-}
-
-func (m *Auth) GetIdentityPolicies() []string {
- if m != nil {
- return m.IdentityPolicies
- }
- return nil
-}
-
-func (m *Auth) GetExplicitMaxTTL() int64 {
- if m != nil {
- return m.ExplicitMaxTTL
- }
- return 0
-}
-
-func (m *Auth) GetTokenType() uint32 {
- if m != nil {
- return m.TokenType
- }
- return 0
-}
-
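Every message below gets the same protoc-generated scaffolding: Reset/String/Descriptor, the XXX_* wire-format plumbing, and one nil-safe getter per field. The getters check the receiver against nil so that field access never panics, even when an optional sub-message was left unset. A minimal sketch of that property (the pb import path is an assumption, not taken from this file):

package main

import (
	"fmt"

	pb "github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

func main() {
	var auth *pb.Auth // nil, e.g. an unset field on a parent message

	// Getters on a nil receiver return the field's zero value
	// instead of panicking, so call chains stay safe.
	fmt.Println(auth.GetPolicies() == nil, auth.GetNumUses()) // true 0

	auth = &pb.Auth{Policies: []string{"default"}, NumUses: 3}
	fmt.Println(auth.GetPolicies(), auth.GetNumUses()) // [default] 3
}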
-type TokenEntry struct {
- ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- Accessor string `sentinel:"" protobuf:"bytes,2,opt,name=accessor,proto3" json:"accessor,omitempty"`
- Parent string `sentinel:"" protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"`
- Policies []string `sentinel:"" protobuf:"bytes,4,rep,name=policies,proto3" json:"policies,omitempty"`
- Path string `sentinel:"" protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"`
- Meta map[string]string `sentinel:"" protobuf:"bytes,6,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- DisplayName string `sentinel:"" protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
- NumUses int64 `sentinel:"" protobuf:"varint,8,opt,name=num_uses,json=numUses,proto3" json:"num_uses,omitempty"`
- CreationTime int64 `sentinel:"" protobuf:"varint,9,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
- TTL int64 `sentinel:"" protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"`
- ExplicitMaxTTL int64 `sentinel:"" protobuf:"varint,11,opt,name=explicit_max_ttl,json=explicitMaxTtl,proto3" json:"explicit_max_ttl,omitempty"`
- Role string `sentinel:"" protobuf:"bytes,12,opt,name=role,proto3" json:"role,omitempty"`
- Period int64 `sentinel:"" protobuf:"varint,13,opt,name=period,proto3" json:"period,omitempty"`
- EntityID string `sentinel:"" protobuf:"bytes,14,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
- BoundCIDRs []string `sentinel:"" protobuf:"bytes,15,rep,name=bound_cidrs,json=boundCidrs,proto3" json:"bound_cidrs,omitempty"`
- NamespaceID string `sentinel:"" protobuf:"bytes,16,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
- CubbyholeID string `sentinel:"" protobuf:"bytes,17,opt,name=cubbyhole_id,json=cubbyholeId,proto3" json:"cubbyhole_id,omitempty"`
- Type uint32 `sentinel:"" protobuf:"varint,18,opt,name=type,proto3" json:"type,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TokenEntry) Reset() { *m = TokenEntry{} }
-func (m *TokenEntry) String() string { return proto.CompactTextString(m) }
-func (*TokenEntry) ProtoMessage() {}
-func (*TokenEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{6}
-}
-
-func (m *TokenEntry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TokenEntry.Unmarshal(m, b)
-}
-func (m *TokenEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TokenEntry.Marshal(b, m, deterministic)
-}
-func (m *TokenEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenEntry.Merge(m, src)
-}
-func (m *TokenEntry) XXX_Size() int {
- return xxx_messageInfo_TokenEntry.Size(m)
-}
-func (m *TokenEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenEntry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenEntry proto.InternalMessageInfo
-
-func (m *TokenEntry) GetID() string {
- if m != nil {
- return m.ID
- }
- return ""
-}
-
-func (m *TokenEntry) GetAccessor() string {
- if m != nil {
- return m.Accessor
- }
- return ""
-}
-
-func (m *TokenEntry) GetParent() string {
- if m != nil {
- return m.Parent
- }
- return ""
-}
-
-func (m *TokenEntry) GetPolicies() []string {
- if m != nil {
- return m.Policies
- }
- return nil
-}
-
-func (m *TokenEntry) GetPath() string {
- if m != nil {
- return m.Path
- }
- return ""
-}
-
-func (m *TokenEntry) GetMeta() map[string]string {
- if m != nil {
- return m.Meta
- }
- return nil
-}
-
-func (m *TokenEntry) GetDisplayName() string {
- if m != nil {
- return m.DisplayName
- }
- return ""
-}
-
-func (m *TokenEntry) GetNumUses() int64 {
- if m != nil {
- return m.NumUses
- }
- return 0
-}
-
-func (m *TokenEntry) GetCreationTime() int64 {
- if m != nil {
- return m.CreationTime
- }
- return 0
-}
-
-func (m *TokenEntry) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *TokenEntry) GetExplicitMaxTTL() int64 {
- if m != nil {
- return m.ExplicitMaxTTL
- }
- return 0
-}
-
-func (m *TokenEntry) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-func (m *TokenEntry) GetPeriod() int64 {
- if m != nil {
- return m.Period
- }
- return 0
-}
-
-func (m *TokenEntry) GetEntityID() string {
- if m != nil {
- return m.EntityID
- }
- return ""
-}
-
-func (m *TokenEntry) GetBoundCIDRs() []string {
- if m != nil {
- return m.BoundCIDRs
- }
- return nil
-}
-
-func (m *TokenEntry) GetNamespaceID() string {
- if m != nil {
- return m.NamespaceID
- }
- return ""
-}
-
-func (m *TokenEntry) GetCubbyholeID() string {
- if m != nil {
- return m.CubbyholeID
- }
- return ""
-}
-
-func (m *TokenEntry) GetType() uint32 {
- if m != nil {
- return m.Type
- }
- return 0
-}
-
-type LeaseOptions struct {
- TTL int64 `sentinel:"" protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
- Renewable bool `sentinel:"" protobuf:"varint,2,opt,name=renewable,proto3" json:"renewable,omitempty"`
- Increment int64 `sentinel:"" protobuf:"varint,3,opt,name=increment,proto3" json:"increment,omitempty"`
- IssueTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,4,opt,name=issue_time,json=issueTime,proto3" json:"issue_time,omitempty"`
- MaxTTL int64 `sentinel:"" protobuf:"varint,5,opt,name=MaxTTL,proto3" json:"MaxTTL,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseOptions) Reset() { *m = LeaseOptions{} }
-func (m *LeaseOptions) String() string { return proto.CompactTextString(m) }
-func (*LeaseOptions) ProtoMessage() {}
-func (*LeaseOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{7}
-}
-
-func (m *LeaseOptions) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LeaseOptions.Unmarshal(m, b)
-}
-func (m *LeaseOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LeaseOptions.Marshal(b, m, deterministic)
-}
-func (m *LeaseOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseOptions.Merge(m, src)
-}
-func (m *LeaseOptions) XXX_Size() int {
- return xxx_messageInfo_LeaseOptions.Size(m)
-}
-func (m *LeaseOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseOptions proto.InternalMessageInfo
-
-func (m *LeaseOptions) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *LeaseOptions) GetRenewable() bool {
- if m != nil {
- return m.Renewable
- }
- return false
-}
-
-func (m *LeaseOptions) GetIncrement() int64 {
- if m != nil {
- return m.Increment
- }
- return 0
-}
-
-func (m *LeaseOptions) GetIssueTime() *timestamp.Timestamp {
- if m != nil {
- return m.IssueTime
- }
- return nil
-}
-
-func (m *LeaseOptions) GetMaxTTL() int64 {
- if m != nil {
- return m.MaxTTL
- }
- return 0
-}
-
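LeaseOptions carries IssueTime as a *timestamp.Timestamp rather than a native time.Time, so callers convert at the boundary. A short sketch using the golang/protobuf ptypes helpers; the pb import path and the TTL unit (nanoseconds, mirroring time.Duration) are assumptions:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"

	pb "github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

func main() {
	// time.Time -> *timestamp.Timestamp when building the message.
	issued, err := ptypes.TimestampProto(time.Now())
	if err != nil {
		panic(err)
	}
	lo := &pb.LeaseOptions{TTL: int64(5 * time.Minute), Renewable: true, IssueTime: issued}

	// *timestamp.Timestamp -> time.Time when consuming it.
	t, err := ptypes.Timestamp(lo.GetIssueTime())
	if err != nil {
		panic(err)
	}
	fmt.Println("issued:", t, "expires:", t.Add(time.Duration(lo.GetTTL())))
}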
-type Secret struct {
- LeaseOptions *LeaseOptions `sentinel:"" protobuf:"bytes,1,opt,name=lease_options,json=leaseOptions,proto3" json:"lease_options,omitempty"`
- // InternalData is a JSON object that is stored with the secret.
- // This will be sent back during a Renew/Revoke for storing internal data
- // used for those operations.
- InternalData string `sentinel:"" protobuf:"bytes,2,opt,name=internal_data,json=internalData,proto3" json:"internal_data,omitempty"`
- // LeaseID is the ID returned to the user to manage this secret.
- // This is generated by Vault core. Any set value will be ignored.
- // For requests, this will always be blank.
- LeaseID string `sentinel:"" protobuf:"bytes,3,opt,name=lease_id,json=leaseId,proto3" json:"lease_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Secret) Reset() { *m = Secret{} }
-func (m *Secret) String() string { return proto.CompactTextString(m) }
-func (*Secret) ProtoMessage() {}
-func (*Secret) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{8}
-}
-
-func (m *Secret) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Secret.Unmarshal(m, b)
-}
-func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Secret.Marshal(b, m, deterministic)
-}
-func (m *Secret) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Secret.Merge(m, src)
-}
-func (m *Secret) XXX_Size() int {
- return xxx_messageInfo_Secret.Size(m)
-}
-func (m *Secret) XXX_DiscardUnknown() {
- xxx_messageInfo_Secret.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Secret proto.InternalMessageInfo
-
-func (m *Secret) GetLeaseOptions() *LeaseOptions {
- if m != nil {
- return m.LeaseOptions
- }
- return nil
-}
-
-func (m *Secret) GetInternalData() string {
- if m != nil {
- return m.InternalData
- }
- return ""
-}
-
-func (m *Secret) GetLeaseID() string {
- if m != nil {
- return m.LeaseID
- }
- return ""
-}
-
-type Response struct {
- // Secret, if not nil, denotes that this response represents a secret.
- Secret *Secret `sentinel:"" protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"`
- // Auth, if not nil, contains the authentication information for
- // this response. This is only checked and means something for
- // credential backends.
- Auth *Auth `sentinel:"" protobuf:"bytes,2,opt,name=auth,proto3" json:"auth,omitempty"`
- // Response data is a JSON object that must have string keys. For
- // secrets, this data is sent down to the user as-is. To store internal
- // data that you don't want the user to see, store it in
- // Secret.InternalData.
- Data string `sentinel:"" protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
- // Redirect is an HTTP URL to redirect to for further authentication.
- // This is only valid for credential backends; it will be blanked
- // and ignored for any logical backend.
- Redirect string `sentinel:"" protobuf:"bytes,4,opt,name=redirect,proto3" json:"redirect,omitempty"`
- // Warnings allow operations or backends to return warnings in response
- // to user actions without failing the action outright.
- Warnings []string `sentinel:"" protobuf:"bytes,5,rep,name=warnings,proto3" json:"warnings,omitempty"`
- // Information for wrapping the response in a cubbyhole
- WrapInfo *ResponseWrapInfo `sentinel:"" protobuf:"bytes,6,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-func (*Response) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{9}
-}
-
-func (m *Response) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Response.Unmarshal(m, b)
-}
-func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Response.Marshal(b, m, deterministic)
-}
-func (m *Response) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Response.Merge(m, src)
-}
-func (m *Response) XXX_Size() int {
- return xxx_messageInfo_Response.Size(m)
-}
-func (m *Response) XXX_DiscardUnknown() {
- xxx_messageInfo_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Response proto.InternalMessageInfo
-
-func (m *Response) GetSecret() *Secret {
- if m != nil {
- return m.Secret
- }
- return nil
-}
-
-func (m *Response) GetAuth() *Auth {
- if m != nil {
- return m.Auth
- }
- return nil
-}
-
-func (m *Response) GetData() string {
- if m != nil {
- return m.Data
- }
- return ""
-}
-
-func (m *Response) GetRedirect() string {
- if m != nil {
- return m.Redirect
- }
- return ""
-}
-
-func (m *Response) GetWarnings() []string {
- if m != nil {
- return m.Warnings
- }
- return nil
-}
-
-func (m *Response) GetWrapInfo() *ResponseWrapInfo {
- if m != nil {
- return m.WrapInfo
- }
- return nil
-}
-
-type ResponseWrapInfo struct {
- // A non-zero value specifies that the response should be wrapped and
- // gives the desired TTL of the wrapping token.
- TTL int64 `sentinel:"" protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
- // The token containing the wrapped response
- Token string `sentinel:"" protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
- // The token accessor for the wrapped response token
- Accessor string `sentinel:"" protobuf:"bytes,3,opt,name=accessor,proto3" json:"accessor,omitempty"`
- // The creation time. This can be used with the TTL to figure out an
- // expected expiration.
- CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
- // If the contained response is the output of a token creation call, the
- // created token's accessor will be accessible here
- WrappedAccessor string `sentinel:"" protobuf:"bytes,5,opt,name=wrapped_accessor,json=wrappedAccessor,proto3" json:"wrapped_accessor,omitempty"`
- // WrappedEntityID is the entity identifier of the caller who initiated the
- // wrapping request
- WrappedEntityID string `sentinel:"" protobuf:"bytes,6,opt,name=wrapped_entity_id,json=wrappedEntityID,proto3" json:"wrapped_entity_id,omitempty"`
- // The format to use. This is internal only and is not returned.
- Format string `sentinel:"" protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
- // CreationPath is the original request path that was used to create
- // the wrapped response.
- CreationPath string `sentinel:"" protobuf:"bytes,8,opt,name=creation_path,json=creationPath,proto3" json:"creation_path,omitempty"`
- // Controls seal wrapping behavior downstream for specific use cases
- SealWrap bool `sentinel:"" protobuf:"varint,9,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResponseWrapInfo) Reset() { *m = ResponseWrapInfo{} }
-func (m *ResponseWrapInfo) String() string { return proto.CompactTextString(m) }
-func (*ResponseWrapInfo) ProtoMessage() {}
-func (*ResponseWrapInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{10}
-}
-
-func (m *ResponseWrapInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ResponseWrapInfo.Unmarshal(m, b)
-}
-func (m *ResponseWrapInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ResponseWrapInfo.Marshal(b, m, deterministic)
-}
-func (m *ResponseWrapInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseWrapInfo.Merge(m, src)
-}
-func (m *ResponseWrapInfo) XXX_Size() int {
- return xxx_messageInfo_ResponseWrapInfo.Size(m)
-}
-func (m *ResponseWrapInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseWrapInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseWrapInfo proto.InternalMessageInfo
-
-func (m *ResponseWrapInfo) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *ResponseWrapInfo) GetToken() string {
- if m != nil {
- return m.Token
- }
- return ""
-}
-
-func (m *ResponseWrapInfo) GetAccessor() string {
- if m != nil {
- return m.Accessor
- }
- return ""
-}
-
-func (m *ResponseWrapInfo) GetCreationTime() *timestamp.Timestamp {
- if m != nil {
- return m.CreationTime
- }
- return nil
-}
-
-func (m *ResponseWrapInfo) GetWrappedAccessor() string {
- if m != nil {
- return m.WrappedAccessor
- }
- return ""
-}
-
-func (m *ResponseWrapInfo) GetWrappedEntityID() string {
- if m != nil {
- return m.WrappedEntityID
- }
- return ""
-}
-
-func (m *ResponseWrapInfo) GetFormat() string {
- if m != nil {
- return m.Format
- }
- return ""
-}
-
-func (m *ResponseWrapInfo) GetCreationPath() string {
- if m != nil {
- return m.CreationPath
- }
- return ""
-}
-
-func (m *ResponseWrapInfo) GetSealWrap() bool {
- if m != nil {
- return m.SealWrap
- }
- return false
-}
-
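The CreationTime comment above says it can be combined with the TTL to figure out an expected expiration. A hedged sketch of that arithmetic, with the TTL unit again assumed to mirror time.Duration (nanoseconds):

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"

	pb "github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

// wrapExpiration derives when a wrapping token is expected to expire.
func wrapExpiration(wi *pb.ResponseWrapInfo) (time.Time, error) {
	created, err := ptypes.Timestamp(wi.GetCreationTime())
	if err != nil {
		return time.Time{}, err
	}
	return created.Add(time.Duration(wi.GetTTL())), nil
}

func main() {
	now, _ := ptypes.TimestampProto(time.Now())
	exp, _ := wrapExpiration(&pb.ResponseWrapInfo{TTL: int64(5 * time.Minute), CreationTime: now})
	fmt.Println(exp)
}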
-type RequestWrapInfo struct {
- // A non-zero value specifies that the response should be wrapped and
- // gives the desired TTL of the wrapping token.
- TTL int64 `sentinel:"" protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
- // The format to use for the wrapped response; if not specified, it's
- // a bare token.
- Format string `sentinel:"" protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
- // A flag indicating to conforming backends that data for a given
- // request should be seal-wrapped.
- SealWrap bool `sentinel:"" protobuf:"varint,3,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RequestWrapInfo) Reset() { *m = RequestWrapInfo{} }
-func (m *RequestWrapInfo) String() string { return proto.CompactTextString(m) }
-func (*RequestWrapInfo) ProtoMessage() {}
-func (*RequestWrapInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{11}
-}
-
-func (m *RequestWrapInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RequestWrapInfo.Unmarshal(m, b)
-}
-func (m *RequestWrapInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RequestWrapInfo.Marshal(b, m, deterministic)
-}
-func (m *RequestWrapInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RequestWrapInfo.Merge(m, src)
-}
-func (m *RequestWrapInfo) XXX_Size() int {
- return xxx_messageInfo_RequestWrapInfo.Size(m)
-}
-func (m *RequestWrapInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_RequestWrapInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestWrapInfo proto.InternalMessageInfo
-
-func (m *RequestWrapInfo) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *RequestWrapInfo) GetFormat() string {
- if m != nil {
- return m.Format
- }
- return ""
-}
-
-func (m *RequestWrapInfo) GetSealWrap() bool {
- if m != nil {
- return m.SealWrap
- }
- return false
-}
-
- // HandleRequestArgs is the args for the HandleRequest method.
-type HandleRequestArgs struct {
- StorageID uint32 `sentinel:"" protobuf:"varint,1,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"`
- Request *Request `sentinel:"" protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HandleRequestArgs) Reset() { *m = HandleRequestArgs{} }
-func (m *HandleRequestArgs) String() string { return proto.CompactTextString(m) }
-func (*HandleRequestArgs) ProtoMessage() {}
-func (*HandleRequestArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{12}
-}
-
-func (m *HandleRequestArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HandleRequestArgs.Unmarshal(m, b)
-}
-func (m *HandleRequestArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HandleRequestArgs.Marshal(b, m, deterministic)
-}
-func (m *HandleRequestArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HandleRequestArgs.Merge(m, src)
-}
-func (m *HandleRequestArgs) XXX_Size() int {
- return xxx_messageInfo_HandleRequestArgs.Size(m)
-}
-func (m *HandleRequestArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_HandleRequestArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HandleRequestArgs proto.InternalMessageInfo
-
-func (m *HandleRequestArgs) GetStorageID() uint32 {
- if m != nil {
- return m.StorageID
- }
- return 0
-}
-
-func (m *HandleRequestArgs) GetRequest() *Request {
- if m != nil {
- return m.Request
- }
- return nil
-}
-
- // HandleRequestReply is the reply for the HandleRequest method.
-type HandleRequestReply struct {
- Response *Response `sentinel:"" protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
- Err *ProtoError `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HandleRequestReply) Reset() { *m = HandleRequestReply{} }
-func (m *HandleRequestReply) String() string { return proto.CompactTextString(m) }
-func (*HandleRequestReply) ProtoMessage() {}
-func (*HandleRequestReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{13}
-}
-
-func (m *HandleRequestReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HandleRequestReply.Unmarshal(m, b)
-}
-func (m *HandleRequestReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HandleRequestReply.Marshal(b, m, deterministic)
-}
-func (m *HandleRequestReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HandleRequestReply.Merge(m, src)
-}
-func (m *HandleRequestReply) XXX_Size() int {
- return xxx_messageInfo_HandleRequestReply.Size(m)
-}
-func (m *HandleRequestReply) XXX_DiscardUnknown() {
- xxx_messageInfo_HandleRequestReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HandleRequestReply proto.InternalMessageInfo
-
-func (m *HandleRequestReply) GetResponse() *Response {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
-func (m *HandleRequestReply) GetErr() *ProtoError {
- if m != nil {
- return m.Err
- }
- return nil
-}
-
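HandleRequestArgs and HandleRequestReply form an args/reply pair: the reply carries either a Response or a structured ProtoError. A small sketch of unpacking a reply with the nil-safe getters; how the pair travels over the plugin transport is outside this file:

package main

import (
	"fmt"

	pb "github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

// responseFromReply prefers the structured error over the response,
// mirroring the usual args/reply convention.
func responseFromReply(reply *pb.HandleRequestReply) (*pb.Response, *pb.ProtoError) {
	if err := reply.GetErr(); err != nil {
		return nil, err
	}
	return reply.GetResponse(), nil
}

func main() {
	reply := &pb.HandleRequestReply{Response: &pb.Response{Data: `{"ok":true}`}}
	resp, perr := responseFromReply(reply)
	fmt.Println(resp.GetData(), perr == nil) // {"ok":true} true
}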
- // SpecialPathsReply is the reply for the SpecialPaths method.
-type SpecialPathsReply struct {
- Paths *Paths `sentinel:"" protobuf:"bytes,1,opt,name=paths,proto3" json:"paths,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SpecialPathsReply) Reset() { *m = SpecialPathsReply{} }
-func (m *SpecialPathsReply) String() string { return proto.CompactTextString(m) }
-func (*SpecialPathsReply) ProtoMessage() {}
-func (*SpecialPathsReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{14}
-}
-
-func (m *SpecialPathsReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SpecialPathsReply.Unmarshal(m, b)
-}
-func (m *SpecialPathsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SpecialPathsReply.Marshal(b, m, deterministic)
-}
-func (m *SpecialPathsReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SpecialPathsReply.Merge(m, src)
-}
-func (m *SpecialPathsReply) XXX_Size() int {
- return xxx_messageInfo_SpecialPathsReply.Size(m)
-}
-func (m *SpecialPathsReply) XXX_DiscardUnknown() {
- xxx_messageInfo_SpecialPathsReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SpecialPathsReply proto.InternalMessageInfo
-
-func (m *SpecialPathsReply) GetPaths() *Paths {
- if m != nil {
- return m.Paths
- }
- return nil
-}
-
- // HandleExistenceCheckArgs is the args for the HandleExistenceCheck method.
-type HandleExistenceCheckArgs struct {
- StorageID uint32 `sentinel:"" protobuf:"varint,1,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"`
- Request *Request `sentinel:"" protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HandleExistenceCheckArgs) Reset() { *m = HandleExistenceCheckArgs{} }
-func (m *HandleExistenceCheckArgs) String() string { return proto.CompactTextString(m) }
-func (*HandleExistenceCheckArgs) ProtoMessage() {}
-func (*HandleExistenceCheckArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{15}
-}
-
-func (m *HandleExistenceCheckArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HandleExistenceCheckArgs.Unmarshal(m, b)
-}
-func (m *HandleExistenceCheckArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HandleExistenceCheckArgs.Marshal(b, m, deterministic)
-}
-func (m *HandleExistenceCheckArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HandleExistenceCheckArgs.Merge(m, src)
-}
-func (m *HandleExistenceCheckArgs) XXX_Size() int {
- return xxx_messageInfo_HandleExistenceCheckArgs.Size(m)
-}
-func (m *HandleExistenceCheckArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_HandleExistenceCheckArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HandleExistenceCheckArgs proto.InternalMessageInfo
-
-func (m *HandleExistenceCheckArgs) GetStorageID() uint32 {
- if m != nil {
- return m.StorageID
- }
- return 0
-}
-
-func (m *HandleExistenceCheckArgs) GetRequest() *Request {
- if m != nil {
- return m.Request
- }
- return nil
-}
-
- // HandleExistenceCheckReply is the reply for the HandleExistenceCheck method.
-type HandleExistenceCheckReply struct {
- CheckFound bool `sentinel:"" protobuf:"varint,1,opt,name=check_found,json=checkFound,proto3" json:"check_found,omitempty"`
- Exists bool `sentinel:"" protobuf:"varint,2,opt,name=exists,proto3" json:"exists,omitempty"`
- Err *ProtoError `sentinel:"" protobuf:"bytes,3,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HandleExistenceCheckReply) Reset() { *m = HandleExistenceCheckReply{} }
-func (m *HandleExistenceCheckReply) String() string { return proto.CompactTextString(m) }
-func (*HandleExistenceCheckReply) ProtoMessage() {}
-func (*HandleExistenceCheckReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{16}
-}
-
-func (m *HandleExistenceCheckReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HandleExistenceCheckReply.Unmarshal(m, b)
-}
-func (m *HandleExistenceCheckReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HandleExistenceCheckReply.Marshal(b, m, deterministic)
-}
-func (m *HandleExistenceCheckReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HandleExistenceCheckReply.Merge(m, src)
-}
-func (m *HandleExistenceCheckReply) XXX_Size() int {
- return xxx_messageInfo_HandleExistenceCheckReply.Size(m)
-}
-func (m *HandleExistenceCheckReply) XXX_DiscardUnknown() {
- xxx_messageInfo_HandleExistenceCheckReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HandleExistenceCheckReply proto.InternalMessageInfo
-
-func (m *HandleExistenceCheckReply) GetCheckFound() bool {
- if m != nil {
- return m.CheckFound
- }
- return false
-}
-
-func (m *HandleExistenceCheckReply) GetExists() bool {
- if m != nil {
- return m.Exists
- }
- return false
-}
-
-func (m *HandleExistenceCheckReply) GetErr() *ProtoError {
- if m != nil {
- return m.Err
- }
- return nil
-}
-
- // SetupArgs is the args for the Setup method.
-type SetupArgs struct {
- BrokerID uint32 `sentinel:"" protobuf:"varint,1,opt,name=broker_id,json=brokerId,proto3" json:"broker_id,omitempty"`
- Config map[string]string `sentinel:"" protobuf:"bytes,2,rep,name=Config,proto3" json:"Config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- BackendUUID string `sentinel:"" protobuf:"bytes,3,opt,name=backendUUID,proto3" json:"backendUUID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SetupArgs) Reset() { *m = SetupArgs{} }
-func (m *SetupArgs) String() string { return proto.CompactTextString(m) }
-func (*SetupArgs) ProtoMessage() {}
-func (*SetupArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{17}
-}
-
-func (m *SetupArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetupArgs.Unmarshal(m, b)
-}
-func (m *SetupArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetupArgs.Marshal(b, m, deterministic)
-}
-func (m *SetupArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetupArgs.Merge(m, src)
-}
-func (m *SetupArgs) XXX_Size() int {
- return xxx_messageInfo_SetupArgs.Size(m)
-}
-func (m *SetupArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_SetupArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetupArgs proto.InternalMessageInfo
-
-func (m *SetupArgs) GetBrokerID() uint32 {
- if m != nil {
- return m.BrokerID
- }
- return 0
-}
-
-func (m *SetupArgs) GetConfig() map[string]string {
- if m != nil {
- return m.Config
- }
- return nil
-}
-
-func (m *SetupArgs) GetBackendUUID() string {
- if m != nil {
- return m.BackendUUID
- }
- return ""
-}
-
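SetupArgs bundles everything a backend needs at mount time: the broker ID for callbacks, the mount's string-typed Config map, and the backend UUID. An illustrative construction (all values hypothetical):

package main

import (
	"fmt"

	pb "github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

func main() {
	args := &pb.SetupArgs{
		BrokerID:    42,
		Config:      map[string]string{"default_lease_ttl": "768h"},
		BackendUUID: "b3c7a4a2-0000-0000-0000-000000000000",
	}
	fmt.Println(args.GetBrokerID(), args.GetConfig()["default_lease_ttl"], args.GetBackendUUID())
}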
- // SetupReply is the reply for the Setup method.
-type SetupReply struct {
- Err string `sentinel:"" protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SetupReply) Reset() { *m = SetupReply{} }
-func (m *SetupReply) String() string { return proto.CompactTextString(m) }
-func (*SetupReply) ProtoMessage() {}
-func (*SetupReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{18}
-}
-
-func (m *SetupReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetupReply.Unmarshal(m, b)
-}
-func (m *SetupReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetupReply.Marshal(b, m, deterministic)
-}
-func (m *SetupReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetupReply.Merge(m, src)
-}
-func (m *SetupReply) XXX_Size() int {
- return xxx_messageInfo_SetupReply.Size(m)
-}
-func (m *SetupReply) XXX_DiscardUnknown() {
- xxx_messageInfo_SetupReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetupReply proto.InternalMessageInfo
-
-func (m *SetupReply) GetErr() string {
- if m != nil {
- return m.Err
- }
- return ""
-}
-
-// TypeReply is the reply for the Type method.
-type TypeReply struct {
- Type uint32 `sentinel:"" protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TypeReply) Reset() { *m = TypeReply{} }
-func (m *TypeReply) String() string { return proto.CompactTextString(m) }
-func (*TypeReply) ProtoMessage() {}
-func (*TypeReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{19}
-}
-
-func (m *TypeReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TypeReply.Unmarshal(m, b)
-}
-func (m *TypeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TypeReply.Marshal(b, m, deterministic)
-}
-func (m *TypeReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeReply.Merge(m, src)
-}
-func (m *TypeReply) XXX_Size() int {
- return xxx_messageInfo_TypeReply.Size(m)
-}
-func (m *TypeReply) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TypeReply proto.InternalMessageInfo
-
-func (m *TypeReply) GetType() uint32 {
- if m != nil {
- return m.Type
- }
- return 0
-}
-
-type InvalidateKeyArgs struct {
- Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *InvalidateKeyArgs) Reset() { *m = InvalidateKeyArgs{} }
-func (m *InvalidateKeyArgs) String() string { return proto.CompactTextString(m) }
-func (*InvalidateKeyArgs) ProtoMessage() {}
-func (*InvalidateKeyArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{20}
-}
-
-func (m *InvalidateKeyArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_InvalidateKeyArgs.Unmarshal(m, b)
-}
-func (m *InvalidateKeyArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_InvalidateKeyArgs.Marshal(b, m, deterministic)
-}
-func (m *InvalidateKeyArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InvalidateKeyArgs.Merge(m, src)
-}
-func (m *InvalidateKeyArgs) XXX_Size() int {
- return xxx_messageInfo_InvalidateKeyArgs.Size(m)
-}
-func (m *InvalidateKeyArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_InvalidateKeyArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InvalidateKeyArgs proto.InternalMessageInfo
-
-func (m *InvalidateKeyArgs) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-type StorageEntry struct {
- Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `sentinel:"" protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- SealWrap bool `sentinel:"" protobuf:"varint,3,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StorageEntry) Reset() { *m = StorageEntry{} }
-func (m *StorageEntry) String() string { return proto.CompactTextString(m) }
-func (*StorageEntry) ProtoMessage() {}
-func (*StorageEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{21}
-}
-
-func (m *StorageEntry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StorageEntry.Unmarshal(m, b)
-}
-func (m *StorageEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StorageEntry.Marshal(b, m, deterministic)
-}
-func (m *StorageEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageEntry.Merge(m, src)
-}
-func (m *StorageEntry) XXX_Size() int {
- return xxx_messageInfo_StorageEntry.Size(m)
-}
-func (m *StorageEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageEntry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageEntry proto.InternalMessageInfo
-
-func (m *StorageEntry) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *StorageEntry) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *StorageEntry) GetSealWrap() bool {
- if m != nil {
- return m.SealWrap
- }
- return false
-}
-
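StorageEntry is the unit the storage RPCs below pass around. A quick round-trip through the wire format, which is what the XXX_Marshal/XXX_Unmarshal plumbing above ultimately backs; proto here is github.com/golang/protobuf/proto, and the pb import path is an assumption:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

func main() {
	in := &pb.StorageEntry{Key: "config/root", Value: []byte("s3cr3t"), SealWrap: true}

	raw, err := proto.Marshal(in) // encode to the protobuf wire format
	if err != nil {
		panic(err)
	}

	out := &pb.StorageEntry{}
	if err := proto.Unmarshal(raw, out); err != nil { // decode it back
		panic(err)
	}
	fmt.Println(out.GetKey(), string(out.GetValue()), out.GetSealWrap())
}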
-type StorageListArgs struct {
- Prefix string `sentinel:"" protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StorageListArgs) Reset() { *m = StorageListArgs{} }
-func (m *StorageListArgs) String() string { return proto.CompactTextString(m) }
-func (*StorageListArgs) ProtoMessage() {}
-func (*StorageListArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{22}
-}
-
-func (m *StorageListArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StorageListArgs.Unmarshal(m, b)
-}
-func (m *StorageListArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StorageListArgs.Marshal(b, m, deterministic)
-}
-func (m *StorageListArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageListArgs.Merge(m, src)
-}
-func (m *StorageListArgs) XXX_Size() int {
- return xxx_messageInfo_StorageListArgs.Size(m)
-}
-func (m *StorageListArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageListArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageListArgs proto.InternalMessageInfo
-
-func (m *StorageListArgs) GetPrefix() string {
- if m != nil {
- return m.Prefix
- }
- return ""
-}
-
-type StorageListReply struct {
- Keys []string `sentinel:"" protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
- Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StorageListReply) Reset() { *m = StorageListReply{} }
-func (m *StorageListReply) String() string { return proto.CompactTextString(m) }
-func (*StorageListReply) ProtoMessage() {}
-func (*StorageListReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{23}
-}
-
-func (m *StorageListReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StorageListReply.Unmarshal(m, b)
-}
-func (m *StorageListReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StorageListReply.Marshal(b, m, deterministic)
-}
-func (m *StorageListReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageListReply.Merge(m, src)
-}
-func (m *StorageListReply) XXX_Size() int {
- return xxx_messageInfo_StorageListReply.Size(m)
-}
-func (m *StorageListReply) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageListReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageListReply proto.InternalMessageInfo
-
-func (m *StorageListReply) GetKeys() []string {
- if m != nil {
- return m.Keys
- }
- return nil
-}
-
-func (m *StorageListReply) GetErr() string {
- if m != nil {
- return m.Err
- }
- return ""
-}
-
-type StorageGetArgs struct {
- Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StorageGetArgs) Reset() { *m = StorageGetArgs{} }
-func (m *StorageGetArgs) String() string { return proto.CompactTextString(m) }
-func (*StorageGetArgs) ProtoMessage() {}
-func (*StorageGetArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{24}
-}
-
-func (m *StorageGetArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StorageGetArgs.Unmarshal(m, b)
-}
-func (m *StorageGetArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StorageGetArgs.Marshal(b, m, deterministic)
-}
-func (m *StorageGetArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageGetArgs.Merge(m, src)
-}
-func (m *StorageGetArgs) XXX_Size() int {
- return xxx_messageInfo_StorageGetArgs.Size(m)
-}
-func (m *StorageGetArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageGetArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageGetArgs proto.InternalMessageInfo
-
-func (m *StorageGetArgs) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-type StorageGetReply struct {
- Entry *StorageEntry `sentinel:"" protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
- Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StorageGetReply) Reset() { *m = StorageGetReply{} }
-func (m *StorageGetReply) String() string { return proto.CompactTextString(m) }
-func (*StorageGetReply) ProtoMessage() {}
-func (*StorageGetReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{25}
-}
-
-func (m *StorageGetReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StorageGetReply.Unmarshal(m, b)
-}
-func (m *StorageGetReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StorageGetReply.Marshal(b, m, deterministic)
-}
-func (m *StorageGetReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageGetReply.Merge(m, src)
-}
-func (m *StorageGetReply) XXX_Size() int {
- return xxx_messageInfo_StorageGetReply.Size(m)
-}
-func (m *StorageGetReply) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageGetReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageGetReply proto.InternalMessageInfo
-
-func (m *StorageGetReply) GetEntry() *StorageEntry {
- if m != nil {
- return m.Entry
- }
- return nil
-}
-
-func (m *StorageGetReply) GetErr() string {
- if m != nil {
- return m.Err
- }
- return ""
-}
-
-type StoragePutArgs struct {
- Entry *StorageEntry `sentinel:"" protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StoragePutArgs) Reset() { *m = StoragePutArgs{} }
-func (m *StoragePutArgs) String() string { return proto.CompactTextString(m) }
-func (*StoragePutArgs) ProtoMessage() {}
-func (*StoragePutArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{26}
-}
-
-func (m *StoragePutArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StoragePutArgs.Unmarshal(m, b)
-}
-func (m *StoragePutArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StoragePutArgs.Marshal(b, m, deterministic)
-}
-func (m *StoragePutArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StoragePutArgs.Merge(m, src)
-}
-func (m *StoragePutArgs) XXX_Size() int {
- return xxx_messageInfo_StoragePutArgs.Size(m)
-}
-func (m *StoragePutArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_StoragePutArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StoragePutArgs proto.InternalMessageInfo
-
-func (m *StoragePutArgs) GetEntry() *StorageEntry {
- if m != nil {
- return m.Entry
- }
- return nil
-}
-
-type StoragePutReply struct {
- Err string `sentinel:"" protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StoragePutReply) Reset() { *m = StoragePutReply{} }
-func (m *StoragePutReply) String() string { return proto.CompactTextString(m) }
-func (*StoragePutReply) ProtoMessage() {}
-func (*StoragePutReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{27}
-}
-
-func (m *StoragePutReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StoragePutReply.Unmarshal(m, b)
-}
-func (m *StoragePutReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StoragePutReply.Marshal(b, m, deterministic)
-}
-func (m *StoragePutReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StoragePutReply.Merge(m, src)
-}
-func (m *StoragePutReply) XXX_Size() int {
- return xxx_messageInfo_StoragePutReply.Size(m)
-}
-func (m *StoragePutReply) XXX_DiscardUnknown() {
- xxx_messageInfo_StoragePutReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StoragePutReply proto.InternalMessageInfo
-
-func (m *StoragePutReply) GetErr() string {
- if m != nil {
- return m.Err
- }
- return ""
-}
-
-type StorageDeleteArgs struct {
- Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StorageDeleteArgs) Reset() { *m = StorageDeleteArgs{} }
-func (m *StorageDeleteArgs) String() string { return proto.CompactTextString(m) }
-func (*StorageDeleteArgs) ProtoMessage() {}
-func (*StorageDeleteArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{28}
-}
-
-func (m *StorageDeleteArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StorageDeleteArgs.Unmarshal(m, b)
-}
-func (m *StorageDeleteArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StorageDeleteArgs.Marshal(b, m, deterministic)
-}
-func (m *StorageDeleteArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageDeleteArgs.Merge(m, src)
-}
-func (m *StorageDeleteArgs) XXX_Size() int {
- return xxx_messageInfo_StorageDeleteArgs.Size(m)
-}
-func (m *StorageDeleteArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageDeleteArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageDeleteArgs proto.InternalMessageInfo
-
-func (m *StorageDeleteArgs) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-type StorageDeleteReply struct {
- Err string `sentinel:"" protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StorageDeleteReply) Reset() { *m = StorageDeleteReply{} }
-func (m *StorageDeleteReply) String() string { return proto.CompactTextString(m) }
-func (*StorageDeleteReply) ProtoMessage() {}
-func (*StorageDeleteReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{29}
-}
-
-func (m *StorageDeleteReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StorageDeleteReply.Unmarshal(m, b)
-}
-func (m *StorageDeleteReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StorageDeleteReply.Marshal(b, m, deterministic)
-}
-func (m *StorageDeleteReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageDeleteReply.Merge(m, src)
-}
-func (m *StorageDeleteReply) XXX_Size() int {
- return xxx_messageInfo_StorageDeleteReply.Size(m)
-}
-func (m *StorageDeleteReply) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageDeleteReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageDeleteReply proto.InternalMessageInfo
-
-func (m *StorageDeleteReply) GetErr() string {
- if m != nil {
- return m.Err
- }
- return ""
-}
-
-type TTLReply struct {
- TTL int64 `sentinel:"" protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TTLReply) Reset() { *m = TTLReply{} }
-func (m *TTLReply) String() string { return proto.CompactTextString(m) }
-func (*TTLReply) ProtoMessage() {}
-func (*TTLReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{30}
-}
-
-func (m *TTLReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TTLReply.Unmarshal(m, b)
-}
-func (m *TTLReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TTLReply.Marshal(b, m, deterministic)
-}
-func (m *TTLReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TTLReply.Merge(m, src)
-}
-func (m *TTLReply) XXX_Size() int {
- return xxx_messageInfo_TTLReply.Size(m)
-}
-func (m *TTLReply) XXX_DiscardUnknown() {
- xxx_messageInfo_TTLReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TTLReply proto.InternalMessageInfo
-
-func (m *TTLReply) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-type SudoPrivilegeArgs struct {
- Path string `sentinel:"" protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
- Token string `sentinel:"" protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SudoPrivilegeArgs) Reset() { *m = SudoPrivilegeArgs{} }
-func (m *SudoPrivilegeArgs) String() string { return proto.CompactTextString(m) }
-func (*SudoPrivilegeArgs) ProtoMessage() {}
-func (*SudoPrivilegeArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{31}
-}
-
-func (m *SudoPrivilegeArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SudoPrivilegeArgs.Unmarshal(m, b)
-}
-func (m *SudoPrivilegeArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SudoPrivilegeArgs.Marshal(b, m, deterministic)
-}
-func (m *SudoPrivilegeArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SudoPrivilegeArgs.Merge(m, src)
-}
-func (m *SudoPrivilegeArgs) XXX_Size() int {
- return xxx_messageInfo_SudoPrivilegeArgs.Size(m)
-}
-func (m *SudoPrivilegeArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_SudoPrivilegeArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SudoPrivilegeArgs proto.InternalMessageInfo
-
-func (m *SudoPrivilegeArgs) GetPath() string {
- if m != nil {
- return m.Path
- }
- return ""
-}
-
-func (m *SudoPrivilegeArgs) GetToken() string {
- if m != nil {
- return m.Token
- }
- return ""
-}
-
-type SudoPrivilegeReply struct {
- Sudo bool `sentinel:"" protobuf:"varint,1,opt,name=sudo,proto3" json:"sudo,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SudoPrivilegeReply) Reset() { *m = SudoPrivilegeReply{} }
-func (m *SudoPrivilegeReply) String() string { return proto.CompactTextString(m) }
-func (*SudoPrivilegeReply) ProtoMessage() {}
-func (*SudoPrivilegeReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{32}
-}
-
-func (m *SudoPrivilegeReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SudoPrivilegeReply.Unmarshal(m, b)
-}
-func (m *SudoPrivilegeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SudoPrivilegeReply.Marshal(b, m, deterministic)
-}
-func (m *SudoPrivilegeReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SudoPrivilegeReply.Merge(m, src)
-}
-func (m *SudoPrivilegeReply) XXX_Size() int {
- return xxx_messageInfo_SudoPrivilegeReply.Size(m)
-}
-func (m *SudoPrivilegeReply) XXX_DiscardUnknown() {
- xxx_messageInfo_SudoPrivilegeReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SudoPrivilegeReply proto.InternalMessageInfo
-
-func (m *SudoPrivilegeReply) GetSudo() bool {
- if m != nil {
- return m.Sudo
- }
- return false
-}
-
-type TaintedReply struct {
- Tainted bool `sentinel:"" protobuf:"varint,1,opt,name=tainted,proto3" json:"tainted,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TaintedReply) Reset() { *m = TaintedReply{} }
-func (m *TaintedReply) String() string { return proto.CompactTextString(m) }
-func (*TaintedReply) ProtoMessage() {}
-func (*TaintedReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{33}
-}
-
-func (m *TaintedReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TaintedReply.Unmarshal(m, b)
-}
-func (m *TaintedReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TaintedReply.Marshal(b, m, deterministic)
-}
-func (m *TaintedReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TaintedReply.Merge(m, src)
-}
-func (m *TaintedReply) XXX_Size() int {
- return xxx_messageInfo_TaintedReply.Size(m)
-}
-func (m *TaintedReply) XXX_DiscardUnknown() {
- xxx_messageInfo_TaintedReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TaintedReply proto.InternalMessageInfo
-
-func (m *TaintedReply) GetTainted() bool {
- if m != nil {
- return m.Tainted
- }
- return false
-}
-
-type CachingDisabledReply struct {
- Disabled bool `sentinel:"" protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CachingDisabledReply) Reset() { *m = CachingDisabledReply{} }
-func (m *CachingDisabledReply) String() string { return proto.CompactTextString(m) }
-func (*CachingDisabledReply) ProtoMessage() {}
-func (*CachingDisabledReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{34}
-}
-
-func (m *CachingDisabledReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CachingDisabledReply.Unmarshal(m, b)
-}
-func (m *CachingDisabledReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CachingDisabledReply.Marshal(b, m, deterministic)
-}
-func (m *CachingDisabledReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CachingDisabledReply.Merge(m, src)
-}
-func (m *CachingDisabledReply) XXX_Size() int {
- return xxx_messageInfo_CachingDisabledReply.Size(m)
-}
-func (m *CachingDisabledReply) XXX_DiscardUnknown() {
- xxx_messageInfo_CachingDisabledReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CachingDisabledReply proto.InternalMessageInfo
-
-func (m *CachingDisabledReply) GetDisabled() bool {
- if m != nil {
- return m.Disabled
- }
- return false
-}
-
-type ReplicationStateReply struct {
- State int32 `sentinel:"" protobuf:"varint,1,opt,name=state,proto3" json:"state,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ReplicationStateReply) Reset() { *m = ReplicationStateReply{} }
-func (m *ReplicationStateReply) String() string { return proto.CompactTextString(m) }
-func (*ReplicationStateReply) ProtoMessage() {}
-func (*ReplicationStateReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{35}
-}
-
-func (m *ReplicationStateReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ReplicationStateReply.Unmarshal(m, b)
-}
-func (m *ReplicationStateReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ReplicationStateReply.Marshal(b, m, deterministic)
-}
-func (m *ReplicationStateReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicationStateReply.Merge(m, src)
-}
-func (m *ReplicationStateReply) XXX_Size() int {
- return xxx_messageInfo_ReplicationStateReply.Size(m)
-}
-func (m *ReplicationStateReply) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicationStateReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ReplicationStateReply proto.InternalMessageInfo
-
-func (m *ReplicationStateReply) GetState() int32 {
- if m != nil {
- return m.State
- }
- return 0
-}
-
-type ResponseWrapDataArgs struct {
- Data string `sentinel:"" protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
- TTL int64 `sentinel:"" protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
- JWT bool `sentinel:"" protobuf:"varint,3,opt,name=JWT,proto3" json:"JWT,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResponseWrapDataArgs) Reset() { *m = ResponseWrapDataArgs{} }
-func (m *ResponseWrapDataArgs) String() string { return proto.CompactTextString(m) }
-func (*ResponseWrapDataArgs) ProtoMessage() {}
-func (*ResponseWrapDataArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{36}
-}
-
-func (m *ResponseWrapDataArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ResponseWrapDataArgs.Unmarshal(m, b)
-}
-func (m *ResponseWrapDataArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ResponseWrapDataArgs.Marshal(b, m, deterministic)
-}
-func (m *ResponseWrapDataArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseWrapDataArgs.Merge(m, src)
-}
-func (m *ResponseWrapDataArgs) XXX_Size() int {
- return xxx_messageInfo_ResponseWrapDataArgs.Size(m)
-}
-func (m *ResponseWrapDataArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseWrapDataArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseWrapDataArgs proto.InternalMessageInfo
-
-func (m *ResponseWrapDataArgs) GetData() string {
- if m != nil {
- return m.Data
- }
- return ""
-}
-
-func (m *ResponseWrapDataArgs) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *ResponseWrapDataArgs) GetJWT() bool {
- if m != nil {
- return m.JWT
- }
- return false
-}
-
-type ResponseWrapDataReply struct {
- WrapInfo *ResponseWrapInfo `sentinel:"" protobuf:"bytes,1,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"`
- Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResponseWrapDataReply) Reset() { *m = ResponseWrapDataReply{} }
-func (m *ResponseWrapDataReply) String() string { return proto.CompactTextString(m) }
-func (*ResponseWrapDataReply) ProtoMessage() {}
-func (*ResponseWrapDataReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{37}
-}
-
-func (m *ResponseWrapDataReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ResponseWrapDataReply.Unmarshal(m, b)
-}
-func (m *ResponseWrapDataReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ResponseWrapDataReply.Marshal(b, m, deterministic)
-}
-func (m *ResponseWrapDataReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseWrapDataReply.Merge(m, src)
-}
-func (m *ResponseWrapDataReply) XXX_Size() int {
- return xxx_messageInfo_ResponseWrapDataReply.Size(m)
-}
-func (m *ResponseWrapDataReply) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseWrapDataReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseWrapDataReply proto.InternalMessageInfo
-
-func (m *ResponseWrapDataReply) GetWrapInfo() *ResponseWrapInfo {
- if m != nil {
- return m.WrapInfo
- }
- return nil
-}
-
-func (m *ResponseWrapDataReply) GetErr() string {
- if m != nil {
- return m.Err
- }
- return ""
-}
-
-type MlockEnabledReply struct {
- Enabled bool `sentinel:"" protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MlockEnabledReply) Reset() { *m = MlockEnabledReply{} }
-func (m *MlockEnabledReply) String() string { return proto.CompactTextString(m) }
-func (*MlockEnabledReply) ProtoMessage() {}
-func (*MlockEnabledReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{38}
-}
-
-func (m *MlockEnabledReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MlockEnabledReply.Unmarshal(m, b)
-}
-func (m *MlockEnabledReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MlockEnabledReply.Marshal(b, m, deterministic)
-}
-func (m *MlockEnabledReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MlockEnabledReply.Merge(m, src)
-}
-func (m *MlockEnabledReply) XXX_Size() int {
- return xxx_messageInfo_MlockEnabledReply.Size(m)
-}
-func (m *MlockEnabledReply) XXX_DiscardUnknown() {
- xxx_messageInfo_MlockEnabledReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MlockEnabledReply proto.InternalMessageInfo
-
-func (m *MlockEnabledReply) GetEnabled() bool {
- if m != nil {
- return m.Enabled
- }
- return false
-}
-
-type LocalMountReply struct {
- Local bool `sentinel:"" protobuf:"varint,1,opt,name=local,proto3" json:"local,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LocalMountReply) Reset() { *m = LocalMountReply{} }
-func (m *LocalMountReply) String() string { return proto.CompactTextString(m) }
-func (*LocalMountReply) ProtoMessage() {}
-func (*LocalMountReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{39}
-}
-
-func (m *LocalMountReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LocalMountReply.Unmarshal(m, b)
-}
-func (m *LocalMountReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LocalMountReply.Marshal(b, m, deterministic)
-}
-func (m *LocalMountReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LocalMountReply.Merge(m, src)
-}
-func (m *LocalMountReply) XXX_Size() int {
- return xxx_messageInfo_LocalMountReply.Size(m)
-}
-func (m *LocalMountReply) XXX_DiscardUnknown() {
- xxx_messageInfo_LocalMountReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LocalMountReply proto.InternalMessageInfo
-
-func (m *LocalMountReply) GetLocal() bool {
- if m != nil {
- return m.Local
- }
- return false
-}
-
-type EntityInfoArgs struct {
- EntityID string `sentinel:"" protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EntityInfoArgs) Reset() { *m = EntityInfoArgs{} }
-func (m *EntityInfoArgs) String() string { return proto.CompactTextString(m) }
-func (*EntityInfoArgs) ProtoMessage() {}
-func (*EntityInfoArgs) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{40}
-}
-
-func (m *EntityInfoArgs) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EntityInfoArgs.Unmarshal(m, b)
-}
-func (m *EntityInfoArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EntityInfoArgs.Marshal(b, m, deterministic)
-}
-func (m *EntityInfoArgs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EntityInfoArgs.Merge(m, src)
-}
-func (m *EntityInfoArgs) XXX_Size() int {
- return xxx_messageInfo_EntityInfoArgs.Size(m)
-}
-func (m *EntityInfoArgs) XXX_DiscardUnknown() {
- xxx_messageInfo_EntityInfoArgs.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EntityInfoArgs proto.InternalMessageInfo
-
-func (m *EntityInfoArgs) GetEntityID() string {
- if m != nil {
- return m.EntityID
- }
- return ""
-}
-
-type EntityInfoReply struct {
- Entity *logical.Entity `sentinel:"" protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
- Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EntityInfoReply) Reset() { *m = EntityInfoReply{} }
-func (m *EntityInfoReply) String() string { return proto.CompactTextString(m) }
-func (*EntityInfoReply) ProtoMessage() {}
-func (*EntityInfoReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{41}
-}
-
-func (m *EntityInfoReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EntityInfoReply.Unmarshal(m, b)
-}
-func (m *EntityInfoReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EntityInfoReply.Marshal(b, m, deterministic)
-}
-func (m *EntityInfoReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EntityInfoReply.Merge(m, src)
-}
-func (m *EntityInfoReply) XXX_Size() int {
- return xxx_messageInfo_EntityInfoReply.Size(m)
-}
-func (m *EntityInfoReply) XXX_DiscardUnknown() {
- xxx_messageInfo_EntityInfoReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EntityInfoReply proto.InternalMessageInfo
-
-func (m *EntityInfoReply) GetEntity() *logical.Entity {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *EntityInfoReply) GetErr() string {
- if m != nil {
- return m.Err
- }
- return ""
-}
-
-type PluginEnvReply struct {
- PluginEnvironment *logical.PluginEnvironment `sentinel:"" protobuf:"bytes,1,opt,name=plugin_environment,json=pluginEnvironment,proto3" json:"plugin_environment,omitempty"`
- Err string `sentinel:"" protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PluginEnvReply) Reset() { *m = PluginEnvReply{} }
-func (m *PluginEnvReply) String() string { return proto.CompactTextString(m) }
-func (*PluginEnvReply) ProtoMessage() {}
-func (*PluginEnvReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{42}
-}
-
-func (m *PluginEnvReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PluginEnvReply.Unmarshal(m, b)
-}
-func (m *PluginEnvReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PluginEnvReply.Marshal(b, m, deterministic)
-}
-func (m *PluginEnvReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PluginEnvReply.Merge(m, src)
-}
-func (m *PluginEnvReply) XXX_Size() int {
- return xxx_messageInfo_PluginEnvReply.Size(m)
-}
-func (m *PluginEnvReply) XXX_DiscardUnknown() {
- xxx_messageInfo_PluginEnvReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PluginEnvReply proto.InternalMessageInfo
-
-func (m *PluginEnvReply) GetPluginEnvironment() *logical.PluginEnvironment {
- if m != nil {
- return m.PluginEnvironment
- }
- return nil
-}
-
-func (m *PluginEnvReply) GetErr() string {
- if m != nil {
- return m.Err
- }
- return ""
-}
-
-type Connection struct {
- // RemoteAddr is the network address that sent the request.
- RemoteAddr string `sentinel:"" protobuf:"bytes,1,opt,name=remote_addr,json=remoteAddr,proto3" json:"remote_addr,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Connection) Reset() { *m = Connection{} }
-func (m *Connection) String() string { return proto.CompactTextString(m) }
-func (*Connection) ProtoMessage() {}
-func (*Connection) Descriptor() ([]byte, []int) {
- return fileDescriptor_25821d34acc7c5ef, []int{43}
-}
-
-func (m *Connection) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Connection.Unmarshal(m, b)
-}
-func (m *Connection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Connection.Marshal(b, m, deterministic)
-}
-func (m *Connection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Connection.Merge(m, src)
-}
-func (m *Connection) XXX_Size() int {
- return xxx_messageInfo_Connection.Size(m)
-}
-func (m *Connection) XXX_DiscardUnknown() {
- xxx_messageInfo_Connection.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Connection proto.InternalMessageInfo
-
-func (m *Connection) GetRemoteAddr() string {
- if m != nil {
- return m.RemoteAddr
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*Empty)(nil), "pb.Empty")
- proto.RegisterType((*Header)(nil), "pb.Header")
- proto.RegisterType((*ProtoError)(nil), "pb.ProtoError")
- proto.RegisterType((*Paths)(nil), "pb.Paths")
- proto.RegisterType((*Request)(nil), "pb.Request")
- proto.RegisterMapType((map[string]*Header)(nil), "pb.Request.HeadersEntry")
- proto.RegisterType((*Auth)(nil), "pb.Auth")
- proto.RegisterMapType((map[string]string)(nil), "pb.Auth.MetadataEntry")
- proto.RegisterType((*TokenEntry)(nil), "pb.TokenEntry")
- proto.RegisterMapType((map[string]string)(nil), "pb.TokenEntry.MetaEntry")
- proto.RegisterType((*LeaseOptions)(nil), "pb.LeaseOptions")
- proto.RegisterType((*Secret)(nil), "pb.Secret")
- proto.RegisterType((*Response)(nil), "pb.Response")
- proto.RegisterType((*ResponseWrapInfo)(nil), "pb.ResponseWrapInfo")
- proto.RegisterType((*RequestWrapInfo)(nil), "pb.RequestWrapInfo")
- proto.RegisterType((*HandleRequestArgs)(nil), "pb.HandleRequestArgs")
- proto.RegisterType((*HandleRequestReply)(nil), "pb.HandleRequestReply")
- proto.RegisterType((*SpecialPathsReply)(nil), "pb.SpecialPathsReply")
- proto.RegisterType((*HandleExistenceCheckArgs)(nil), "pb.HandleExistenceCheckArgs")
- proto.RegisterType((*HandleExistenceCheckReply)(nil), "pb.HandleExistenceCheckReply")
- proto.RegisterType((*SetupArgs)(nil), "pb.SetupArgs")
- proto.RegisterMapType((map[string]string)(nil), "pb.SetupArgs.ConfigEntry")
- proto.RegisterType((*SetupReply)(nil), "pb.SetupReply")
- proto.RegisterType((*TypeReply)(nil), "pb.TypeReply")
- proto.RegisterType((*InvalidateKeyArgs)(nil), "pb.InvalidateKeyArgs")
- proto.RegisterType((*StorageEntry)(nil), "pb.StorageEntry")
- proto.RegisterType((*StorageListArgs)(nil), "pb.StorageListArgs")
- proto.RegisterType((*StorageListReply)(nil), "pb.StorageListReply")
- proto.RegisterType((*StorageGetArgs)(nil), "pb.StorageGetArgs")
- proto.RegisterType((*StorageGetReply)(nil), "pb.StorageGetReply")
- proto.RegisterType((*StoragePutArgs)(nil), "pb.StoragePutArgs")
- proto.RegisterType((*StoragePutReply)(nil), "pb.StoragePutReply")
- proto.RegisterType((*StorageDeleteArgs)(nil), "pb.StorageDeleteArgs")
- proto.RegisterType((*StorageDeleteReply)(nil), "pb.StorageDeleteReply")
- proto.RegisterType((*TTLReply)(nil), "pb.TTLReply")
- proto.RegisterType((*SudoPrivilegeArgs)(nil), "pb.SudoPrivilegeArgs")
- proto.RegisterType((*SudoPrivilegeReply)(nil), "pb.SudoPrivilegeReply")
- proto.RegisterType((*TaintedReply)(nil), "pb.TaintedReply")
- proto.RegisterType((*CachingDisabledReply)(nil), "pb.CachingDisabledReply")
- proto.RegisterType((*ReplicationStateReply)(nil), "pb.ReplicationStateReply")
- proto.RegisterType((*ResponseWrapDataArgs)(nil), "pb.ResponseWrapDataArgs")
- proto.RegisterType((*ResponseWrapDataReply)(nil), "pb.ResponseWrapDataReply")
- proto.RegisterType((*MlockEnabledReply)(nil), "pb.MlockEnabledReply")
- proto.RegisterType((*LocalMountReply)(nil), "pb.LocalMountReply")
- proto.RegisterType((*EntityInfoArgs)(nil), "pb.EntityInfoArgs")
- proto.RegisterType((*EntityInfoReply)(nil), "pb.EntityInfoReply")
- proto.RegisterType((*PluginEnvReply)(nil), "pb.PluginEnvReply")
- proto.RegisterType((*Connection)(nil), "pb.Connection")
-}
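
Beyond wiring up serialization, this init makes every message resolvable by its full protobuf name at runtime. A sketch against the golang/protobuf runtime this file targets (the Vault import path is assumed):

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"

	// Blank import (path assumed) so the init above runs and registers the types.
	_ "github.com/hashicorp/vault/logical/plugin/pb"
)

func main() {
	// proto.MessageType returns the pointer type registered under the full
	// protobuf name; instantiate a fresh message from it via reflection.
	t := proto.MessageType("pb.ResponseWrapDataArgs")
	msg := reflect.New(t.Elem()).Interface().(proto.Message)
	fmt.Printf("resolved %T\n", msg)
}
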
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// BackendClient is the client API for Backend service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type BackendClient interface {
- // HandleRequest is used to handle a request and generate a response.
- // Plugins must check the operation type and handle it appropriately.
- HandleRequest(ctx context.Context, in *HandleRequestArgs, opts ...grpc.CallOption) (*HandleRequestReply, error)
- // SpecialPaths returns a list of paths that are special in some way.
- // See PathType for the types of special paths. The key is the type
- // of the special path, and the value is a list of paths for this type.
- // This is not a regular expression but is an exact match. If the path
- // ends in '*' then it is a prefix-based match. The '*' can only appear
- // at the end.
- SpecialPaths(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*SpecialPathsReply, error)
- // HandleExistenceCheck is used to handle a request and generate a response
- // indicating whether the given path exists or not; this is used to
- // understand whether the request must have a Create or Update capability
- // ACL applied. The first bool indicates whether an existence check
- // function was found for the backend; the second indicates whether, if an
- // existence check function was found, the item exists or not.
- HandleExistenceCheck(ctx context.Context, in *HandleExistenceCheckArgs, opts ...grpc.CallOption) (*HandleExistenceCheckReply, error)
- // Cleanup is invoked during an unmount of a backend to allow it to
- // handle any cleanup like connection closing or releasing of file handles.
- // Cleanup is called right before Vault closes the plugin process.
- Cleanup(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
- // InvalidateKey may be invoked when an object is modified that belongs
- // to the backend. The backend can use this to clear any caches or reset
- // internal state as needed.
- InvalidateKey(ctx context.Context, in *InvalidateKeyArgs, opts ...grpc.CallOption) (*Empty, error)
- // Setup is used to set up the backend based on the provided backend
- // configuration. The plugin's setup implementation should use the provided
- // broker_id to create a connection back to Vault for use with the Storage
- // and SystemView clients.
- Setup(ctx context.Context, in *SetupArgs, opts ...grpc.CallOption) (*SetupReply, error)
- // Type returns the BackendType for the particular backend
- Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeReply, error)
-}
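
Driving the Backend service follows the usual gRPC client pattern: wrap an established *grpc.ClientConn and pass a context per call. A hedged sketch (the address is a placeholder, since under go-plugin the connection to the plugin process is brokered for you, and the import path is assumed):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

func main() {
	// Placeholder address; go-plugin normally hands the host a ready connection.
	conn, err := grpc.Dial("127.0.0.1:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn)
	reply, err := client.Type(context.Background(), &pb.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("type reply: %+v", reply) // avoids assuming TypeReply's field names
}
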
-
-type backendClient struct {
- cc *grpc.ClientConn
-}
-
-func NewBackendClient(cc *grpc.ClientConn) BackendClient {
- return &backendClient{cc}
-}
-
-func (c *backendClient) HandleRequest(ctx context.Context, in *HandleRequestArgs, opts ...grpc.CallOption) (*HandleRequestReply, error) {
- out := new(HandleRequestReply)
- err := c.cc.Invoke(ctx, "/pb.Backend/HandleRequest", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *backendClient) SpecialPaths(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*SpecialPathsReply, error) {
- out := new(SpecialPathsReply)
- err := c.cc.Invoke(ctx, "/pb.Backend/SpecialPaths", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *backendClient) HandleExistenceCheck(ctx context.Context, in *HandleExistenceCheckArgs, opts ...grpc.CallOption) (*HandleExistenceCheckReply, error) {
- out := new(HandleExistenceCheckReply)
- err := c.cc.Invoke(ctx, "/pb.Backend/HandleExistenceCheck", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *backendClient) Cleanup(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
- out := new(Empty)
- err := c.cc.Invoke(ctx, "/pb.Backend/Cleanup", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *backendClient) InvalidateKey(ctx context.Context, in *InvalidateKeyArgs, opts ...grpc.CallOption) (*Empty, error) {
- out := new(Empty)
- err := c.cc.Invoke(ctx, "/pb.Backend/InvalidateKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *backendClient) Setup(ctx context.Context, in *SetupArgs, opts ...grpc.CallOption) (*SetupReply, error) {
- out := new(SetupReply)
- err := c.cc.Invoke(ctx, "/pb.Backend/Setup", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *backendClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeReply, error) {
- out := new(TypeReply)
- err := c.cc.Invoke(ctx, "/pb.Backend/Type", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// BackendServer is the server API for Backend service.
-type BackendServer interface {
- // HandleRequest is used to handle a request and generate a response.
- // Plugins must check the operation type and handle it appropriately.
- HandleRequest(context.Context, *HandleRequestArgs) (*HandleRequestReply, error)
- // SpecialPaths returns a list of paths that are special in some way.
- // See PathType for the types of special paths. The key is the type
- // of the special path, and the value is a list of paths for this type.
- // This is not a regular expression but is an exact match. If the path
- // ends in '*' then it is a prefix-based match. The '*' can only appear
- // at the end.
- SpecialPaths(context.Context, *Empty) (*SpecialPathsReply, error)
- // HandleExistenceCheck is used to handle a request and generate a response
- // indicating whether the given path exists or not; this is used to
- // understand whether the request must have a Create or Update capability
- // ACL applied. The first bool indicates whether an existence check
- // function was found for the backend; the second indicates whether, if an
- // existence check function was found, the item exists or not.
- HandleExistenceCheck(context.Context, *HandleExistenceCheckArgs) (*HandleExistenceCheckReply, error)
- // Cleanup is invoked during an unmount of a backend to allow it to
- // handle any cleanup like connection closing or releasing of file handles.
- // Cleanup is called right before Vault closes the plugin process.
- Cleanup(context.Context, *Empty) (*Empty, error)
- // InvalidateKey may be invoked when an object is modified that belongs
- // to the backend. The backend can use this to clear any caches or reset
- // internal state as needed.
- InvalidateKey(context.Context, *InvalidateKeyArgs) (*Empty, error)
- // Setup is used to set up the backend based on the provided backend
- // configuration. The plugin's setup implementation should use the provided
- // broker_id to create a connection back to Vault for use with the Storage
- // and SystemView clients.
- Setup(context.Context, *SetupArgs) (*SetupReply, error)
- // Type returns the BackendType for the particular backend
- Type(context.Context, *Empty) (*TypeReply, error)
-}
-
-func RegisterBackendServer(s *grpc.Server, srv BackendServer) {
- s.RegisterService(&_Backend_serviceDesc, srv)
-}
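
A BackendServer implementation is registered once; the generated handlers below then route each incoming full method name to it. A compact no-op sketch (import path assumed; a real plugin would delegate every call to its logical.Backend):

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

// stub satisfies pb.BackendServer with empty answers.
type stub struct{}

func (stub) HandleRequest(context.Context, *pb.HandleRequestArgs) (*pb.HandleRequestReply, error) {
	return &pb.HandleRequestReply{}, nil
}
func (stub) SpecialPaths(context.Context, *pb.Empty) (*pb.SpecialPathsReply, error) {
	return &pb.SpecialPathsReply{}, nil
}
func (stub) HandleExistenceCheck(context.Context, *pb.HandleExistenceCheckArgs) (*pb.HandleExistenceCheckReply, error) {
	return &pb.HandleExistenceCheckReply{}, nil
}
func (stub) Cleanup(context.Context, *pb.Empty) (*pb.Empty, error) { return &pb.Empty{}, nil }
func (stub) InvalidateKey(context.Context, *pb.InvalidateKeyArgs) (*pb.Empty, error) {
	return &pb.Empty{}, nil
}
func (stub) Setup(context.Context, *pb.SetupArgs) (*pb.SetupReply, error) {
	return &pb.SetupReply{}, nil
}
func (stub) Type(context.Context, *pb.Empty) (*pb.TypeReply, error) { return &pb.TypeReply{}, nil }

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:9090") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterBackendServer(s, stub{})
	log.Fatal(s.Serve(lis))
}
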
-
-func _Backend_HandleRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(HandleRequestArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(BackendServer).HandleRequest(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Backend/HandleRequest",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(BackendServer).HandleRequest(ctx, req.(*HandleRequestArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
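
Each generated handler has the same shape: decode into the typed request, then either invoke the service method directly or hand control to an installed interceptor along with a closure over the real method. That indirection is what enables cross-cutting middleware; for example, a logging interceptor of our own:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logging matches grpc.UnaryServerInterceptor: the generated handlers above
// pass it the decoded request, the method info, and a closure that calls the
// real implementation.
func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("-> %s", info.FullMethod) // e.g. "/pb.Backend/HandleRequest"
	resp, err := handler(ctx, req)
	log.Printf("<- %s err=%v", info.FullMethod, err)
	return resp, err
}

func main() {
	// Installed at construction time; every method in this file then flows
	// through the interceptor.
	_ = grpc.NewServer(grpc.UnaryInterceptor(logging))
}
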
-
-func _Backend_SpecialPaths_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(BackendServer).SpecialPaths(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Backend/SpecialPaths",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(BackendServer).SpecialPaths(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Backend_HandleExistenceCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(HandleExistenceCheckArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(BackendServer).HandleExistenceCheck(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Backend/HandleExistenceCheck",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(BackendServer).HandleExistenceCheck(ctx, req.(*HandleExistenceCheckArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Backend_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(BackendServer).Cleanup(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Backend/Cleanup",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(BackendServer).Cleanup(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Backend_InvalidateKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(InvalidateKeyArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(BackendServer).InvalidateKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Backend/InvalidateKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(BackendServer).InvalidateKey(ctx, req.(*InvalidateKeyArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Backend_Setup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SetupArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(BackendServer).Setup(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Backend/Setup",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(BackendServer).Setup(ctx, req.(*SetupArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Backend_Type_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(BackendServer).Type(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Backend/Type",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(BackendServer).Type(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Backend_serviceDesc = grpc.ServiceDesc{
- ServiceName: "pb.Backend",
- HandlerType: (*BackendServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "HandleRequest",
- Handler: _Backend_HandleRequest_Handler,
- },
- {
- MethodName: "SpecialPaths",
- Handler: _Backend_SpecialPaths_Handler,
- },
- {
- MethodName: "HandleExistenceCheck",
- Handler: _Backend_HandleExistenceCheck_Handler,
- },
- {
- MethodName: "Cleanup",
- Handler: _Backend_Cleanup_Handler,
- },
- {
- MethodName: "InvalidateKey",
- Handler: _Backend_InvalidateKey_Handler,
- },
- {
- MethodName: "Setup",
- Handler: _Backend_Setup_Handler,
- },
- {
- MethodName: "Type",
- Handler: _Backend_Type_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "logical/plugin/pb/backend.proto",
-}
-
-// StorageClient is the client API for Storage service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type StorageClient interface {
- List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error)
- Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error)
- Put(ctx context.Context, in *StoragePutArgs, opts ...grpc.CallOption) (*StoragePutReply, error)
- Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grpc.CallOption) (*StorageDeleteReply, error)
-}
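
The Storage service is how a plugin reads and writes its mount's storage through the host. A round-trip sketch (the field names on StorageEntry and the Storage* argument messages are assumptions, since their definitions fall outside this hunk; the address and key are placeholders):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:9090", grpc.WithInsecure()) // placeholder
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	st := pb.NewStorageClient(conn)
	ctx := context.Background()

	// Write an entry, then read it back over the same brokered connection.
	if _, err := st.Put(ctx, &pb.StoragePutArgs{
		Entry: &pb.StorageEntry{Key: "config/example", Value: []byte("hello")},
	}); err != nil {
		log.Fatal(err)
	}
	got, err := st.Get(ctx, &pb.StorageGetArgs{Key: "config/example"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("value: %s", got.GetEntry().GetValue())
}
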
-
-type storageClient struct {
- cc *grpc.ClientConn
-}
-
-func NewStorageClient(cc *grpc.ClientConn) StorageClient {
- return &storageClient{cc}
-}
-
-func (c *storageClient) List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error) {
- out := new(StorageListReply)
- err := c.cc.Invoke(ctx, "/pb.Storage/List", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error) {
- out := new(StorageGetReply)
- err := c.cc.Invoke(ctx, "/pb.Storage/Get", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) Put(ctx context.Context, in *StoragePutArgs, opts ...grpc.CallOption) (*StoragePutReply, error) {
- out := new(StoragePutReply)
- err := c.cc.Invoke(ctx, "/pb.Storage/Put", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grpc.CallOption) (*StorageDeleteReply, error) {
- out := new(StorageDeleteReply)
- err := c.cc.Invoke(ctx, "/pb.Storage/Delete", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// StorageServer is the server API for Storage service.
-type StorageServer interface {
- List(context.Context, *StorageListArgs) (*StorageListReply, error)
- Get(context.Context, *StorageGetArgs) (*StorageGetReply, error)
- Put(context.Context, *StoragePutArgs) (*StoragePutReply, error)
- Delete(context.Context, *StorageDeleteArgs) (*StorageDeleteReply, error)
-}
-
-func RegisterStorageServer(s *grpc.Server, srv StorageServer) {
- s.RegisterService(&_Storage_serviceDesc, srv)
-}
-
-func _Storage_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(StorageListArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).List(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Storage/List",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).List(ctx, req.(*StorageListArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(StorageGetArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).Get(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Storage/Get",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).Get(ctx, req.(*StorageGetArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(StoragePutArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).Put(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Storage/Put",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).Put(ctx, req.(*StoragePutArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(StorageDeleteArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).Delete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.Storage/Delete",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).Delete(ctx, req.(*StorageDeleteArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Storage_serviceDesc = grpc.ServiceDesc{
- ServiceName: "pb.Storage",
- HandlerType: (*StorageServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "List",
- Handler: _Storage_List_Handler,
- },
- {
- MethodName: "Get",
- Handler: _Storage_Get_Handler,
- },
- {
- MethodName: "Put",
- Handler: _Storage_Put_Handler,
- },
- {
- MethodName: "Delete",
- Handler: _Storage_Delete_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "logical/plugin/pb/backend.proto",
-}
-
-// SystemViewClient is the client API for SystemView service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type SystemViewClient interface {
- // DefaultLeaseTTL returns the default lease TTL set in Vault configuration
- DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error)
- // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
- // authors should take care not to issue credentials that last longer than
- // this value, as Vault will revoke them
- MaxLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error)
- // SudoPrivilege returns true if the given path has sudo privileges
- // for the given client token
- SudoPrivilege(ctx context.Context, in *SudoPrivilegeArgs, opts ...grpc.CallOption) (*SudoPrivilegeReply, error)
- // Tainted returns true if the mount is tainted. A mount is tainted if it is in the
- // process of being unmounted. This should only be used in special
- // circumstances; a primary use-case is as a guard in revocation functions.
- // If revocation of a backend's leases fails it can keep the unmounting
- // process from being successful. If the reason for this failure is not
- // relevant when the mount is tainted (for instance, saving a CRL to disk
- // when the stored CRL will be removed during the unmounting process
- // anyway), we can ignore the errors to allow unmounting to complete.
- Tainted(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TaintedReply, error)
- // CachingDisabled returns true if caching is disabled. If true, no caches
- // should be used, despite known slowdowns.
- CachingDisabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CachingDisabledReply, error)
- // ReplicationState indicates the state of cluster replication
- ReplicationState(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReplicationStateReply, error)
- // ResponseWrapData wraps the given data in a cubbyhole and returns the
- // token used to unwrap.
- ResponseWrapData(ctx context.Context, in *ResponseWrapDataArgs, opts ...grpc.CallOption) (*ResponseWrapDataReply, error)
- // MlockEnabled returns the configuration setting for enabling mlock on
- // plugins.
- MlockEnabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MlockEnabledReply, error)
- // LocalMount, when run from a system view attached to a request, indicates
- // whether the request is affecting a local mount or not
- LocalMount(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*LocalMountReply, error)
- // EntityInfo returns the basic entity information for the given entity id
- EntityInfo(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*EntityInfoReply, error)
- // PluginEnv returns Vault environment information used by plugins
- PluginEnv(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PluginEnvReply, error)
-}
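
SystemView mirrors Vault's logical.SystemView across the plugin boundary, so a plugin can query host configuration and request response wrapping. A sketch of two calls (the import path, the TTL encoding, and ResponseWrapInfo's token field are all assumptions; the address is a placeholder):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	"github.com/hashicorp/vault/logical/plugin/pb" // assumed import path
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:9090", grpc.WithInsecure()) // placeholder
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	sv := pb.NewSystemViewClient(conn)
	ctx := context.Background()

	ttl, err := sv.DefaultLeaseTTL(ctx, &pb.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("default lease TTL reply: %+v", ttl)

	// Wrap a payload in a cubbyhole; the reply's WrapInfo carries the
	// single-use token needed to unwrap it. The TTL encoding is assumed to
	// follow Go's time.Duration (nanoseconds).
	wrapped, err := sv.ResponseWrapData(ctx, &pb.ResponseWrapDataArgs{
		Data: `{"example":"payload"}`,
		TTL:  int64(time.Minute),
	})
	if err != nil {
		log.Fatal(err)
	}
	if wrapped.Err != "" {
		log.Fatal(wrapped.Err)
	}
	log.Printf("wrapping token: %s", wrapped.GetWrapInfo().GetToken())
}
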
-
-type systemViewClient struct {
- cc *grpc.ClientConn
-}
-
-func NewSystemViewClient(cc *grpc.ClientConn) SystemViewClient {
- return &systemViewClient{cc}
-}
-
-func (c *systemViewClient) DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) {
- out := new(TTLReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/DefaultLeaseTTL", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) MaxLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) {
- out := new(TTLReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/MaxLeaseTTL", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) SudoPrivilege(ctx context.Context, in *SudoPrivilegeArgs, opts ...grpc.CallOption) (*SudoPrivilegeReply, error) {
- out := new(SudoPrivilegeReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/SudoPrivilege", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) Tainted(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TaintedReply, error) {
- out := new(TaintedReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/Tainted", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) CachingDisabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CachingDisabledReply, error) {
- out := new(CachingDisabledReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/CachingDisabled", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) ReplicationState(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReplicationStateReply, error) {
- out := new(ReplicationStateReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/ReplicationState", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) ResponseWrapData(ctx context.Context, in *ResponseWrapDataArgs, opts ...grpc.CallOption) (*ResponseWrapDataReply, error) {
- out := new(ResponseWrapDataReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/ResponseWrapData", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) MlockEnabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MlockEnabledReply, error) {
- out := new(MlockEnabledReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/MlockEnabled", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) LocalMount(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*LocalMountReply, error) {
- out := new(LocalMountReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/LocalMount", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) EntityInfo(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*EntityInfoReply, error) {
- out := new(EntityInfoReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/EntityInfo", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *systemViewClient) PluginEnv(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PluginEnvReply, error) {
- out := new(PluginEnvReply)
- err := c.cc.Invoke(ctx, "/pb.SystemView/PluginEnv", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// SystemViewServer is the server API for SystemView service.
-type SystemViewServer interface {
- // DefaultLeaseTTL returns the default lease TTL set in Vault configuration
- DefaultLeaseTTL(context.Context, *Empty) (*TTLReply, error)
- // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
- // authors should take care not to issue credentials that last longer than
- // this value, as Vault will revoke them
- MaxLeaseTTL(context.Context, *Empty) (*TTLReply, error)
- // SudoPrivilege returns true if the given path has sudo privileges
- // for the given client token
- SudoPrivilege(context.Context, *SudoPrivilegeArgs) (*SudoPrivilegeReply, error)
- // Tainted returns true if the mount is tainted. A mount is tainted if it is in the
- // process of being unmounted. This should only be used in special
- // circumstances; a primary use-case is as a guard in revocation functions.
- // If revocation of a backend's leases fails it can keep the unmounting
- // process from being successful. If the reason for this failure is not
- // relevant when the mount is tainted (for instance, saving a CRL to disk
- // when the stored CRL will be removed during the unmounting process
- // anyway), we can ignore the errors to allow unmounting to complete.
- Tainted(context.Context, *Empty) (*TaintedReply, error)
- // CachingDisabled returns true if caching is disabled. If true, no caches
- // should be used, despite known slowdowns.
- CachingDisabled(context.Context, *Empty) (*CachingDisabledReply, error)
- // ReplicationState indicates the state of cluster replication
- ReplicationState(context.Context, *Empty) (*ReplicationStateReply, error)
- // ResponseWrapData wraps the given data in a cubbyhole and returns the
- // token used to unwrap.
- ResponseWrapData(context.Context, *ResponseWrapDataArgs) (*ResponseWrapDataReply, error)
- // MlockEnabled returns the configuration setting for enabling mlock on
- // plugins.
- MlockEnabled(context.Context, *Empty) (*MlockEnabledReply, error)
- // LocalMount, when run from a system view attached to a request, indicates
- // whether the request is affecting a local mount or not
- LocalMount(context.Context, *Empty) (*LocalMountReply, error)
- // EntityInfo returns the basic entity information for the given entity id
- EntityInfo(context.Context, *EntityInfoArgs) (*EntityInfoReply, error)
- // PluginEnv returns Vault environment information used by plugins
- PluginEnv(context.Context, *Empty) (*PluginEnvReply, error)
-}
-
-func RegisterSystemViewServer(s *grpc.Server, srv SystemViewServer) {
- s.RegisterService(&_SystemView_serviceDesc, srv)
-}
-
-func _SystemView_DefaultLeaseTTL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).DefaultLeaseTTL(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/DefaultLeaseTTL",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).DefaultLeaseTTL(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_MaxLeaseTTL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).MaxLeaseTTL(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/MaxLeaseTTL",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).MaxLeaseTTL(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_SudoPrivilege_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SudoPrivilegeArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).SudoPrivilege(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/SudoPrivilege",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).SudoPrivilege(ctx, req.(*SudoPrivilegeArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_Tainted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).Tainted(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/Tainted",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).Tainted(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_CachingDisabled_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).CachingDisabled(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/CachingDisabled",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).CachingDisabled(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_ReplicationState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).ReplicationState(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/ReplicationState",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).ReplicationState(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_ResponseWrapData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ResponseWrapDataArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).ResponseWrapData(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/ResponseWrapData",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).ResponseWrapData(ctx, req.(*ResponseWrapDataArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_MlockEnabled_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).MlockEnabled(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/MlockEnabled",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).MlockEnabled(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_LocalMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).LocalMount(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/LocalMount",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).LocalMount(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_EntityInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(EntityInfoArgs)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).EntityInfo(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/EntityInfo",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).EntityInfo(ctx, req.(*EntityInfoArgs))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _SystemView_PluginEnv_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SystemViewServer).PluginEnv(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/pb.SystemView/PluginEnv",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SystemViewServer).PluginEnv(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _SystemView_serviceDesc = grpc.ServiceDesc{
- ServiceName: "pb.SystemView",
- HandlerType: (*SystemViewServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "DefaultLeaseTTL",
- Handler: _SystemView_DefaultLeaseTTL_Handler,
- },
- {
- MethodName: "MaxLeaseTTL",
- Handler: _SystemView_MaxLeaseTTL_Handler,
- },
- {
- MethodName: "SudoPrivilege",
- Handler: _SystemView_SudoPrivilege_Handler,
- },
- {
- MethodName: "Tainted",
- Handler: _SystemView_Tainted_Handler,
- },
- {
- MethodName: "CachingDisabled",
- Handler: _SystemView_CachingDisabled_Handler,
- },
- {
- MethodName: "ReplicationState",
- Handler: _SystemView_ReplicationState_Handler,
- },
- {
- MethodName: "ResponseWrapData",
- Handler: _SystemView_ResponseWrapData_Handler,
- },
- {
- MethodName: "MlockEnabled",
- Handler: _SystemView_MlockEnabled_Handler,
- },
- {
- MethodName: "LocalMount",
- Handler: _SystemView_LocalMount_Handler,
- },
- {
- MethodName: "EntityInfo",
- Handler: _SystemView_EntityInfo_Handler,
- },
- {
- MethodName: "PluginEnv",
- Handler: _SystemView_PluginEnv_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "logical/plugin/pb/backend.proto",
-}
-
-func init() { proto.RegisterFile("logical/plugin/pb/backend.proto", fileDescriptor_25821d34acc7c5ef) }
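
The byte slice that follows is the gzip-compressed FileDescriptorProto for backend.proto, stored by the RegisterFile call above under the proto file's path. It can be recovered at runtime; a sketch (import path assumed):

package main

import (
	"bytes"
	"compress/gzip"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"

	// Blank import (path assumed) so the RegisterFile init above runs.
	_ "github.com/hashicorp/vault/logical/plugin/pb"
)

func main() {
	gz := proto.FileDescriptor("logical/plugin/pb/backend.proto")
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		log.Fatal(err)
	}
	raw, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decoded descriptor: %d bytes", len(raw))
}
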
-
-var fileDescriptor_25821d34acc7c5ef = []byte{
- // 2483 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0xcd, 0x72, 0x1b, 0xc7,
- 0x11, 0x2e, 0x00, 0xc4, 0x5f, 0xe3, 0x8f, 0x18, 0xd1, 0xcc, 0x0a, 0x96, 0x23, 0x78, 0x1d, 0x49,
- 0xb4, 0x22, 0x81, 0x12, 0x1d, 0xc7, 0x72, 0x52, 0x76, 0x8a, 0xa6, 0x68, 0x99, 0x31, 0x69, 0xb3,
- 0x96, 0x50, 0x9c, 0xbf, 0x2a, 0x78, 0xb0, 0x3b, 0x04, 0xb7, 0xb8, 0xd8, 0xdd, 0xcc, 0xce, 0x52,
- 0xc4, 0x29, 0x6f, 0x91, 0xd7, 0xc8, 0x35, 0x95, 0x4b, 0x6e, 0xa9, 0x54, 0x72, 0xce, 0x6b, 0xe4,
- 0x19, 0x52, 0xd3, 0x33, 0xfb, 0x07, 0x80, 0x96, 0x5c, 0x95, 0xdc, 0x66, 0xba, 0x7b, 0x7a, 0x66,
- 0x7a, 0xbe, 0xfe, 0xba, 0x17, 0x80, 0xbb, 0x5e, 0x30, 0x73, 0x6d, 0xea, 0xed, 0x86, 0x5e, 0x3c,
- 0x73, 0xfd, 0xdd, 0x70, 0xba, 0x3b, 0xa5, 0xf6, 0x25, 0xf3, 0x9d, 0x51, 0xc8, 0x03, 0x11, 0x90,
- 0x72, 0x38, 0x1d, 0xdc, 0x9d, 0x05, 0xc1, 0xcc, 0x63, 0xbb, 0x28, 0x99, 0xc6, 0xe7, 0xbb, 0xc2,
- 0x9d, 0xb3, 0x48, 0xd0, 0x79, 0xa8, 0x8c, 0x06, 0xdb, 0x89, 0x17, 0xd7, 0x61, 0xbe, 0x70, 0xc5,
- 0x42, 0xcb, 0xb7, 0x8a, 0xde, 0x95, 0xd4, 0xac, 0x43, 0xf5, 0x70, 0x1e, 0x8a, 0x85, 0x39, 0x84,
- 0xda, 0x17, 0x8c, 0x3a, 0x8c, 0x93, 0x6d, 0xa8, 0x5d, 0xe0, 0xc8, 0x28, 0x0d, 0x2b, 0x3b, 0x4d,
- 0x4b, 0xcf, 0xcc, 0xdf, 0x01, 0x9c, 0xca, 0x35, 0x87, 0x9c, 0x07, 0x9c, 0xdc, 0x86, 0x06, 0xe3,
- 0x7c, 0x22, 0x16, 0x21, 0x33, 0x4a, 0xc3, 0xd2, 0x4e, 0xc7, 0xaa, 0x33, 0xce, 0xc7, 0x8b, 0x90,
- 0x91, 0x1f, 0x80, 0x1c, 0x4e, 0xe6, 0xd1, 0xcc, 0x28, 0x0f, 0x4b, 0xd2, 0x03, 0xe3, 0xfc, 0x24,
- 0x9a, 0x25, 0x6b, 0xec, 0xc0, 0x61, 0x46, 0x65, 0x58, 0xda, 0xa9, 0xe0, 0x9a, 0x83, 0xc0, 0x61,
- 0xe6, 0x9f, 0x4a, 0x50, 0x3d, 0xa5, 0xe2, 0x22, 0x22, 0x04, 0x36, 0x78, 0x10, 0x08, 0xbd, 0x39,
- 0x8e, 0xc9, 0x0e, 0xf4, 0x62, 0x9f, 0xc6, 0xe2, 0x42, 0xde, 0xc8, 0xa6, 0x82, 0x39, 0x46, 0x19,
- 0xd5, 0xcb, 0x62, 0xf2, 0x1e, 0x74, 0xbc, 0xc0, 0xa6, 0xde, 0x24, 0x12, 0x01, 0xa7, 0x33, 0xb9,
- 0x8f, 0xb4, 0x6b, 0xa3, 0xf0, 0x4c, 0xc9, 0xc8, 0x43, 0xe8, 0x47, 0x8c, 0x7a, 0x93, 0x57, 0x9c,
- 0x86, 0xa9, 0xe1, 0x86, 0x72, 0x28, 0x15, 0xdf, 0x70, 0x1a, 0x6a, 0x5b, 0xf3, 0x6f, 0x35, 0xa8,
- 0x5b, 0xec, 0x0f, 0x31, 0x8b, 0x04, 0xe9, 0x42, 0xd9, 0x75, 0xf0, 0xb6, 0x4d, 0xab, 0xec, 0x3a,
- 0x64, 0x04, 0xc4, 0x62, 0xa1, 0x27, 0xb7, 0x76, 0x03, 0xff, 0xc0, 0x8b, 0x23, 0xc1, 0xb8, 0xbe,
- 0xf3, 0x1a, 0x0d, 0xb9, 0x03, 0xcd, 0x20, 0x64, 0x1c, 0x65, 0x18, 0x80, 0xa6, 0x95, 0x09, 0xe4,
- 0xc5, 0x43, 0x2a, 0x2e, 0x8c, 0x0d, 0x54, 0xe0, 0x58, 0xca, 0x1c, 0x2a, 0xa8, 0x51, 0x55, 0x32,
- 0x39, 0x26, 0x26, 0xd4, 0x22, 0x66, 0x73, 0x26, 0x8c, 0xda, 0xb0, 0xb4, 0xd3, 0xda, 0x83, 0x51,
- 0x38, 0x1d, 0x9d, 0xa1, 0xc4, 0xd2, 0x1a, 0x72, 0x07, 0x36, 0x64, 0x5c, 0x8c, 0x3a, 0x5a, 0x34,
- 0xa4, 0xc5, 0x7e, 0x2c, 0x2e, 0x2c, 0x94, 0x92, 0x3d, 0xa8, 0xab, 0x37, 0x8d, 0x8c, 0xc6, 0xb0,
- 0xb2, 0xd3, 0xda, 0x33, 0xa4, 0x81, 0xbe, 0xe5, 0x48, 0xc1, 0x20, 0x3a, 0xf4, 0x05, 0x5f, 0x58,
- 0x89, 0x21, 0x79, 0x17, 0xda, 0xb6, 0xe7, 0x32, 0x5f, 0x4c, 0x44, 0x70, 0xc9, 0x7c, 0xa3, 0x89,
- 0x27, 0x6a, 0x29, 0xd9, 0x58, 0x8a, 0xc8, 0x1e, 0xbc, 0x95, 0x37, 0x99, 0x50, 0xdb, 0x66, 0x51,
- 0x14, 0x70, 0x03, 0xd0, 0xf6, 0x56, 0xce, 0x76, 0x5f, 0xab, 0xa4, 0x5b, 0xc7, 0x8d, 0x42, 0x8f,
- 0x2e, 0x26, 0x3e, 0x9d, 0x33, 0xa3, 0xa5, 0xdc, 0x6a, 0xd9, 0x57, 0x74, 0xce, 0xc8, 0x5d, 0x68,
- 0xcd, 0x83, 0xd8, 0x17, 0x93, 0x30, 0x70, 0x7d, 0x61, 0xb4, 0xd1, 0x02, 0x50, 0x74, 0x2a, 0x25,
- 0xe4, 0x1d, 0x50, 0x33, 0x05, 0xc6, 0x8e, 0x8a, 0x2b, 0x4a, 0x10, 0x8e, 0xf7, 0xa0, 0xab, 0xd4,
- 0xe9, 0x79, 0xba, 0x68, 0xd2, 0x41, 0x69, 0x7a, 0x92, 0x27, 0xd0, 0x44, 0x3c, 0xb8, 0xfe, 0x79,
- 0x60, 0xf4, 0x30, 0x6e, 0xb7, 0x72, 0x61, 0x91, 0x98, 0x38, 0xf2, 0xcf, 0x03, 0xab, 0xf1, 0x4a,
- 0x8f, 0xc8, 0x27, 0xf0, 0x76, 0xe1, 0xbe, 0x9c, 0xcd, 0xa9, 0xeb, 0xbb, 0xfe, 0x6c, 0x12, 0x47,
- 0x2c, 0x32, 0x36, 0x11, 0xe1, 0x46, 0xee, 0xd6, 0x56, 0x62, 0xf0, 0x32, 0x62, 0x11, 0x79, 0x1b,
- 0x9a, 0x2a, 0x41, 0x27, 0xae, 0x63, 0xf4, 0xf1, 0x48, 0x0d, 0x25, 0x38, 0x72, 0xc8, 0x03, 0xe8,
- 0x85, 0x81, 0xe7, 0xda, 0x8b, 0x49, 0x70, 0xc5, 0x38, 0x77, 0x1d, 0x66, 0x90, 0x61, 0x69, 0xa7,
- 0x61, 0x75, 0x95, 0xf8, 0x6b, 0x2d, 0x5d, 0x97, 0x1a, 0xb7, 0xd0, 0x70, 0x25, 0x35, 0x46, 0x00,
- 0x76, 0xe0, 0xfb, 0xcc, 0x46, 0xf8, 0x6d, 0xe1, 0x0d, 0xbb, 0xf2, 0x86, 0x07, 0xa9, 0xd4, 0xca,
- 0x59, 0x0c, 0x3e, 0x87, 0x76, 0x1e, 0x0a, 0x64, 0x13, 0x2a, 0x97, 0x6c, 0xa1, 0xe1, 0x2f, 0x87,
- 0x64, 0x08, 0xd5, 0x2b, 0xea, 0xc5, 0x0c, 0x21, 0xaf, 0x81, 0xa8, 0x96, 0x58, 0x4a, 0xf1, 0xb3,
- 0xf2, 0xb3, 0x92, 0xf9, 0xd7, 0x2a, 0x6c, 0x48, 0xf0, 0x91, 0x0f, 0xa1, 0xe3, 0x31, 0x1a, 0xb1,
- 0x49, 0x10, 0xca, 0x0d, 0x22, 0x74, 0xd5, 0xda, 0xdb, 0x94, 0xcb, 0x8e, 0xa5, 0xe2, 0x6b, 0x25,
- 0xb7, 0xda, 0x5e, 0x6e, 0x26, 0x53, 0xda, 0xf5, 0x05, 0xe3, 0x3e, 0xf5, 0x26, 0x98, 0x0c, 0x2a,
- 0xc1, 0xda, 0x89, 0xf0, 0xb9, 0x4c, 0x8a, 0x65, 0x1c, 0x55, 0x56, 0x71, 0x34, 0x80, 0x06, 0xc6,
- 0xce, 0x65, 0x91, 0x4e, 0xf6, 0x74, 0x4e, 0xf6, 0xa0, 0x31, 0x67, 0x82, 0xea, 0x5c, 0x93, 0x29,
- 0xb1, 0x9d, 0xe4, 0xcc, 0xe8, 0x44, 0x2b, 0x54, 0x42, 0xa4, 0x76, 0x2b, 0x19, 0x51, 0x5b, 0xcd,
- 0x88, 0x01, 0x34, 0x52, 0xd0, 0xd5, 0xd5, 0x0b, 0x27, 0x73, 0x49, 0xb3, 0x21, 0xe3, 0x6e, 0xe0,
- 0x18, 0x0d, 0x04, 0x8a, 0x9e, 0x49, 0x92, 0xf4, 0xe3, 0xb9, 0x82, 0x50, 0x53, 0x91, 0xa4, 0x1f,
- 0xcf, 0x57, 0x11, 0x03, 0x4b, 0x88, 0xf9, 0x11, 0x54, 0xa9, 0xe7, 0xd2, 0x08, 0x53, 0x48, 0xbe,
- 0xac, 0xe6, 0xfb, 0xd1, 0xbe, 0x94, 0x5a, 0x4a, 0x49, 0x3e, 0x80, 0xce, 0x8c, 0x07, 0x71, 0x38,
- 0xc1, 0x29, 0x8b, 0x8c, 0x36, 0xde, 0x76, 0xd9, 0xba, 0x8d, 0x46, 0xfb, 0xca, 0x46, 0x66, 0xe0,
- 0x34, 0x88, 0x7d, 0x67, 0x62, 0xbb, 0x0e, 0x8f, 0x8c, 0x0e, 0x06, 0x0f, 0x50, 0x74, 0x20, 0x25,
- 0x32, 0xc5, 0x54, 0x0a, 0xa4, 0x01, 0xee, 0xa2, 0x4d, 0x07, 0xa5, 0xa7, 0x49, 0x94, 0x7f, 0x0c,
- 0xfd, 0xa4, 0x28, 0x65, 0x96, 0x3d, 0xb4, 0xdc, 0x4c, 0x14, 0xa9, 0xf1, 0x0e, 0x6c, 0xb2, 0x6b,
- 0x49, 0xa1, 0xae, 0x98, 0xcc, 0xe9, 0xf5, 0x44, 0x08, 0x4f, 0xa7, 0x54, 0x37, 0x91, 0x9f, 0xd0,
- 0xeb, 0xb1, 0xf0, 0x64, 0xfe, 0xab, 0xdd, 0x31, 0xff, 0xfb, 0x58, 0x8c, 0x9a, 0x28, 0x91, 0xf9,
- 0x3f, 0xf8, 0x39, 0x74, 0x0a, 0x4f, 0xb8, 0x06, 0xc8, 0x5b, 0x79, 0x20, 0x37, 0xf3, 0xe0, 0xfd,
- 0xe7, 0x06, 0x00, 0xbe, 0xa5, 0x5a, 0xba, 0x5c, 0x01, 0xf2, 0x0f, 0x5c, 0x5e, 0xf3, 0xc0, 0x94,
- 0x33, 0x5f, 0x68, 0x30, 0xea, 0xd9, 0x77, 0xe2, 0x30, 0xa9, 0x01, 0xd5, 0x5c, 0x0d, 0x78, 0x04,
- 0x1b, 0x12, 0x73, 0x46, 0x2d, 0xa3, 0xea, 0xec, 0x44, 0x88, 0x4e, 0x85, 0x4c, 0xb4, 0x5a, 0x49,
- 0x84, 0xfa, 0x6a, 0x22, 0xe4, 0x11, 0xd6, 0x28, 0x22, 0xec, 0x3d, 0xe8, 0xd8, 0x9c, 0x61, 0x3d,
- 0x9a, 0xc8, 0xc6, 0x42, 0x23, 0xb0, 0x9d, 0x08, 0xc7, 0xee, 0x9c, 0xc9, 0xf8, 0xc9, 0xc7, 0x00,
- 0x54, 0xc9, 0xe1, 0xda, 0xb7, 0x6a, 0xad, 0x7d, 0x2b, 0xac, 0xee, 0x1e, 0xd3, 0x2c, 0x8e, 0xe3,
- 0x5c, 0x26, 0x74, 0x0a, 0x99, 0x50, 0x80, 0x7b, 0x77, 0x09, 0xee, 0x4b, 0x98, 0xec, 0xad, 0x60,
- 0xf2, 0x5d, 0x68, 0xcb, 0x00, 0x44, 0x21, 0xb5, 0x99, 0x74, 0xb0, 0xa9, 0x02, 0x91, 0xca, 0x8e,
- 0x1c, 0xcc, 0xe0, 0x78, 0x3a, 0x5d, 0x5c, 0x04, 0x1e, 0xcb, 0x48, 0xb8, 0x95, 0xca, 0x8e, 0x1c,
- 0x79, 0x5e, 0x44, 0x15, 0x41, 0x54, 0xe1, 0x78, 0xf0, 0x11, 0x34, 0xd3, 0xa8, 0x7f, 0x2f, 0x30,
- 0xfd, 0xb9, 0x04, 0xed, 0x3c, 0xd1, 0xc9, 0xc5, 0xe3, 0xf1, 0x31, 0x2e, 0xae, 0x58, 0x72, 0x28,
- 0x5b, 0x04, 0xce, 0x7c, 0xf6, 0x8a, 0x4e, 0x3d, 0xe5, 0xa0, 0x61, 0x65, 0x02, 0xa9, 0x75, 0x7d,
- 0x9b, 0xb3, 0x79, 0x82, 0xaa, 0x8a, 0x95, 0x09, 0xc8, 0xc7, 0x00, 0x6e, 0x14, 0xc5, 0x4c, 0xbd,
- 0xdc, 0x06, 0xd2, 0xc0, 0x60, 0xa4, 0xfa, 0xc5, 0x51, 0xd2, 0x2f, 0x8e, 0xc6, 0x49, 0xbf, 0x68,
- 0x35, 0xd1, 0x1a, 0x9f, 0x74, 0x1b, 0x6a, 0xf2, 0x81, 0xc6, 0xc7, 0x88, 0xbc, 0x8a, 0xa5, 0x67,
- 0xe6, 0x1f, 0xa1, 0xa6, 0x3a, 0x8b, 0xff, 0x2b, 0x79, 0xdf, 0x86, 0x86, 0xf2, 0xed, 0x3a, 0x3a,
- 0x57, 0xea, 0x38, 0x3f, 0x72, 0xcc, 0x7f, 0x95, 0xa0, 0x61, 0xb1, 0x28, 0x0c, 0xfc, 0x88, 0xe5,
- 0x3a, 0x9f, 0xd2, 0x6b, 0x3b, 0x9f, 0xf2, 0xda, 0xce, 0x27, 0xe9, 0xa7, 0x2a, 0xb9, 0x7e, 0x6a,
- 0x00, 0x0d, 0xce, 0x1c, 0x97, 0x33, 0x5b, 0xe8, 0xde, 0x2b, 0x9d, 0x4b, 0xdd, 0x2b, 0xca, 0x65,
- 0xc9, 0x8e, 0xb0, 0x2e, 0x34, 0xad, 0x74, 0x4e, 0x9e, 0xe6, 0x1b, 0x06, 0xd5, 0x8a, 0x6d, 0xa9,
- 0x86, 0x41, 0x1d, 0x77, 0xb5, 0x63, 0x30, 0xff, 0x51, 0x86, 0xcd, 0x65, 0xf5, 0x1a, 0x10, 0x6c,
- 0x41, 0x55, 0x95, 0x14, 0x8d, 0x20, 0xb1, 0x52, 0x4c, 0x2a, 0x4b, 0x5c, 0xf3, 0x8b, 0xe5, 0xbc,
- 0x7d, 0xfd, 0xeb, 0x17, 0x73, 0xfa, 0x7d, 0xd8, 0x94, 0xa7, 0x0c, 0x99, 0x93, 0xb5, 0x49, 0x8a,
- 0x84, 0x7a, 0x5a, 0x9e, 0x36, 0x4a, 0x0f, 0xa1, 0x9f, 0x98, 0x66, 0xe9, 0x59, 0x2b, 0xd8, 0x1e,
- 0x26, 0x59, 0xba, 0x0d, 0xb5, 0xf3, 0x80, 0xcf, 0xa9, 0xd0, 0x3c, 0xa4, 0x67, 0x05, 0x9e, 0x41,
- 0xc2, 0x6b, 0x28, 0x58, 0x24, 0x42, 0xf9, 0x29, 0x20, 0xf3, 0x3f, 0x6d, 0xd3, 0x91, 0x88, 0x1a,
- 0x56, 0x23, 0x69, 0xcf, 0xcd, 0x5f, 0x43, 0x6f, 0xa9, 0x33, 0x5b, 0x13, 0xc8, 0x6c, 0xfb, 0x72,
- 0x61, 0xfb, 0x82, 0xe7, 0xca, 0x92, 0xe7, 0xdf, 0x40, 0xff, 0x0b, 0xea, 0x3b, 0x1e, 0xd3, 0xfe,
- 0xf7, 0xf9, 0x2c, 0x92, 0x35, 0x46, 0x7f, 0x28, 0x4c, 0x74, 0x01, 0xe8, 0x58, 0x4d, 0x2d, 0x39,
- 0x72, 0xc8, 0x3d, 0xa8, 0x73, 0x65, 0xad, 0x81, 0xd7, 0xca, 0xb5, 0x8e, 0x56, 0xa2, 0x33, 0xbf,
- 0x05, 0x52, 0x70, 0x2d, 0xbf, 0x11, 0x16, 0x64, 0x47, 0x02, 0x50, 0x81, 0x42, 0x03, 0xbb, 0x9d,
- 0xc7, 0x91, 0x95, 0x6a, 0xc9, 0x10, 0x2a, 0x8c, 0x73, 0xbd, 0x05, 0xf6, 0x6e, 0xd9, 0x17, 0x99,
- 0x25, 0x55, 0xe6, 0x4f, 0xa0, 0x7f, 0x16, 0x32, 0xdb, 0xa5, 0x1e, 0x7e, 0x4d, 0xa9, 0x0d, 0xee,
- 0x42, 0x55, 0x06, 0x39, 0xc9, 0xd9, 0x26, 0x2e, 0x44, 0xb5, 0x92, 0x9b, 0xdf, 0x82, 0xa1, 0xce,
- 0x75, 0x78, 0xed, 0x46, 0x82, 0xf9, 0x36, 0x3b, 0xb8, 0x60, 0xf6, 0xe5, 0xff, 0xf0, 0xe6, 0x57,
- 0x70, 0x7b, 0xdd, 0x0e, 0xc9, 0xf9, 0x5a, 0xb6, 0x9c, 0x4d, 0xce, 0x25, 0x7d, 0xe3, 0x1e, 0x0d,
- 0x0b, 0x50, 0xf4, 0xb9, 0x94, 0xc8, 0x77, 0x64, 0x72, 0x5d, 0xa4, 0x29, 0x51, 0xcf, 0x92, 0x78,
- 0x54, 0x6e, 0x8e, 0xc7, 0x5f, 0x4a, 0xd0, 0x3c, 0x63, 0x22, 0x0e, 0xf1, 0x2e, 0x6f, 0x43, 0x73,
- 0xca, 0x83, 0x4b, 0xc6, 0xb3, 0xab, 0x34, 0x94, 0xe0, 0xc8, 0x21, 0x4f, 0xa1, 0x76, 0x10, 0xf8,
- 0xe7, 0xee, 0x0c, 0xbf, 0x2d, 0x5b, 0x7b, 0xb7, 0x15, 0xbb, 0xe8, 0xb5, 0x23, 0xa5, 0x53, 0xa5,
- 0x56, 0x1b, 0x92, 0x21, 0xb4, 0xf4, 0x17, 0xfa, 0xcb, 0x97, 0x47, 0xcf, 0x93, 0xa6, 0x33, 0x27,
- 0x1a, 0x7c, 0x0c, 0xad, 0xdc, 0xc2, 0xef, 0x55, 0x2d, 0x7e, 0x08, 0x80, 0xbb, 0xab, 0x18, 0x6d,
- 0xaa, 0xab, 0xea, 0x95, 0xf2, 0x6a, 0x77, 0xa1, 0x29, 0xfb, 0x1b, 0xa5, 0x4e, 0xea, 0x54, 0x29,
- 0xab, 0x53, 0xe6, 0x3d, 0xe8, 0x1f, 0xf9, 0x57, 0xd4, 0x73, 0x1d, 0x2a, 0xd8, 0x97, 0x6c, 0x81,
- 0x21, 0x58, 0x39, 0x81, 0x79, 0x06, 0x6d, 0xfd, 0xb1, 0xfb, 0x46, 0x67, 0x6c, 0xeb, 0x33, 0x7e,
- 0x77, 0x12, 0xbd, 0x0f, 0x3d, 0xed, 0xf4, 0xd8, 0xd5, 0x29, 0x24, 0xcb, 0x3c, 0x67, 0xe7, 0xee,
- 0xb5, 0x76, 0xad, 0x67, 0xe6, 0x33, 0xd8, 0xcc, 0x99, 0xa6, 0xd7, 0xb9, 0x64, 0x8b, 0x28, 0xf9,
- 0x11, 0x40, 0x8e, 0x93, 0x08, 0x94, 0xb3, 0x08, 0x98, 0xd0, 0xd5, 0x2b, 0x5f, 0x30, 0x71, 0xc3,
- 0xed, 0xbe, 0x4c, 0x0f, 0xf2, 0x82, 0x69, 0xe7, 0xf7, 0xa1, 0xca, 0xe4, 0x4d, 0xf3, 0x25, 0x2c,
- 0x1f, 0x01, 0x4b, 0xa9, 0xd7, 0x6c, 0xf8, 0x2c, 0xdd, 0xf0, 0x34, 0x56, 0x1b, 0xbe, 0xa1, 0x2f,
- 0xf3, 0xbd, 0xf4, 0x18, 0xa7, 0xb1, 0xb8, 0xe9, 0x45, 0xef, 0x41, 0x5f, 0x1b, 0x3d, 0x67, 0x1e,
- 0x13, 0xec, 0x86, 0x2b, 0xdd, 0x07, 0x52, 0x30, 0xbb, 0xc9, 0xdd, 0x1d, 0x68, 0x8c, 0xc7, 0xc7,
- 0xa9, 0xb6, 0xc8, 0x8d, 0xe6, 0x27, 0xd0, 0x3f, 0x8b, 0x9d, 0xe0, 0x94, 0xbb, 0x57, 0xae, 0xc7,
- 0x66, 0x6a, 0xb3, 0xa4, 0xff, 0x2c, 0xe5, 0xfa, 0xcf, 0xb5, 0xd5, 0xc8, 0xdc, 0x01, 0x52, 0x58,
- 0x9e, 0xbe, 0x5b, 0x14, 0x3b, 0x81, 0x4e, 0x61, 0x1c, 0x9b, 0x3b, 0xd0, 0x1e, 0x53, 0x59, 0xef,
- 0x1d, 0x65, 0x63, 0x40, 0x5d, 0xa8, 0xb9, 0x36, 0x4b, 0xa6, 0xe6, 0x1e, 0x6c, 0x1d, 0x50, 0xfb,
- 0xc2, 0xf5, 0x67, 0xcf, 0xdd, 0x48, 0x36, 0x3c, 0x7a, 0xc5, 0x00, 0x1a, 0x8e, 0x16, 0xe8, 0x25,
- 0xe9, 0xdc, 0x7c, 0x0c, 0x6f, 0xe5, 0x7e, 0x69, 0x39, 0x13, 0x34, 0x89, 0xc7, 0x16, 0x54, 0x23,
- 0x39, 0xc3, 0x15, 0x55, 0x4b, 0x4d, 0xcc, 0xaf, 0x60, 0x2b, 0x5f, 0x80, 0x65, 0xfb, 0x91, 0x5c,
- 0x1c, 0x1b, 0x83, 0x52, 0xae, 0x31, 0xd0, 0x31, 0x2b, 0x67, 0xf5, 0x64, 0x13, 0x2a, 0xbf, 0xfc,
- 0x66, 0xac, 0xc1, 0x2e, 0x87, 0xe6, 0xef, 0xe5, 0xf6, 0x45, 0x7f, 0x6a, 0xfb, 0x42, 0x77, 0x50,
- 0x7a, 0x93, 0xee, 0x60, 0x0d, 0xde, 0x1e, 0x43, 0xff, 0xc4, 0x0b, 0xec, 0xcb, 0x43, 0x3f, 0x17,
- 0x0d, 0x03, 0xea, 0xcc, 0xcf, 0x07, 0x23, 0x99, 0x9a, 0x0f, 0xa0, 0x77, 0x1c, 0xd8, 0xd4, 0x3b,
- 0x09, 0x62, 0x5f, 0xa4, 0x51, 0xc0, 0x9f, 0xbe, 0xb4, 0xa9, 0x9a, 0x98, 0x8f, 0xa1, 0xab, 0x4b,
- 0xb4, 0x7f, 0x1e, 0x24, 0xcc, 0x98, 0x15, 0xf3, 0x52, 0xb1, 0xd7, 0x36, 0x8f, 0xa1, 0x97, 0x99,
- 0x2b, 0xbf, 0x0f, 0xa0, 0xa6, 0xd4, 0xfa, 0x6e, 0xbd, 0xf4, 0x03, 0x52, 0x59, 0x5a, 0x5a, 0xbd,
- 0xe6, 0x52, 0x73, 0xe8, 0x9e, 0xe2, 0x4f, 0x90, 0x87, 0xfe, 0x95, 0x72, 0x76, 0x04, 0x44, 0xfd,
- 0x28, 0x39, 0x61, 0xfe, 0x95, 0xcb, 0x03, 0x1f, 0xfb, 0xdb, 0x92, 0x6e, 0x61, 0x12, 0xc7, 0xe9,
- 0xa2, 0xc4, 0xc2, 0xea, 0x87, 0xcb, 0xa2, 0xb5, 0x31, 0x84, 0xec, 0x07, 0x0e, 0x59, 0x6a, 0x38,
- 0x9b, 0x07, 0x82, 0x4d, 0xa8, 0xe3, 0x24, 0xd9, 0x02, 0x4a, 0xb4, 0xef, 0x38, 0x7c, 0xef, 0x3f,
- 0x65, 0xa8, 0x7f, 0xa6, 0x08, 0x9c, 0x7c, 0x0a, 0x9d, 0x42, 0xb9, 0x26, 0x6f, 0xe1, 0x2f, 0x1c,
- 0xcb, 0xcd, 0xc1, 0x60, 0x7b, 0x45, 0xac, 0xee, 0xf5, 0x04, 0xda, 0xf9, 0x62, 0x4c, 0xb0, 0xf0,
- 0xe2, 0xcf, 0xad, 0x03, 0xf4, 0xb4, 0x5a, 0xa9, 0xcf, 0x60, 0x6b, 0x5d, 0x99, 0x24, 0x77, 0xb2,
- 0x1d, 0x56, 0x4b, 0xf4, 0xe0, 0x9d, 0x9b, 0xb4, 0x49, 0x79, 0xad, 0x1f, 0x78, 0x8c, 0xfa, 0x71,
- 0x98, 0x3f, 0x41, 0x36, 0x24, 0x4f, 0xa1, 0x53, 0x28, 0x14, 0xea, 0x9e, 0x2b, 0xb5, 0x23, 0xbf,
- 0xe4, 0x3e, 0x54, 0xb1, 0x38, 0x91, 0x4e, 0xa1, 0x4a, 0x0e, 0xba, 0xe9, 0x54, 0xed, 0x3d, 0x84,
- 0x0d, 0xfc, 0x11, 0x2e, 0xb7, 0x31, 0xae, 0x48, 0x2b, 0xd7, 0xde, 0xbf, 0x4b, 0x50, 0x4f, 0x7e,
- 0x98, 0x7d, 0x0a, 0x1b, 0xb2, 0x06, 0x90, 0x5b, 0x39, 0x1a, 0x4d, 0xea, 0xc7, 0x60, 0x6b, 0x49,
- 0xa8, 0x36, 0x18, 0x41, 0xe5, 0x05, 0x13, 0x84, 0xe4, 0x94, 0xba, 0x18, 0x0c, 0x6e, 0x15, 0x65,
- 0xa9, 0xfd, 0x69, 0x5c, 0xb4, 0xd7, 0x5c, 0x5e, 0xb0, 0x4f, 0x59, 0xfa, 0x23, 0xa8, 0x29, 0x96,
- 0x55, 0x41, 0x59, 0xe1, 0x67, 0xf5, 0xf8, 0xab, 0x7c, 0xbc, 0xf7, 0xf7, 0x0d, 0x80, 0xb3, 0x45,
- 0x24, 0xd8, 0xfc, 0x57, 0x2e, 0x7b, 0x45, 0x1e, 0x42, 0xef, 0x39, 0x3b, 0xa7, 0xb1, 0x27, 0xf0,
- 0x6b, 0x49, 0xb2, 0x49, 0x2e, 0x26, 0xd8, 0xf0, 0xa5, 0x64, 0x7d, 0x1f, 0x5a, 0x27, 0xf4, 0xfa,
- 0xf5, 0x76, 0x9f, 0x42, 0xa7, 0xc0, 0xc1, 0xfa, 0x88, 0xcb, 0xac, 0xae, 0x8f, 0xb8, 0xca, 0xd6,
- 0xf7, 0xa1, 0xae, 0x99, 0x39, 0xbf, 0x07, 0xd6, 0xb0, 0x02, 0x63, 0xff, 0x14, 0x7a, 0x4b, 0xbc,
- 0x9c, 0xb7, 0xc7, 0x5f, 0x24, 0xd6, 0xf2, 0xf6, 0x33, 0xf9, 0xb5, 0x53, 0xe4, 0xe6, 0xfc, 0xc2,
- 0xdb, 0x8a, 0x0f, 0xd7, 0x91, 0xf7, 0x8b, 0xe2, 0x77, 0x12, 0x7e, 0x25, 0x1a, 0xcb, 0xf4, 0x99,
- 0x90, 0x77, 0xe2, 0x68, 0x1d, 0x0d, 0x3f, 0x81, 0x76, 0x9e, 0x41, 0x57, 0x52, 0x70, 0x95, 0x5e,
- 0x1f, 0x01, 0x64, 0x24, 0x9a, 0xb7, 0x47, 0x78, 0x2c, 0xf3, 0xeb, 0x87, 0x00, 0x19, 0x35, 0x2a,
- 0x54, 0x15, 0x99, 0x55, 0x2d, 0x5b, 0xa6, 0xcf, 0x87, 0xd0, 0x4c, 0xe9, 0x2c, 0xbf, 0x07, 0x3a,
- 0x28, 0xb2, 0xe3, 0x67, 0xa3, 0xdf, 0x3e, 0x9a, 0xb9, 0xe2, 0x22, 0x9e, 0x8e, 0xec, 0x60, 0xbe,
- 0x7b, 0x41, 0xa3, 0x0b, 0xd7, 0x0e, 0x78, 0xb8, 0x7b, 0x25, 0xc1, 0xb4, 0xbb, 0xf2, 0x9f, 0xd1,
- 0xb4, 0x86, 0x1f, 0x7b, 0x1f, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x93, 0x15, 0xb9, 0x42, 0x4f,
- 0x1a, 0x00, 0x00,
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/pb/backend.proto b/vendor/github.com/hashicorp/vault/logical/plugin/pb/backend.proto
deleted file mode 100644
index e02cc1f9..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/pb/backend.proto
+++ /dev/null
@@ -1,594 +0,0 @@
-syntax = "proto3";
-package pb;
-
-option go_package = "github.com/hashicorp/vault/logical/plugin/pb";
-
-import "google/protobuf/timestamp.proto";
-import "logical/identity.proto";
-import "logical/plugin.proto";
-
-message Empty {}
-
-message Header {
- repeated string header = 1;
-}
-
-message ProtoError {
- // Error type can be one of:
- // ErrTypeUnknown uint32 = iota
- // ErrTypeUserError
- // ErrTypeInternalError
- // ErrTypeCodedError
- // ErrTypeStatusBadRequest
- // ErrTypeUnsupportedOperation
- // ErrTypeUnsupportedPath
- // ErrTypeInvalidRequest
- // ErrTypePermissionDenied
- // ErrTypeMultiAuthzPending
- uint32 err_type = 1;
- string err_msg = 2;
- int64 err_code = 3;
-}
-
-// Paths is the structure of special paths that is used for SpecialPaths.
-message Paths {
- // Root are the paths that require a root token to access
- repeated string root = 1;
-
- // Unauthenticated are the paths that can be accessed without any auth.
- repeated string unauthenticated = 2;
-
- // LocalStorage are paths (prefixes) that are local to this instance; this
- // indicates that these paths should not be replicated
- repeated string local_storage = 3;
-
- // SealWrapStorage are storage paths that, when using a capable seal,
- // should be seal wrapped with extra encryption. Matching is exact
- // unless the path ends with '/', in which case it is treated as a prefix.
- repeated string seal_wrap_storage = 4;
-}
-
-message Request {
- // Id is the uuid associated with each request
- string id = 1;
-
- // If set, the name given to the replication secondary where this request
- // originated
- string ReplicationCluster = 2;
-
- // Operation is the requested operation type
- string operation = 3;
-
- // Path is the part of the request path not consumed by the
- // routing. As an example, if the original request path is "prod/aws/foo"
- // and the AWS logical backend is mounted at "prod/aws/", then the
- // final path is "foo" since the mount prefix is trimmed.
- string path = 4;
-
- // Request data is a JSON object that must have string keys.
- string data = 5;
-
- // Secret will be non-nil only for Revoke and Renew operations
- // to represent the secret that was returned prior.
- Secret secret = 6;
-
- // Auth will be non-nil only for Renew operations
- // to represent the auth that was returned prior.
- Auth auth = 7;
-
- // Headers will contain the http headers from the request. This value will
- // be used in the audit broker to ensure we are auditing only the allowed
- // headers.
- map<string, Header> headers = 8;
-
- // ClientToken is provided to the core so that the identity
- // can be verified and ACLs applied. This value is passed
- // through to the logical backends but after being salted and
- // hashed.
- string client_token = 9;
-
- // ClientTokenAccessor is provided to the core so that it can get
- // logged as part of request audit logging.
- string client_token_accessor = 10;
-
- // DisplayName is provided to the logical backend to help associate
- // dynamic secrets with the source entity. This is not a sensitive
- // name, but is useful for operators.
- string display_name = 11;
-
- // MountPoint is provided so that a logical backend can generate
- // paths relative to itself. The `Path` is effectively the client
- // request path with the MountPoint trimmed off.
- string mount_point = 12;
-
- // MountType is provided so that a logical backend can make decisions
- // based on the specific mount type (e.g., if a mount type has different
- // aliases, generating different defaults depending on the alias)
- string mount_type = 13;
-
- // MountAccessor is provided so that identities returned by the authentication
- // backends can be tied to the mount it belongs to.
- string mount_accessor = 14;
-
- // WrapInfo contains requested response wrapping parameters
- RequestWrapInfo wrap_info = 15;
-
- // ClientTokenRemainingUses represents the allowed number of uses left on the
- // token supplied
- int64 client_token_remaining_uses = 16;
-
- // EntityID is the identity of the caller extracted out of the token used
- // to make this request
- string entity_id = 17;
-
- // PolicyOverride indicates that the requestor wishes to override
- // soft-mandatory Sentinel policies
- bool policy_override = 18;
-
- // Whether the request is unauthenticated, as in, had no client token
- // attached. Useful in some situations where the client token is not made
- // accessible.
- bool unauthenticated = 19;
-
- // Connection will be non-nil only for credential providers to
- // inspect the connection information and potentially use it for
- // authentication/protection.
- Connection connection = 20;
-}
-
-message Auth {
- LeaseOptions lease_options = 1;
-
- // InternalData is a JSON object that is stored with the auth struct.
- // This will be sent back during a Renew/Revoke for storing internal data
- // used for those operations.
- string internal_data = 2;
-
- // DisplayName is a non-security sensitive identifier that is
- // applicable to this Auth. It is used for logging and prefixing
- // of dynamic secrets. For example, DisplayName may be "armon" for
- // the github credential backend. If the client token is used to
- // generate a SQL credential, the user may be "github-armon-uuid".
- // This is to help identify the source without using audit tables.
- string display_name = 3;
-
- // Policies is the list of policies that the authenticated user
- // is associated with.
- repeated string policies = 4;
-
- // Metadata is used to attach arbitrary string-type metadata to
- // an authenticated user. This metadata will be outputted into the
- // audit log.
- map<string, string> metadata = 5;
-
- // ClientToken is the token that is generated for the authentication.
- // This will be filled in by Vault core when an auth structure is
- // returned. Setting this manually will have no effect.
- string client_token = 6;
-
- // Accessor is the identifier for the ClientToken. This can be used
- // to perform management functionalities (especially revocation) when
- // the ClientToken in the audit logs is obfuscated. Accessor can be used
- // to revoke a ClientToken and to lookup the capabilities of the ClientToken,
- // both without actually knowing the ClientToken.
- string accessor = 7;
-
- // Period indicates that the token generated using this Auth object
- // should never expire. The token should be renewed within the duration
- // specified by this period.
- int64 period = 8;
-
- // Number of allowed uses of the issued token
- int64 num_uses = 9;
-
- // EntityID is the identifier of the entity in identity store to which the
- // identity of the authenticating client belongs to.
- string entity_id = 10;
-
- // Alias is the information about the authenticated client returned by
- // the auth backend
- logical.Alias alias = 11;
-
- // GroupAliases are the informational mappings of external groups which an
- // authenticated user belongs to. This is used to check if there are
- // mapped groups for the group aliases in the identity store. For all the
- // matching groups, the entity ID of the user will be added.
- repeated logical.Alias group_aliases = 12;
-
- // If set, restricts usage of the certificates to client IPs falling within
- // the range of the specified CIDR(s).
- repeated string bound_cidrs = 13;
-
- // TokenPolicies and IdentityPolicies break down the list in Policies to
- // help determine where a policy was sourced
- repeated string token_policies = 14;
- repeated string identity_policies = 15;
-
- // Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum
- // TTL is a hard limit and cannot be exceeded; this also applies to periodic tokens.
- int64 explicit_max_ttl = 16;
-
- // TokenType is the type of token being requested
- uint32 token_type = 17;
-}
-
-message TokenEntry {
- string id = 1;
- string accessor = 2;
- string parent = 3;
- repeated string policies = 4;
- string path = 5;
- map<string, string> meta = 6;
- string display_name = 7;
- int64 num_uses = 8;
- int64 creation_time = 9;
- int64 ttl = 10;
- int64 explicit_max_ttl = 11;
- string role = 12;
- int64 period = 13;
- string entity_id = 14;
- repeated string bound_cidrs = 15;
- string namespace_id = 16;
- string cubbyhole_id = 17;
- uint32 type = 18;
-}
-
-message LeaseOptions {
- int64 TTL = 1;
-
- bool renewable = 2;
-
- int64 increment = 3;
-
- google.protobuf.Timestamp issue_time = 4;
-
- int64 MaxTTL = 5;
-}
-
-message Secret {
- LeaseOptions lease_options = 1;
-
- // InternalData is a JSON object that is stored with the secret.
- // This will be sent back during a Renew/Revoke for storing internal data
- // used for those operations.
- string internal_data = 2;
-
- // LeaseID is the ID returned to the user to manage this secret.
- // This is generated by Vault core. Any set value will be ignored.
- // For requests, this will always be blank.
- string lease_id = 3;
-}
-
-message Response {
- // Secret, if not nil, denotes that this response represents a secret.
- Secret secret = 1;
-
- // Auth, if not nil, contains the authentication information for
- // this response. This is only checked and means something for
- // credential backends.
- Auth auth = 2;
-
- // Response data is a JSON object that must have string keys. For
- // secrets, this data is sent down to the user as-is. To store internal
- // data that you don't want the user to see, store it in
- // Secret.InternalData.
- string data = 3;
-
- // Redirect is an HTTP URL to redirect to for further authentication.
- // This is only valid for credential backends. This will be blanked
- // for any logical backend and ignored.
- string redirect = 4;
-
- // Warnings allow operations or backends to return warnings in response
- // to user actions without failing the action outright.
- repeated string warnings = 5;
-
- // Information for wrapping the response in a cubbyhole
- ResponseWrapInfo wrap_info = 6;
-}
-
-message ResponseWrapInfo {
- // Setting to non-zero specifies that the response should be wrapped.
- // Specifies the desired TTL of the wrapping token.
- int64 TTL = 1;
-
- // The token containing the wrapped response
- string token = 2;
-
- // The token accessor for the wrapped response token
- string accessor = 3;
-
- // The creation time. This can be used with the TTL to figure out an
- // expected expiration.
- google.protobuf.Timestamp creation_time = 4;
-
- // If the contained response is the output of a token creation call, the
- // created token's accessor will be accessible here
- string wrapped_accessor = 5;
-
- // WrappedEntityID is the entity identifier of the caller who initiated the
- // wrapping request
- string wrapped_entity_id = 6;
-
- // The format to use. This doesn't get returned, it's only internal.
- string format = 7;
-
- // CreationPath is the original request path that was used to create
- // the wrapped response.
- string creation_path = 8;
-
- // Controls seal wrapping behavior downstream for specific use cases
- bool seal_wrap = 9;
-}
-
-message RequestWrapInfo {
- // Setting to non-zero specifies that the response should be wrapped.
- // Specifies the desired TTL of the wrapping token.
- int64 TTL = 1;
-
- // The format to use for the wrapped response; if not specified it's a bare
- // token
- string format = 2;
-
- // A flag indicating to conforming backends that data for a given request
- // should be seal wrapped
- bool seal_wrap = 3;
-}
-
-// HandleRequestArgs is the args for HandleRequest method.
-message HandleRequestArgs {
- uint32 storage_id = 1;
- Request request = 2;
-}
-
-// HandleRequestReply is the reply for HandleRequest method.
-message HandleRequestReply {
- Response response = 1;
- ProtoError err = 2;
-}
-
-// SpecialPathsReply is the reply for SpecialPaths method.
-message SpecialPathsReply {
- Paths paths = 1;
-}
-
-// HandleExistenceCheckArgs is the args for HandleExistenceCheck method.
-message HandleExistenceCheckArgs {
- uint32 storage_id = 1;
- Request request = 2;
-}
-
-// HandleExistenceCheckReply is the reply for HandleExistenceCheck method.
-message HandleExistenceCheckReply {
- bool check_found = 1;
- bool exists = 2;
- ProtoError err = 3;
-}
-
-// SetupArgs is the args for Setup method.
-message SetupArgs {
- uint32 broker_id = 1;
- map<string, string> Config = 2;
- string backendUUID = 3;
-}
-
-// SetupReply is the reply for Setup method.
-message SetupReply {
- string err = 1;
-}
-
-// TypeReply is the reply for the Type method.
-message TypeReply {
- uint32 type = 1;
-}
-
-message InvalidateKeyArgs {
- string key = 1;
-}
-
-// Backend is the interface that plugins must satisfy. The plugin should
-// implement the server for this service. Requests will first run the
- // HandleExistenceCheck rpc, then run the HandleRequest rpc.
-service Backend {
- // HandleRequest is used to handle a request and generate a response.
- // The plugins must check the operation type and handle appropriately.
- rpc HandleRequest(HandleRequestArgs) returns (HandleRequestReply);
-
- // SpecialPaths is a list of paths that are special in some way.
- // See PathType for the types of special paths. The key is the type
- // of the special path, and the value is a list of paths for this type.
- // This is not a regular expression but is an exact match. If the path
- // ends in '*' then it is a prefix-based match. The '*' can only appear
- // at the end.
- rpc SpecialPaths(Empty) returns (SpecialPathsReply);
-
- // HandleExistenceCheck is used to handle a request and generate a response
- // indicating whether the given path exists or not; this is used to
- // understand whether the request must have a Create or Update capability
- // ACL applied. The first bool indicates whether an existence check
- // function was found for the backend; the second indicates whether, if an
- // existence check function was found, the item exists or not.
- rpc HandleExistenceCheck(HandleExistenceCheckArgs) returns (HandleExistenceCheckReply);
-
- // Cleanup is invoked during an unmount of a backend to allow it to
- // handle any cleanup like connection closing or releasing of file handles.
- // Cleanup is called right before Vault closes the plugin process.
- rpc Cleanup(Empty) returns (Empty);
-
- // InvalidateKey may be invoked when an object is modified that belongs
- // to the backend. The backend can use this to clear any caches or reset
- // internal state as needed.
- rpc InvalidateKey(InvalidateKeyArgs) returns (Empty);
-
- // Setup is used to set up the backend based on the provided backend
- // configuration. The plugin's setup implementation should use the provided
- // broker_id to create a connection back to Vault for use with the Storage
- // and SystemView clients.
- rpc Setup(SetupArgs) returns (SetupReply);
-
- // Type returns the BackendType for the particular backend
- rpc Type(Empty) returns (TypeReply);
-}
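
The HandleExistenceCheck comments above encode a two-part protocol: whether an existence-check function exists at all, and if so, whether the item exists. A small Go sketch of how a host could map that reply onto the capability to enforce; the function and its string results are illustrative, not Vault's actual API:

package main

import "fmt"

// requiredCapability sketches the decision described in the rpc comments:
// no existence-check function, or an item that already exists, implies
// update semantics; a missing item implies create semantics.
func requiredCapability(checkFound, exists bool) string {
	if !checkFound || exists {
		return "update"
	}
	return "create"
}

func main() {
	fmt.Println(requiredCapability(true, false))  // create
	fmt.Println(requiredCapability(false, false)) // update (no check function)
}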
-
-message StorageEntry {
- string key = 1;
- bytes value = 2;
- bool seal_wrap = 3;
-}
-
-message StorageListArgs {
- string prefix = 1;
-}
-
-message StorageListReply {
- repeated string keys = 1;
- string err = 2;
-}
-
-message StorageGetArgs {
- string key = 1;
-}
-
-message StorageGetReply {
- StorageEntry entry = 1;
- string err = 2;
-}
-
-message StoragePutArgs {
- StorageEntry entry = 1;
-}
-
-message StoragePutReply {
- string err = 1;
-}
-
-message StorageDeleteArgs {
- string key = 1;
-}
-
-message StorageDeleteReply {
- string err = 1;
-}
-
-// Storage is the way that plugins are able to read/write data. Plugins should
-// implement the client for this service.
-service Storage {
- rpc List(StorageListArgs) returns (StorageListReply);
- rpc Get(StorageGetArgs) returns (StorageGetReply);
- rpc Put(StoragePutArgs) returns (StoragePutReply);
- rpc Delete(StorageDeleteArgs) returns (StorageDeleteReply);
-}
-
-message TTLReply {
- int64 TTL = 1;
-}
-
-message SudoPrivilegeArgs {
- string path = 1;
- string token = 2;
-}
-
-message SudoPrivilegeReply {
- bool sudo = 1;
-}
-
-message TaintedReply {
- bool tainted = 1;
-}
-
-message CachingDisabledReply {
- bool disabled = 1;
-}
-
-message ReplicationStateReply {
- int32 state = 1;
-}
-
-message ResponseWrapDataArgs {
- string data = 1;
- int64 TTL = 2;
- bool JWT = 3;
-}
-
-message ResponseWrapDataReply {
- ResponseWrapInfo wrap_info = 1;
- string err = 2;
-}
-
-message MlockEnabledReply {
- bool enabled = 1;
-}
-
-message LocalMountReply {
- bool local = 1;
-}
-
-message EntityInfoArgs {
- string entity_id = 1;
-}
-
-message EntityInfoReply {
- logical.Entity entity = 1;
- string err = 2;
-}
-
-message PluginEnvReply {
- logical.PluginEnvironment plugin_environment = 1;
- string err = 2;
-}
-
-// SystemView exposes system configuration information in a safe way for plugins
-// to consume. Plugins should implement the client for this service.
-service SystemView {
- // DefaultLeaseTTL returns the default lease TTL set in Vault configuration
- rpc DefaultLeaseTTL(Empty) returns (TTLReply);
-
- // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
- // authors should take care not to issue credentials that last longer than
- // this value, as Vault will revoke them
- rpc MaxLeaseTTL(Empty) returns (TTLReply);
-
- // SudoPrivilege returns true if given path has sudo privileges
- // for the given client token
- rpc SudoPrivilege(SudoPrivilegeArgs) returns (SudoPrivilegeReply);
-
- // Tainted returns true if the mount is tainted. A mount is tainted if it is in the
- // process of being unmounted. This should only be used in special
- // circumstances; a primary use-case is as a guard in revocation functions.
- // If revocation of a backend's leases fails it can keep the unmounting
- // process from being successful. If the reason for this failure is not
- // relevant when the mount is tainted (for instance, saving a CRL to disk
- // when the stored CRL will be removed during the unmounting process
- // anyways), we can ignore the errors to allow unmounting to complete.
- rpc Tainted(Empty) returns (TaintedReply);
-
- // CachingDisabled returns true if caching is disabled. If true, no caches
- // should be used, despite known slowdowns.
- rpc CachingDisabled(Empty) returns (CachingDisabledReply);
-
- // ReplicationState indicates the state of cluster replication
- rpc ReplicationState(Empty) returns (ReplicationStateReply);
-
- // ResponseWrapData wraps the given data in a cubbyhole and returns the
- // token used to unwrap.
- rpc ResponseWrapData(ResponseWrapDataArgs) returns (ResponseWrapDataReply);
-
- // MlockEnabled returns the configuration setting for enabling mlock on
- // plugins.
- rpc MlockEnabled(Empty) returns (MlockEnabledReply);
-
- // LocalMount, when run from a system view attached to a request, indicates
- // whether the request is affecting a local mount or not
- rpc LocalMount(Empty) returns (LocalMountReply);
-
- // EntityInfo returns the basic entity information for the given entity id
- rpc EntityInfo(EntityInfoArgs) returns (EntityInfoReply);
-
- // PluginEnv returns Vault environment information used by plugins
- rpc PluginEnv(Empty) returns (PluginEnvReply);
-}
-
-message Connection {
- // RemoteAddr is the network address that sent the request.
- string remote_addr = 1;
-}
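
Since Request.data, Response.data, and the internal_data fields above are carried as JSON-encoded strings with string keys rather than as structured messages, each side of the connection needs a small encode/decode step. A minimal self-contained sketch of that convention; the helper names are hypothetical, not part of the vendored code:

package main

import (
	"encoding/json"
	"fmt"
)

// encodeData serializes a request/response data map into the string form
// the proto expects; the map has string keys by construction.
func encodeData(data map[string]interface{}) (string, error) {
	buf, err := json.Marshal(data)
	if err != nil {
		return "", err
	}
	return string(buf), nil
}

// decodeData is the inverse, rebuilding the map on the receiving side.
func decodeData(s string) (map[string]interface{}, error) {
	data := map[string]interface{}{}
	if err := json.Unmarshal([]byte(s), &data); err != nil {
		return nil, err
	}
	return data, nil
}

func main() {
	s, _ := encodeData(map[string]interface{}{"ttl": 3600, "policies": []string{"default"}})
	fmt.Println(s) // {"policies":["default"],"ttl":3600}
	back, _ := decodeData(s)
	fmt.Println(back["ttl"]) // 3600 (a float64 after the JSON round trip)
}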
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/pb/translation.go b/vendor/github.com/hashicorp/vault/logical/plugin/pb/translation.go
deleted file mode 100644
index c777cae5..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/pb/translation.go
+++ /dev/null
@@ -1,622 +0,0 @@
-package pb
-
-import (
- "encoding/json"
- "errors"
- "time"
-
- "github.com/golang/protobuf/ptypes"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- ErrTypeUnknown uint32 = iota
- ErrTypeUserError
- ErrTypeInternalError
- ErrTypeCodedError
- ErrTypeStatusBadRequest
- ErrTypeUnsupportedOperation
- ErrTypeUnsupportedPath
- ErrTypeInvalidRequest
- ErrTypePermissionDenied
- ErrTypeMultiAuthzPending
-)
-
-func ProtoErrToErr(e *ProtoError) error {
- if e == nil {
- return nil
- }
-
- var err error
- switch e.ErrType {
- case ErrTypeUnknown:
- err = errors.New(e.ErrMsg)
- case ErrTypeUserError:
- err = errutil.UserError{Err: e.ErrMsg}
- case ErrTypeInternalError:
- err = errutil.InternalError{Err: e.ErrMsg}
- case ErrTypeCodedError:
- err = logical.CodedError(int(e.ErrCode), e.ErrMsg)
- case ErrTypeStatusBadRequest:
- err = &logical.StatusBadRequest{Err: e.ErrMsg}
- case ErrTypeUnsupportedOperation:
- err = logical.ErrUnsupportedOperation
- case ErrTypeUnsupportedPath:
- err = logical.ErrUnsupportedPath
- case ErrTypeInvalidRequest:
- err = logical.ErrInvalidRequest
- case ErrTypePermissionDenied:
- err = logical.ErrPermissionDenied
- case ErrTypeMultiAuthzPending:
- err = logical.ErrMultiAuthzPending
- }
-
- return err
-}
-
-func ErrToProtoErr(e error) *ProtoError {
- if e == nil {
- return nil
- }
- pbErr := &ProtoError{
- ErrMsg: e.Error(),
- ErrType: ErrTypeUnknown,
- }
-
- switch e.(type) {
- case errutil.UserError:
- pbErr.ErrType = ErrTypeUserError
- case errutil.InternalError:
- pbErr.ErrType = ErrTypeInternalError
- case logical.HTTPCodedError:
- pbErr.ErrType = ErrTypeCodedError
- pbErr.ErrCode = int64(e.(logical.HTTPCodedError).Code())
- case *logical.StatusBadRequest:
- pbErr.ErrType = ErrTypeStatusBadRequest
- }
-
- switch {
- case e == logical.ErrUnsupportedOperation:
- pbErr.ErrType = ErrTypeUnsupportedOperation
- case e == logical.ErrUnsupportedPath:
- pbErr.ErrType = ErrTypeUnsupportedPath
- case e == logical.ErrInvalidRequest:
- pbErr.ErrType = ErrTypeInvalidRequest
- case e == logical.ErrPermissionDenied:
- pbErr.ErrType = ErrTypePermissionDenied
- case e == logical.ErrMultiAuthzPending:
- pbErr.ErrType = ErrTypeMultiAuthzPending
- }
-
- return pbErr
-}
-
-func ErrToString(e error) string {
- if e == nil {
- return ""
- }
-
- return e.Error()
-}
-
-func LogicalStorageEntryToProtoStorageEntry(e *logical.StorageEntry) *StorageEntry {
- if e == nil {
- return nil
- }
-
- return &StorageEntry{
- Key: e.Key,
- Value: e.Value,
- SealWrap: e.SealWrap,
- }
-}
-
-func ProtoStorageEntryToLogicalStorageEntry(e *StorageEntry) *logical.StorageEntry {
- if e == nil {
- return nil
- }
-
- return &logical.StorageEntry{
- Key: e.Key,
- Value: e.Value,
- SealWrap: e.SealWrap,
- }
-}
-
-func ProtoLeaseOptionsToLogicalLeaseOptions(l *LeaseOptions) (logical.LeaseOptions, error) {
- if l == nil {
- return logical.LeaseOptions{}, nil
- }
-
- t, err := ptypes.Timestamp(l.IssueTime)
- return logical.LeaseOptions{
- TTL: time.Duration(l.TTL),
- Renewable: l.Renewable,
- Increment: time.Duration(l.Increment),
- IssueTime: t,
- MaxTTL: time.Duration(l.MaxTTL),
- }, err
-}
-
-func LogicalLeaseOptionsToProtoLeaseOptions(l logical.LeaseOptions) (*LeaseOptions, error) {
- t, err := ptypes.TimestampProto(l.IssueTime)
- if err != nil {
- return nil, err
- }
-
- return &LeaseOptions{
- TTL: int64(l.TTL),
- Renewable: l.Renewable,
- Increment: int64(l.Increment),
- IssueTime: t,
- MaxTTL: int64(l.MaxTTL),
- }, err
-}
-
-func ProtoSecretToLogicalSecret(s *Secret) (*logical.Secret, error) {
- if s == nil {
- return nil, nil
- }
-
- data := map[string]interface{}{}
- err := json.Unmarshal([]byte(s.InternalData), &data)
- if err != nil {
- return nil, err
- }
-
- lease, err := ProtoLeaseOptionsToLogicalLeaseOptions(s.LeaseOptions)
- if err != nil {
- return nil, err
- }
-
- return &logical.Secret{
- LeaseOptions: lease,
- InternalData: data,
- LeaseID: s.LeaseID,
- }, nil
-}
-
-func LogicalSecretToProtoSecret(s *logical.Secret) (*Secret, error) {
- if s == nil {
- return nil, nil
- }
-
- buf, err := json.Marshal(s.InternalData)
- if err != nil {
- return nil, err
- }
-
- lease, err := LogicalLeaseOptionsToProtoLeaseOptions(s.LeaseOptions)
- if err != nil {
- return nil, err
- }
-
- return &Secret{
- LeaseOptions: lease,
- InternalData: string(buf[:]),
- LeaseID: s.LeaseID,
- }, err
-}
-
-func LogicalRequestToProtoRequest(r *logical.Request) (*Request, error) {
- if r == nil {
- return nil, nil
- }
-
- buf, err := json.Marshal(r.Data)
- if err != nil {
- return nil, err
- }
-
- secret, err := LogicalSecretToProtoSecret(r.Secret)
- if err != nil {
- return nil, err
- }
-
- auth, err := LogicalAuthToProtoAuth(r.Auth)
- if err != nil {
- return nil, err
- }
-
- headers := map[string]*Header{}
- for k, v := range r.Headers {
- headers[k] = &Header{Header: v}
- }
-
- return &Request{
- ID: r.ID,
- ReplicationCluster: r.ReplicationCluster,
- Operation: string(r.Operation),
- Path: r.Path,
- Data: string(buf[:]),
- Secret: secret,
- Auth: auth,
- Headers: headers,
- ClientToken: r.ClientToken,
- ClientTokenAccessor: r.ClientTokenAccessor,
- DisplayName: r.DisplayName,
- MountPoint: r.MountPoint,
- MountType: r.MountType,
- MountAccessor: r.MountAccessor,
- WrapInfo: LogicalRequestWrapInfoToProtoRequestWrapInfo(r.WrapInfo),
- ClientTokenRemainingUses: int64(r.ClientTokenRemainingUses),
- Connection: LogicalConnectionToProtoConnection(r.Connection),
- EntityID: r.EntityID,
- PolicyOverride: r.PolicyOverride,
- Unauthenticated: r.Unauthenticated,
- }, nil
-}
-
-func ProtoRequestToLogicalRequest(r *Request) (*logical.Request, error) {
- if r == nil {
- return nil, nil
- }
-
- data := map[string]interface{}{}
- err := json.Unmarshal([]byte(r.Data), &data)
- if err != nil {
- return nil, err
- }
-
- secret, err := ProtoSecretToLogicalSecret(r.Secret)
- if err != nil {
- return nil, err
- }
-
- auth, err := ProtoAuthToLogicalAuth(r.Auth)
- if err != nil {
- return nil, err
- }
-
- var headers map[string][]string
- if len(r.Headers) > 0 {
- headers = make(map[string][]string, len(r.Headers))
- for k, v := range r.Headers {
- headers[k] = v.Header
- }
- }
-
- return &logical.Request{
- ID: r.ID,
- ReplicationCluster: r.ReplicationCluster,
- Operation: logical.Operation(r.Operation),
- Path: r.Path,
- Data: data,
- Secret: secret,
- Auth: auth,
- Headers: headers,
- ClientToken: r.ClientToken,
- ClientTokenAccessor: r.ClientTokenAccessor,
- DisplayName: r.DisplayName,
- MountPoint: r.MountPoint,
- MountType: r.MountType,
- MountAccessor: r.MountAccessor,
- WrapInfo: ProtoRequestWrapInfoToLogicalRequestWrapInfo(r.WrapInfo),
- ClientTokenRemainingUses: int(r.ClientTokenRemainingUses),
- Connection: ProtoConnectionToLogicalConnection(r.Connection),
- EntityID: r.EntityID,
- PolicyOverride: r.PolicyOverride,
- Unauthenticated: r.Unauthenticated,
- }, nil
-}
-
-func LogicalConnectionToProtoConnection(c *logical.Connection) *Connection {
- if c == nil {
- return nil
- }
-
- return &Connection{
- RemoteAddr: c.RemoteAddr,
- }
-}
-
-func ProtoConnectionToLogicalConnection(c *Connection) *logical.Connection {
- if c == nil {
- return nil
- }
-
- return &logical.Connection{
- RemoteAddr: c.RemoteAddr,
- }
-}
-
-func LogicalRequestWrapInfoToProtoRequestWrapInfo(i *logical.RequestWrapInfo) *RequestWrapInfo {
- if i == nil {
- return nil
- }
-
- return &RequestWrapInfo{
- TTL: int64(i.TTL),
- Format: i.Format,
- SealWrap: i.SealWrap,
- }
-}
-
-func ProtoRequestWrapInfoToLogicalRequestWrapInfo(i *RequestWrapInfo) *logical.RequestWrapInfo {
- if i == nil {
- return nil
- }
-
- return &logical.RequestWrapInfo{
- TTL: time.Duration(i.TTL),
- Format: i.Format,
- SealWrap: i.SealWrap,
- }
-}
-
-func ProtoResponseToLogicalResponse(r *Response) (*logical.Response, error) {
- if r == nil {
- return nil, nil
- }
-
- secret, err := ProtoSecretToLogicalSecret(r.Secret)
- if err != nil {
- return nil, err
- }
-
- auth, err := ProtoAuthToLogicalAuth(r.Auth)
- if err != nil {
- return nil, err
- }
-
- data := map[string]interface{}{}
- err = json.Unmarshal([]byte(r.Data), &data)
- if err != nil {
- return nil, err
- }
-
- wrapInfo, err := ProtoResponseWrapInfoToLogicalResponseWrapInfo(r.WrapInfo)
- if err != nil {
- return nil, err
- }
-
- return &logical.Response{
- Secret: secret,
- Auth: auth,
- Data: data,
- Redirect: r.Redirect,
- Warnings: r.Warnings,
- WrapInfo: wrapInfo,
- }, nil
-}
-
-func ProtoResponseWrapInfoToLogicalResponseWrapInfo(i *ResponseWrapInfo) (*wrapping.ResponseWrapInfo, error) {
- if i == nil {
- return nil, nil
- }
-
- t, err := ptypes.Timestamp(i.CreationTime)
- if err != nil {
- return nil, err
- }
-
- return &wrapping.ResponseWrapInfo{
- TTL: time.Duration(i.TTL),
- Token: i.Token,
- Accessor: i.Accessor,
- CreationTime: t,
- WrappedAccessor: i.WrappedAccessor,
- WrappedEntityID: i.WrappedEntityID,
- Format: i.Format,
- CreationPath: i.CreationPath,
- SealWrap: i.SealWrap,
- }, nil
-}
-
-func LogicalResponseWrapInfoToProtoResponseWrapInfo(i *wrapping.ResponseWrapInfo) (*ResponseWrapInfo, error) {
- if i == nil {
- return nil, nil
- }
-
- t, err := ptypes.TimestampProto(i.CreationTime)
- if err != nil {
- return nil, err
- }
-
- return &ResponseWrapInfo{
- TTL: int64(i.TTL),
- Token: i.Token,
- Accessor: i.Accessor,
- CreationTime: t,
- WrappedAccessor: i.WrappedAccessor,
- WrappedEntityID: i.WrappedEntityID,
- Format: i.Format,
- CreationPath: i.CreationPath,
- SealWrap: i.SealWrap,
- }, nil
-}
-
-func LogicalResponseToProtoResponse(r *logical.Response) (*Response, error) {
- if r == nil {
- return nil, nil
- }
-
- secret, err := LogicalSecretToProtoSecret(r.Secret)
- if err != nil {
- return nil, err
- }
-
- auth, err := LogicalAuthToProtoAuth(r.Auth)
- if err != nil {
- return nil, err
- }
-
- buf, err := json.Marshal(r.Data)
- if err != nil {
- return nil, err
- }
-
- wrapInfo, err := LogicalResponseWrapInfoToProtoResponseWrapInfo(r.WrapInfo)
- if err != nil {
- return nil, err
- }
-
- return &Response{
- Secret: secret,
- Auth: auth,
- Data: string(buf[:]),
- Redirect: r.Redirect,
- Warnings: r.Warnings,
- WrapInfo: wrapInfo,
- }, nil
-}
-
-func LogicalAuthToProtoAuth(a *logical.Auth) (*Auth, error) {
- if a == nil {
- return nil, nil
- }
-
- buf, err := json.Marshal(a.InternalData)
- if err != nil {
- return nil, err
- }
-
- lo, err := LogicalLeaseOptionsToProtoLeaseOptions(a.LeaseOptions)
- if err != nil {
- return nil, err
- }
-
- boundCIDRs := make([]string, len(a.BoundCIDRs))
- for i, cidr := range a.BoundCIDRs {
- boundCIDRs[i] = cidr.String()
- }
-
- return &Auth{
- LeaseOptions: lo,
- TokenType: uint32(a.TokenType),
- InternalData: string(buf[:]),
- DisplayName: a.DisplayName,
- Policies: a.Policies,
- TokenPolicies: a.TokenPolicies,
- IdentityPolicies: a.IdentityPolicies,
- Metadata: a.Metadata,
- ClientToken: a.ClientToken,
- Accessor: a.Accessor,
- Period: int64(a.Period),
- NumUses: int64(a.NumUses),
- EntityID: a.EntityID,
- Alias: a.Alias,
- GroupAliases: a.GroupAliases,
- BoundCIDRs: boundCIDRs,
- ExplicitMaxTTL: int64(a.ExplicitMaxTTL),
- }, nil
-}
-
-func ProtoAuthToLogicalAuth(a *Auth) (*logical.Auth, error) {
- if a == nil {
- return nil, nil
- }
-
- data := map[string]interface{}{}
- err := json.Unmarshal([]byte(a.InternalData), &data)
- if err != nil {
- return nil, err
- }
-
- lo, err := ProtoLeaseOptionsToLogicalLeaseOptions(a.LeaseOptions)
- if err != nil {
- return nil, err
- }
-
- boundCIDRs, err := parseutil.ParseAddrs(a.BoundCIDRs)
- if err != nil {
- return nil, err
- }
- if len(boundCIDRs) == 0 {
- // On inbound auths, if auth.BoundCIDRs is empty, it will be nil.
- // Let's match that behavior outbound.
- boundCIDRs = nil
- }
-
- return &logical.Auth{
- LeaseOptions: lo,
- TokenType: logical.TokenType(a.TokenType),
- InternalData: data,
- DisplayName: a.DisplayName,
- Policies: a.Policies,
- TokenPolicies: a.TokenPolicies,
- IdentityPolicies: a.IdentityPolicies,
- Metadata: a.Metadata,
- ClientToken: a.ClientToken,
- Accessor: a.Accessor,
- Period: time.Duration(a.Period),
- NumUses: int(a.NumUses),
- EntityID: a.EntityID,
- Alias: a.Alias,
- GroupAliases: a.GroupAliases,
- BoundCIDRs: boundCIDRs,
- ExplicitMaxTTL: time.Duration(a.ExplicitMaxTTL),
- }, nil
-}
-
-func LogicalTokenEntryToProtoTokenEntry(t *logical.TokenEntry) *TokenEntry {
- if t == nil {
- return nil
- }
-
- boundCIDRs := make([]string, len(t.BoundCIDRs))
- for i, cidr := range t.BoundCIDRs {
- boundCIDRs[i] = cidr.String()
- }
-
- return &TokenEntry{
- ID: t.ID,
- Accessor: t.Accessor,
- Parent: t.Parent,
- Policies: t.Policies,
- Path: t.Path,
- Meta: t.Meta,
- DisplayName: t.DisplayName,
- NumUses: int64(t.NumUses),
- CreationTime: t.CreationTime,
- TTL: int64(t.TTL),
- ExplicitMaxTTL: int64(t.ExplicitMaxTTL),
- Role: t.Role,
- Period: int64(t.Period),
- EntityID: t.EntityID,
- BoundCIDRs: boundCIDRs,
- NamespaceID: t.NamespaceID,
- CubbyholeID: t.CubbyholeID,
- Type: uint32(t.Type),
- }
-}
-
-func ProtoTokenEntryToLogicalTokenEntry(t *TokenEntry) (*logical.TokenEntry, error) {
- if t == nil {
- return nil, nil
- }
-
- boundCIDRs, err := parseutil.ParseAddrs(t.BoundCIDRs)
- if err != nil {
- return nil, err
- }
- if len(boundCIDRs) == 0 {
- // On inbound auths, if auth.BoundCIDRs is empty, it will be nil.
- // Let's match that behavior outbound.
- boundCIDRs = nil
- }
-
- return &logical.TokenEntry{
- ID: t.ID,
- Accessor: t.Accessor,
- Parent: t.Parent,
- Policies: t.Policies,
- Path: t.Path,
- Meta: t.Meta,
- DisplayName: t.DisplayName,
- NumUses: int(t.NumUses),
- CreationTime: t.CreationTime,
- TTL: time.Duration(t.TTL),
- ExplicitMaxTTL: time.Duration(t.ExplicitMaxTTL),
- Role: t.Role,
- Period: time.Duration(t.Period),
- EntityID: t.EntityID,
- BoundCIDRs: boundCIDRs,
- NamespaceID: t.NamespaceID,
- CubbyholeID: t.CubbyholeID,
- Type: logical.TokenType(t.Type),
- }, nil
-}
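
The error-translation functions above flatten typed errors into an (enum, message, code) tuple for the wire and rebuild them on the far side. A self-contained miniature of the same pattern, with stand-in types rather than the real pb package:

package main

import (
	"errors"
	"fmt"
)

const (
	errTypeUnknown uint32 = iota
	errTypeUserError
)

// userError stands in for errutil.UserError.
type userError struct{ msg string }

func (e userError) Error() string { return e.msg }

// protoError stands in for pb.ProtoError.
type protoError struct {
	errType uint32
	errMsg  string
}

func toProto(e error) *protoError {
	if e == nil {
		return nil
	}
	pe := &protoError{errType: errTypeUnknown, errMsg: e.Error()}
	if _, ok := e.(userError); ok {
		pe.errType = errTypeUserError
	}
	return pe
}

func fromProto(pe *protoError) error {
	if pe == nil {
		return nil
	}
	switch pe.errType {
	case errTypeUserError:
		return userError{msg: pe.errMsg}
	default:
		return errors.New(pe.errMsg)
	}
}

func main() {
	err := fromProto(toProto(userError{msg: "bad input"}))
	fmt.Printf("%T: %v\n", err, err) // main.userError: bad input
}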
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go b/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go
deleted file mode 100644
index 250097c2..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/plugin.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package plugin
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/rsa"
- "encoding/gob"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "github.com/hashicorp/errwrap"
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// init registers basic structs with gob; these will be used to transport
-// complex types through the plugin server and client.
-func init() {
- // Common basic structs
- gob.Register([]interface{}{})
- gob.Register(map[string]interface{}{})
- gob.Register(map[string]string{})
- gob.Register(map[string]int{})
-
- // Register these types since we have to serialize and de-serialize
- // tls.ConnectionState over the wire as part of logical.Request.Connection.
- gob.Register(rsa.PublicKey{})
- gob.Register(ecdsa.PublicKey{})
- gob.Register(time.Duration(0))
-
- // Custom common error types for requests. If you add something here, you must
- // also add it to the switch statement in `wrapError`!
- gob.Register(&plugin.BasicError{})
- gob.Register(logical.CodedError(0, ""))
- gob.Register(&logical.StatusBadRequest{})
-}
-
-// BackendPluginClient is a wrapper around backendPluginClient
-// that also contains its plugin.Client instance. It's primarily
-// used to cleanly kill the client on Cleanup()
-type BackendPluginClient struct {
- client *plugin.Client
- sync.Mutex
-
- logical.Backend
-}
-
-// Cleanup calls the RPC client's Cleanup() func and also calls
-// the go-plugin's client Kill() func
-func (b *BackendPluginClient) Cleanup(ctx context.Context) {
- b.Backend.Cleanup(ctx)
- b.client.Kill()
-}
-
-// NewBackend will return an instance of an RPC-based client implementation of the backend for
-// external plugins, or a concrete implementation of the backend if it is a builtin backend.
-// The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether
-// the plugin should run in metadata mode.
-func NewBackend(ctx context.Context, pluginName string, pluginType consts.PluginType, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig, isMetadataMode bool) (logical.Backend, error) {
- // Look for plugin in the plugin catalog
- pluginRunner, err := sys.LookupPlugin(ctx, pluginName, pluginType)
- if err != nil {
- return nil, err
- }
-
- var backend logical.Backend
- if pluginRunner.Builtin {
- // Plugin is builtin so we can retrieve an instance of the interface
- // from the pluginRunner. Then cast it to logical.Factory.
- rawFactory, err := pluginRunner.BuiltinFactory()
- if err != nil {
- return nil, errwrap.Wrapf("error getting plugin type: {{err}}", err)
- }
-
- if factory, ok := rawFactory.(logical.Factory); !ok {
- return nil, fmt.Errorf("unsupported backend type: %q", pluginName)
- } else {
- if backend, err = factory(ctx, conf); err != nil {
- return nil, err
- }
- }
- } else {
- // create a backendPluginClient instance
- backend, err = NewPluginClient(ctx, sys, pluginRunner, conf.Logger, isMetadataMode)
- if err != nil {
- return nil, err
- }
- }
-
- return backend, nil
-}
-
-func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (logical.Backend, error) {
-	// pluginSet maps protocol versions to the plugins we can dispense.
- pluginSet := map[int]plugin.PluginSet{
- 3: plugin.PluginSet{
- "backend": &BackendPlugin{
- GRPCBackendPlugin: &GRPCBackendPlugin{
- MetadataMode: isMetadataMode,
- },
- },
- },
- 4: plugin.PluginSet{
- "backend": &GRPCBackendPlugin{
- MetadataMode: isMetadataMode,
- },
- },
- }
-
- namedLogger := logger.Named(pluginRunner.Name)
-
- var client *plugin.Client
- var err error
- if isMetadataMode {
- client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginSet, handshakeConfig, []string{}, namedLogger)
- } else {
- client, err = pluginRunner.Run(ctx, sys, pluginSet, handshakeConfig, []string{}, namedLogger)
- }
- if err != nil {
- return nil, err
- }
-
- // Connect via RPC
- rpcClient, err := client.Client()
- if err != nil {
- return nil, err
- }
-
- // Request the plugin
- raw, err := rpcClient.Dispense("backend")
- if err != nil {
- return nil, err
- }
-
- var backend logical.Backend
- var transport string
- // We should have a logical backend type now. This feels like a normal interface
- // implementation but is in fact over an RPC connection.
- switch raw.(type) {
- case *backendPluginClient:
- logger.Warn("plugin is using deprecated netRPC transport, recompile plugin to upgrade to gRPC", "plugin", pluginRunner.Name)
- backend = raw.(*backendPluginClient)
- transport = "netRPC"
- case *backendGRPCPluginClient:
- backend = raw.(*backendGRPCPluginClient)
- transport = "gRPC"
- default:
- return nil, errors.New("unsupported plugin client type")
- }
-
- // Wrap the backend in a tracing middleware
- if namedLogger.IsTrace() {
- backend = &backendTracingMiddleware{
- logger: namedLogger.With("transport", transport),
- next: backend,
- }
- }
-
- return &BackendPluginClient{
- client: client,
- Backend: backend,
- }, nil
-}
-
-// wrapError takes a generic error type and makes it usable with the plugin
-// interface. Only errors which have exported fields and have been registered
-// with gob can be unwrapped and transported. This checks error types and, if
-// none match, wraps the error in a plugin.BasicError.
-func wrapError(err error) error {
- if err == nil {
- return nil
- }
-
- switch err.(type) {
- case *plugin.BasicError,
- logical.HTTPCodedError,
- *logical.StatusBadRequest:
- return err
- }
-
- return plugin.NewBasicError(err)
-}
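
The init function above registers concrete types because gob can only encode an interface value whose dynamic type has been registered; an unregistered type fails at encode time. A minimal standalone demonstration of that requirement:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

func main() {
	// Without this registration, encoding the interface value below
	// fails with "gob: type not registered for interface".
	gob.Register(map[string]interface{}{})

	var payload interface{} = map[string]interface{}{"key": "value"}

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(&payload); err != nil {
		panic(err)
	}

	var decoded interface{}
	if err := gob.NewDecoder(&buf).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded) // map[key:value]
}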
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/serve.go b/vendor/github.com/hashicorp/vault/logical/plugin/serve.go
deleted file mode 100644
index 97b9f28b..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/serve.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package plugin
-
-import (
- "crypto/tls"
- "math"
- "os"
-
- "google.golang.org/grpc"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// BackendPluginName is the name of the plugin that can be
-// dispensed from the plugin server.
-const BackendPluginName = "backend"
-
-type TLSProviderFunc func() (*tls.Config, error)
-
-type ServeOpts struct {
- BackendFactoryFunc logical.Factory
- TLSProviderFunc TLSProviderFunc
- Logger log.Logger
-}
-
-// Serve is a helper function used to serve a backend plugin. This
-// should be run in the plugin's main process.
-func Serve(opts *ServeOpts) error {
- logger := opts.Logger
- if logger == nil {
- logger = log.New(&log.LoggerOptions{
- Level: log.Trace,
- Output: os.Stderr,
- JSONFormat: true,
- })
- }
-
-	// pluginSets maps protocol versions to the plugins we can dispense.
- pluginSets := map[int]plugin.PluginSet{
- 3: plugin.PluginSet{
- "backend": &BackendPlugin{
- GRPCBackendPlugin: &GRPCBackendPlugin{
- Factory: opts.BackendFactoryFunc,
- Logger: logger,
- },
- },
- },
- 4: plugin.PluginSet{
- "backend": &GRPCBackendPlugin{
- Factory: opts.BackendFactoryFunc,
- Logger: logger,
- },
- },
- }
-
- err := pluginutil.OptionallyEnableMlock()
- if err != nil {
- return err
- }
-
- serveOpts := &plugin.ServeConfig{
- HandshakeConfig: handshakeConfig,
- VersionedPlugins: pluginSets,
- TLSProvider: opts.TLSProviderFunc,
- Logger: logger,
-
- // A non-nil value here enables gRPC serving for this plugin...
- GRPCServer: func(opts []grpc.ServerOption) *grpc.Server {
- opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32))
- opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32))
- return plugin.DefaultGRPCServer(opts)
- },
- }
-
- // If we do not have gRPC support fallback to version 3
- // Remove this block in 0.13
- if !pluginutil.GRPCSupport() {
- serveOpts.GRPCServer = nil
- delete(pluginSets, 4)
- }
-
- plugin.Serve(serveOpts)
-
- return nil
-}
-
-// handshakeConfig is used to do a basic handshake between
-// a plugin and host. If the handshake fails, a user-friendly error is shown.
-// This prevents users from executing bad plugins or executing a plugin
-// directly. It is a UX feature, not a security feature.
-var handshakeConfig = plugin.HandshakeConfig{
- ProtocolVersion: 4,
- MagicCookieKey: "VAULT_BACKEND_PLUGIN",
- MagicCookieValue: "6669da05-b1c8-4f49-97d9-c8e5bed98e20",
-}
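
For context, a plugin binary would typically wire Serve up in its main function roughly as follows. This is a sketch: factory is a hypothetical stand-in for the backend's own logical.Factory, and the pluginutil helpers are assumed to match the vendored version of Vault:

package main

import (
	"os"

	"github.com/hashicorp/vault/helper/pluginutil"
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/plugin"
)

// factory would normally come from the backend's own package; it is a
// placeholder here.
var factory logical.Factory

func main() {
	// Parse the TLS-related flags Vault passes to plugin processes.
	apiClientMeta := &pluginutil.APIClientMeta{}
	flags := apiClientMeta.FlagSet()
	flags.Parse(os.Args[1:])

	tlsConfig := apiClientMeta.GetTLSConfig()
	tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig)

	if err := plugin.Serve(&plugin.ServeOpts{
		BackendFactoryFunc: factory,
		TLSProviderFunc:    tlsProviderFunc,
	}); err != nil {
		os.Exit(1)
	}
}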
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/storage.go b/vendor/github.com/hashicorp/vault/logical/plugin/storage.go
deleted file mode 100644
index 75cda550..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/storage.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package plugin
-
-import (
- "context"
- "net/rpc"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// StorageClient is an implementation of logical.Storage that communicates
-// over RPC.
-type StorageClient struct {
- client *rpc.Client
-}
-
-func (s *StorageClient) List(_ context.Context, prefix string) ([]string, error) {
- var reply StorageListReply
- err := s.client.Call("Plugin.List", prefix, &reply)
- if err != nil {
- return reply.Keys, err
- }
- if reply.Error != nil {
- return reply.Keys, reply.Error
- }
- return reply.Keys, nil
-}
-
-func (s *StorageClient) Get(_ context.Context, key string) (*logical.StorageEntry, error) {
- var reply StorageGetReply
- err := s.client.Call("Plugin.Get", key, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
- return reply.StorageEntry, nil
-}
-
-func (s *StorageClient) Put(_ context.Context, entry *logical.StorageEntry) error {
- var reply StoragePutReply
- err := s.client.Call("Plugin.Put", entry, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
- return nil
-}
-
-func (s *StorageClient) Delete(_ context.Context, key string) error {
- var reply StorageDeleteReply
- err := s.client.Call("Plugin.Delete", key, &reply)
- if err != nil {
- return err
- }
- if reply.Error != nil {
- return reply.Error
- }
- return nil
-}
-
-// StorageServer is a net/rpc compatible structure for serving
-type StorageServer struct {
- impl logical.Storage
-}
-
-func (s *StorageServer) List(prefix string, reply *StorageListReply) error {
- keys, err := s.impl.List(context.Background(), prefix)
- *reply = StorageListReply{
- Keys: keys,
- Error: wrapError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Get(key string, reply *StorageGetReply) error {
- storageEntry, err := s.impl.Get(context.Background(), key)
- *reply = StorageGetReply{
- StorageEntry: storageEntry,
- Error: wrapError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Put(entry *logical.StorageEntry, reply *StoragePutReply) error {
- err := s.impl.Put(context.Background(), entry)
- *reply = StoragePutReply{
- Error: wrapError(err),
- }
- return nil
-}
-
-func (s *StorageServer) Delete(key string, reply *StorageDeleteReply) error {
- err := s.impl.Delete(context.Background(), key)
- *reply = StorageDeleteReply{
- Error: wrapError(err),
- }
- return nil
-}
-
-type StorageListReply struct {
- Keys []string
- Error error
-}
-
-type StorageGetReply struct {
- StorageEntry *logical.StorageEntry
- Error error
-}
-
-type StoragePutReply struct {
- Error error
-}
-
-type StorageDeleteReply struct {
- Error error
-}
-
-// NOOPStorage is used to deny access to the storage interface while running a
-// backend plugin in metadata mode.
-type NOOPStorage struct{}
-
-func (s *NOOPStorage) List(_ context.Context, prefix string) ([]string, error) {
- return []string{}, nil
-}
-
-func (s *NOOPStorage) Get(_ context.Context, key string) (*logical.StorageEntry, error) {
- return nil, nil
-}
-
-func (s *NOOPStorage) Put(_ context.Context, entry *logical.StorageEntry) error {
- return nil
-}
-
-func (s *NOOPStorage) Delete(_ context.Context, key string) error {
- return nil
-}
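
The client/server pair above leans on net/rpc's naming convention: the server is registered under the name "Plugin", and the client dispatches with "Plugin.<Method>" strings such as "Plugin.List". A self-contained sketch of that convention, independent of Vault:

package main

import (
	"fmt"
	"net"
	"net/rpc"
)

type Plugin struct{}

type ListReply struct{ Keys []string }

// List follows net/rpc's required method shape: exported, two arguments
// (args, *reply), returning error.
func (p *Plugin) List(prefix string, reply *ListReply) error {
	reply.Keys = []string{prefix + "a", prefix + "b"}
	return nil
}

func main() {
	srv := rpc.NewServer()
	srv.RegisterName("Plugin", &Plugin{})

	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go srv.Accept(ln)

	client, err := rpc.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	var reply ListReply
	if err := client.Call("Plugin.List", "secret/", &reply); err != nil {
		panic(err)
	}
	fmt.Println(reply.Keys) // [secret/a secret/b]
}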
diff --git a/vendor/github.com/hashicorp/vault/logical/plugin/system.go b/vendor/github.com/hashicorp/vault/logical/plugin/system.go
deleted file mode 100644
index 148f39a9..00000000
--- a/vendor/github.com/hashicorp/vault/logical/plugin/system.go
+++ /dev/null
@@ -1,351 +0,0 @@
-package plugin
-
-import (
- "context"
- "net/rpc"
- "time"
-
- "fmt"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/license"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
-)
-
-type SystemViewClient struct {
- client *rpc.Client
-}
-
-func (s *SystemViewClient) DefaultLeaseTTL() time.Duration {
- var reply DefaultLeaseTTLReply
- err := s.client.Call("Plugin.DefaultLeaseTTL", new(interface{}), &reply)
- if err != nil {
- return 0
- }
-
- return reply.DefaultLeaseTTL
-}
-
-func (s *SystemViewClient) MaxLeaseTTL() time.Duration {
- var reply MaxLeaseTTLReply
- err := s.client.Call("Plugin.MaxLeaseTTL", new(interface{}), &reply)
- if err != nil {
- return 0
- }
-
- return reply.MaxLeaseTTL
-}
-
-func (s *SystemViewClient) SudoPrivilege(ctx context.Context, path string, token string) bool {
- var reply SudoPrivilegeReply
- args := &SudoPrivilegeArgs{
- Path: path,
- Token: token,
- }
-
- err := s.client.Call("Plugin.SudoPrivilege", args, &reply)
- if err != nil {
- return false
- }
-
- return reply.Sudo
-}
-
-func (s *SystemViewClient) Tainted() bool {
- var reply TaintedReply
-
- err := s.client.Call("Plugin.Tainted", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.Tainted
-}
-
-func (s *SystemViewClient) CachingDisabled() bool {
- var reply CachingDisabledReply
-
- err := s.client.Call("Plugin.CachingDisabled", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.CachingDisabled
-}
-
-func (s *SystemViewClient) ReplicationState() consts.ReplicationState {
- var reply ReplicationStateReply
-
- err := s.client.Call("Plugin.ReplicationState", new(interface{}), &reply)
- if err != nil {
- return consts.ReplicationUnknown
- }
-
- return reply.ReplicationState
-}
-
-func (s *SystemViewClient) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
- var reply ResponseWrapDataReply
- // Do not allow JWTs to be returned
- args := &ResponseWrapDataArgs{
- Data: data,
- TTL: ttl,
- JWT: false,
- }
-
- err := s.client.Call("Plugin.ResponseWrapData", args, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
-
- return reply.ResponseWrapInfo, nil
-}
-
-func (s *SystemViewClient) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) {
- return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend")
-}
-
-func (s *SystemViewClient) HasFeature(feature license.Features) bool {
- // Not implemented
- return false
-}
-
-func (s *SystemViewClient) MlockEnabled() bool {
- var reply MlockEnabledReply
- err := s.client.Call("Plugin.MlockEnabled", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.MlockEnabled
-}
-
-func (s *SystemViewClient) LocalMount() bool {
- var reply LocalMountReply
- err := s.client.Call("Plugin.LocalMount", new(interface{}), &reply)
- if err != nil {
- return false
- }
-
- return reply.Local
-}
-
-func (s *SystemViewClient) EntityInfo(entityID string) (*logical.Entity, error) {
- var reply EntityInfoReply
- args := &EntityInfoArgs{
- EntityID: entityID,
- }
-
- err := s.client.Call("Plugin.EntityInfo", args, &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
-
- return reply.Entity, nil
-}
-
-func (s *SystemViewClient) PluginEnv(_ context.Context) (*logical.PluginEnvironment, error) {
- var reply PluginEnvReply
-
- err := s.client.Call("Plugin.PluginEnv", new(interface{}), &reply)
- if err != nil {
- return nil, err
- }
- if reply.Error != nil {
- return nil, reply.Error
- }
-
- return reply.PluginEnvironment, nil
-}
-
-type SystemViewServer struct {
- impl logical.SystemView
-}
-
-func (s *SystemViewServer) DefaultLeaseTTL(_ interface{}, reply *DefaultLeaseTTLReply) error {
- ttl := s.impl.DefaultLeaseTTL()
- *reply = DefaultLeaseTTLReply{
- DefaultLeaseTTL: ttl,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) MaxLeaseTTL(_ interface{}, reply *MaxLeaseTTLReply) error {
- ttl := s.impl.MaxLeaseTTL()
- *reply = MaxLeaseTTLReply{
- MaxLeaseTTL: ttl,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) SudoPrivilege(args *SudoPrivilegeArgs, reply *SudoPrivilegeReply) error {
- sudo := s.impl.SudoPrivilege(context.Background(), args.Path, args.Token)
- *reply = SudoPrivilegeReply{
- Sudo: sudo,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) Tainted(_ interface{}, reply *TaintedReply) error {
- tainted := s.impl.Tainted()
- *reply = TaintedReply{
- Tainted: tainted,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) CachingDisabled(_ interface{}, reply *CachingDisabledReply) error {
- cachingDisabled := s.impl.CachingDisabled()
- *reply = CachingDisabledReply{
- CachingDisabled: cachingDisabled,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) ReplicationState(_ interface{}, reply *ReplicationStateReply) error {
- replicationState := s.impl.ReplicationState()
- *reply = ReplicationStateReply{
- ReplicationState: replicationState,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) ResponseWrapData(args *ResponseWrapDataArgs, reply *ResponseWrapDataReply) error {
- // Do not allow JWTs to be returned
- info, err := s.impl.ResponseWrapData(context.Background(), args.Data, args.TTL, false)
- if err != nil {
- *reply = ResponseWrapDataReply{
- Error: wrapError(err),
- }
- return nil
- }
- *reply = ResponseWrapDataReply{
- ResponseWrapInfo: info,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) MlockEnabled(_ interface{}, reply *MlockEnabledReply) error {
- enabled := s.impl.MlockEnabled()
- *reply = MlockEnabledReply{
- MlockEnabled: enabled,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) LocalMount(_ interface{}, reply *LocalMountReply) error {
- local := s.impl.LocalMount()
- *reply = LocalMountReply{
- Local: local,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) EntityInfo(args *EntityInfoArgs, reply *EntityInfoReply) error {
- entity, err := s.impl.EntityInfo(args.EntityID)
- if err != nil {
- *reply = EntityInfoReply{
- Error: wrapError(err),
- }
- return nil
- }
- *reply = EntityInfoReply{
- Entity: entity,
- }
-
- return nil
-}
-
-func (s *SystemViewServer) PluginEnv(_ interface{}, reply *PluginEnvReply) error {
- pluginEnv, err := s.impl.PluginEnv(context.Background())
- if err != nil {
- *reply = PluginEnvReply{
- Error: wrapError(err),
- }
- return nil
- }
- *reply = PluginEnvReply{
- PluginEnvironment: pluginEnv,
- }
-
- return nil
-}
-
-type DefaultLeaseTTLReply struct {
- DefaultLeaseTTL time.Duration
-}
-
-type MaxLeaseTTLReply struct {
- MaxLeaseTTL time.Duration
-}
-
-type SudoPrivilegeArgs struct {
- Path string
- Token string
-}
-
-type SudoPrivilegeReply struct {
- Sudo bool
-}
-
-type TaintedReply struct {
- Tainted bool
-}
-
-type CachingDisabledReply struct {
- CachingDisabled bool
-}
-
-type ReplicationStateReply struct {
- ReplicationState consts.ReplicationState
-}
-
-type ResponseWrapDataArgs struct {
- Data map[string]interface{}
- TTL time.Duration
- JWT bool
-}
-
-type ResponseWrapDataReply struct {
- ResponseWrapInfo *wrapping.ResponseWrapInfo
- Error error
-}
-
-type MlockEnabledReply struct {
- MlockEnabled bool
-}
-
-type LocalMountReply struct {
- Local bool
-}
-
-type EntityInfoArgs struct {
- EntityID string
-}
-
-type EntityInfoReply struct {
- Entity *logical.Entity
- Error error
-}
-
-type PluginEnvReply struct {
- PluginEnvironment *logical.PluginEnvironment
- Error error
-}
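
Reviewer note (annotation, not part of the diff): the file above repeats one net/rpc pattern — a typed reply struct per method, and client wrappers that degrade to zero values on transport errors. A self-contained sketch of that pattern follows; TTLServer and TTLReply are illustrative names, not Vault API, and the unused args are simplified to an int.

package main

import (
	"fmt"
	"net"
	"net/rpc"
	"time"
)

// TTLReply mirrors the typed reply structs used in the deleted file.
type TTLReply struct{ TTL time.Duration }

// TTLServer plays the role of SystemViewServer for a single method.
type TTLServer struct{ ttl time.Duration }

// DefaultLeaseTTL fills in the reply; transport failures surface on the client.
func (s *TTLServer) DefaultLeaseTTL(_ int, reply *TTLReply) error {
	*reply = TTLReply{TTL: s.ttl}
	return nil
}

func main() {
	srv := rpc.NewServer()
	if err := srv.RegisterName("Plugin", &TTLServer{ttl: time.Hour}); err != nil {
		panic(err)
	}
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go srv.Accept(ln)

	client, err := rpc.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	var reply TTLReply
	if err := client.Call("Plugin.DefaultLeaseTTL", 0, &reply); err != nil {
		fmt.Println(time.Duration(0)) // the wrappers above return zero values on error
		return
	}
	fmt.Println(reply.TTL) // 1h0m0s
}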
diff --git a/vendor/github.com/hashicorp/vault/logical/request.go b/vendor/github.com/hashicorp/vault/logical/request.go
deleted file mode 100644
index 8380270d..00000000
--- a/vendor/github.com/hashicorp/vault/logical/request.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package logical
-
-import (
- "fmt"
- "strings"
- "time"
-)
-
-// RequestWrapInfo is a struct that stores information about desired response
-// and seal wrapping behavior
-type RequestWrapInfo struct {
- // Setting to non-zero specifies that the response should be wrapped.
- // Specifies the desired TTL of the wrapping token.
- TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""`
-
- // The format to use for the wrapped response; if not specified it's a bare
- // token
- Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""`
-
- // A flag telling conforming backends that the data for a given request
- // should be seal wrapped
- SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""`
-}
-
-func (r *RequestWrapInfo) SentinelGet(key string) (interface{}, error) {
- if r == nil {
- return nil, nil
- }
- switch key {
- case "ttl":
- return r.TTL, nil
- case "ttl_seconds":
- return int64(r.TTL.Seconds()), nil
- }
-
- return nil, nil
-}
-
-func (r *RequestWrapInfo) SentinelKeys() []string {
- return []string{
- "ttl",
- "ttl_seconds",
- }
-}
-
-// Request is a struct that stores the parameters and context of a request
-// being made to Vault. It is used to abstract the details of the higher level
-// request protocol from the handlers.
-//
-// Note: Many of these have Sentinel disabled because they are values populated
-// by the router after policy checks; the token namespace would be the right
-// place to access them via Sentinel
-type Request struct {
- entReq
-
- // ID is the UUID associated with each request
- ID string `json:"id" structs:"id" mapstructure:"id" sentinel:""`
-
- // If set, the name given to the replication secondary where this request
- // originated
- ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster" mapstructure:"replication_cluster" sentinel:""`
-
- // Operation is the requested operation type
- Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"`
-
- // Path is the part of the request path not consumed by the
- // routing. As an example, if the original request path is "prod/aws/foo"
- // and the AWS logical backend is mounted at "prod/aws/", then the
- // final path is "foo" since the mount prefix is trimmed.
- Path string `json:"path" structs:"path" mapstructure:"path" sentinel:""`
-
- // Request data is an opaque map that must have string keys.
- Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"`
-
- // Storage can be used to durably store and retrieve state.
- Storage Storage `json:"-" sentinel:""`
-
- // Secret will be non-nil only for Revoke and Renew operations
- // to represent the secret that was returned prior.
- Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret" sentinel:""`
-
- // Auth will be non-nil only for Renew operations
- // to represent the auth that was returned prior.
- Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth" sentinel:""`
-
- // Headers will contain the http headers from the request. This value will
- // be used in the audit broker to ensure we are auditing only the allowed
- // headers.
- Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers" sentinel:""`
-
- // Connection will be non-nil only for credential providers to
- // inspect the connection information and potentially use it for
- // authentication/protection.
- Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"`
-
- // ClientToken is provided to the core so that the identity
- // can be verified and ACLs applied. This value is passed
- // through to the logical backends but after being salted and
- // hashed.
- ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token" sentinel:""`
-
- // ClientTokenAccessor is provided to the core so that it can be logged
- // as part of request audit logging.
- ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor" sentinel:""`
-
- // DisplayName is provided to the logical backend to help associate
- // dynamic secrets with the source entity. This is not a sensitive
- // name, but is useful for operators.
- DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name" sentinel:""`
-
- // MountPoint is provided so that a logical backend can generate
- // paths relative to itself. The `Path` is effectively the client
- // request path with the MountPoint trimmed off.
- MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point" sentinel:""`
-
- // MountType is provided so that a logical backend can make decisions
- // based on the specific mount type (e.g., if a mount type has different
- // aliases, generating different defaults depending on the alias)
- MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type" sentinel:""`
-
- // MountAccessor is provided so that identities returned by the authentication
- // backends can be tied to the mount it belongs to.
- MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""`
-
- // WrapInfo contains requested response wrapping parameters
- WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""`
-
- // ClientTokenRemainingUses represents the allowed number of uses left on the
- // token supplied
- ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"`
-
- // EntityID is the identity of the caller extracted out of the token used
- // to make this request
- EntityID string `json:"entity_id" structs:"entity_id" mapstructure:"entity_id" sentinel:""`
-
- // PolicyOverride indicates that the requestor wishes to override
- // soft-mandatory Sentinel policies
- PolicyOverride bool `json:"policy_override" structs:"policy_override" mapstructure:"policy_override"`
-
- // Whether the request is unauthenticated, i.e., it had no client token
- // attached. Useful in some situations where the client token is not made
- // accessible.
- Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"`
-
- // MFACreds holds the parsed MFA information supplied over the API as part of
- // X-Vault-MFA header
- MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""`
-
- // Cached token entry. This avoids another lookup in request handling when
- // we've already looked it up at http handling time. Note that this token
- // has not been "used", as in it will not properly take into account use
- // count limitations. As a result this field should only ever be used for
- // transport to a function that would otherwise do a lookup and then
- // properly use the token.
- tokenEntry *TokenEntry
-
- // For replication, contains the last WAL on the remote side after handling
- // the request, used for best-effort avoidance of stale read-after-write
- lastRemoteWAL uint64
-}
-
-// Get returns a data field and guards for nil Data
-func (r *Request) Get(key string) interface{} {
- if r.Data == nil {
- return nil
- }
- return r.Data[key]
-}
-
-// GetString returns a data field as a string
-func (r *Request) GetString(key string) string {
- raw := r.Get(key)
- s, _ := raw.(string)
- return s
-}
-
-func (r *Request) GoString() string {
- return fmt.Sprintf("*%#v", *r)
-}
-
-func (r *Request) SentinelGet(key string) (interface{}, error) {
- switch key {
- case "path":
- // Sanitize it here so that it's consistent in policies
- return strings.TrimPrefix(r.Path, "/"), nil
-
- case "wrapping", "wrap_info":
- // If the pointer is nil accessing the wrap info is considered
- // "undefined" so this allows us to instead discover a TTL of zero
- if r.WrapInfo == nil {
- return &RequestWrapInfo{}, nil
- }
- return r.WrapInfo, nil
- }
-
- return nil, nil
-}
-
-func (r *Request) SentinelKeys() []string {
- return []string{
- "path",
- "wrapping",
- "wrap_info",
- }
-}
-
-func (r *Request) LastRemoteWAL() uint64 {
- return r.lastRemoteWAL
-}
-
-func (r *Request) SetLastRemoteWAL(last uint64) {
- r.lastRemoteWAL = last
-}
-
-func (r *Request) TokenEntry() *TokenEntry {
- return r.tokenEntry
-}
-
-func (r *Request) SetTokenEntry(te *TokenEntry) {
- r.tokenEntry = te
-}
-
-// RenewRequest creates the structure of the renew request.
-func RenewRequest(path string, secret *Secret, data map[string]interface{}) *Request {
- return &Request{
- Operation: RenewOperation,
- Path: path,
- Data: data,
- Secret: secret,
- }
-}
-
-// RenewAuthRequest creates the structure of the renew request for an auth.
-func RenewAuthRequest(path string, auth *Auth, data map[string]interface{}) *Request {
- return &Request{
- Operation: RenewOperation,
- Path: path,
- Data: data,
- Auth: auth,
- }
-}
-
-// RevokeRequest creates the structure of the revoke request.
-func RevokeRequest(path string, secret *Secret, data map[string]interface{}) *Request {
- return &Request{
- Operation: RevokeOperation,
- Path: path,
- Data: data,
- Secret: secret,
- }
-}
-
- // RollbackRequest creates the structure of the rollback request.
-func RollbackRequest(path string) *Request {
- return &Request{
- Operation: RollbackOperation,
- Path: path,
- Data: make(map[string]interface{}),
- }
-}
-
-// Operation is an enum that is used to specify the type
-// of request being made
-type Operation string
-
-const (
- // The operations below are called per path
- CreateOperation Operation = "create"
- ReadOperation = "read"
- UpdateOperation = "update"
- DeleteOperation = "delete"
- ListOperation = "list"
- HelpOperation = "help"
- AliasLookaheadOperation = "alias-lookahead"
-
- // The operations below are called globally, the path is less relevant.
- RevokeOperation Operation = "revoke"
- RenewOperation = "renew"
- RollbackOperation = "rollback"
-)
-
-type MFACreds map[string][]string
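
Reviewer note (annotation, not part of the diff): a short sketch of the removed Request helpers, with hypothetical path and data values.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/logical"
)

func main() {
	req := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "roles/example",
		Data:      map[string]interface{}{"ttl": "1h"},
	}

	fmt.Println(req.GetString("ttl")) // "1h"
	fmt.Println(req.Get("missing"))   // <nil>; Get guards against a nil Data map
}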
diff --git a/vendor/github.com/hashicorp/vault/logical/request_util.go b/vendor/github.com/hashicorp/vault/logical/request_util.go
deleted file mode 100644
index 38d6e3d5..00000000
--- a/vendor/github.com/hashicorp/vault/logical/request_util.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !enterprise
-
-package logical
-
-type entReq struct {
- ControlGroup interface{}
-}
-
-func (r *Request) EntReq() *entReq {
- return &entReq{}
-}
-
-func (r *Request) SetEntReq(*entReq) {
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/response.go b/vendor/github.com/hashicorp/vault/logical/response.go
deleted file mode 100644
index 02ffa34c..00000000
--- a/vendor/github.com/hashicorp/vault/logical/response.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package logical
-
-import (
- "encoding/json"
- "errors"
-
- "github.com/hashicorp/vault/helper/wrapping"
-)
-
-const (
- // HTTPContentType can be specified in the Data field of a Response
- // so that the HTTP front end can specify a custom Content-Type associated
- // with the HTTPRawBody. This can only be used for non-secrets, and should
- // be avoided unless absolutely necessary, such as implementing a specification.
- // The value must be a string.
- HTTPContentType = "http_content_type"
-
- // HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType.
- // This can only be specified for non-secrets and, like HTTPContentType,
- // should be avoided unless necessary. The value must be a byte slice.
- HTTPRawBody = "http_raw_body"
-
- // HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType.
- // This can only be specified for non-secrets and, like HTTPContentType,
- // should be avoided unless necessary. The value must be an integer.
- HTTPStatusCode = "http_status_code"
-
- // For unwrapping we may need to know whether the value contained in the
- // raw body is already JSON-unmarshaled. The presence of this key indicates
- // that it has already been unmarshaled, so consumers don't need to attempt
- // a second decode and silently ignore the resulting errors.
- HTTPRawBodyAlreadyJSONDecoded = "http_raw_body_already_json_decoded"
-)
-
-// Response is a struct that stores the response of a request.
-// It is used to abstract the details of the higher level request protocol.
-type Response struct {
- // Secret, if not nil, denotes that this response represents a secret.
- Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`
-
- // Auth, if not nil, contains the authentication information for
- // this response. This is only checked and means something for
- // credential backends.
- Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`
-
- // Response data is an opaque map that must have string keys. For
- // secrets, this data is sent down to the user as-is. To store internal
- // data that you don't want the user to see, store it in
- // Secret.InternalData.
- Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"`
-
- // Redirect is an HTTP URL to redirect to for further authentication.
- // This is only valid for credential backends. This will be blanked
- // for any logical backend and ignored.
- Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"`
-
- // Warnings allow operations or backends to return warnings in response
- // to user actions without failing the action outright.
- Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`
-
- // Information for wrapping the response in a cubbyhole
- WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
-}
-
-// AddWarning adds a warning into the response's warning list
-func (r *Response) AddWarning(warning string) {
- if r.Warnings == nil {
- r.Warnings = make([]string, 0, 1)
- }
- r.Warnings = append(r.Warnings, warning)
-}
-
-// IsError returns true if this response seems to indicate an error.
-func (r *Response) IsError() bool {
- return r != nil && r.Data != nil && len(r.Data) == 1 && r.Data["error"] != nil
-}
-
-func (r *Response) Error() error {
- if !r.IsError() {
- return nil
- }
- switch r.Data["error"].(type) {
- case string:
- return errors.New(r.Data["error"].(string))
- case error:
- return r.Data["error"].(error)
- }
- return nil
-}
-
-// HelpResponse is used to format a help response
-func HelpResponse(text string, seeAlso []string, oapiDoc interface{}) *Response {
- return &Response{
- Data: map[string]interface{}{
- "help": text,
- "see_also": seeAlso,
- "openapi": oapiDoc,
- },
- }
-}
-
-// ErrorResponse is used to format an error response
-func ErrorResponse(text string) *Response {
- return &Response{
- Data: map[string]interface{}{
- "error": text,
- },
- }
-}
-
-// ListResponse is used to format a response to a list operation.
-func ListResponse(keys []string) *Response {
- resp := &Response{
- Data: map[string]interface{}{},
- }
- if len(keys) != 0 {
- resp.Data["keys"] = keys
- }
- return resp
-}
-
-// ListResponseWithInfo is used to format a response to a list operation and
-// return the keys as well as a map with corresponding key info.
-func ListResponseWithInfo(keys []string, keyInfo map[string]interface{}) *Response {
- resp := ListResponse(keys)
-
- keyInfoData := make(map[string]interface{})
- for _, key := range keys {
- val, ok := keyInfo[key]
- if ok {
- keyInfoData[key] = val
- }
- }
-
- if len(keyInfoData) > 0 {
- resp.Data["key_info"] = keyInfoData
- }
-
- return resp
-}
-
-// RespondWithStatusCode takes a response and converts it to a raw response with
-// the provided Status Code.
-func RespondWithStatusCode(resp *Response, req *Request, code int) (*Response, error) {
- ret := &Response{
- Data: map[string]interface{}{
- HTTPContentType: "application/json",
- HTTPStatusCode: code,
- },
- }
-
- if resp != nil {
- httpResp := LogicalResponseToHTTPResponse(resp)
-
- if req != nil {
- httpResp.RequestID = req.ID
- }
-
- body, err := json.Marshal(httpResp)
- if err != nil {
- return nil, err
- }
-
- // We default to string here so that the value is HMAC'd via audit.
- // Since this function is always marshaling to JSON, this is
- // appropriate.
- ret.Data[HTTPRawBody] = string(body)
- }
-
- return ret, nil
-}
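
Reviewer note (annotation, not part of the diff): a sketch of the removed response constructors. ListResponse omits "keys" when the list is empty, and key_info only carries entries for keys actually present.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/logical"
)

func main() {
	resp := logical.ListResponseWithInfo(
		[]string{"a", "b"},
		map[string]interface{}{"a": map[string]interface{}{"ttl": 60}},
	)
	fmt.Println(resp.Data["keys"])     // [a b]
	fmt.Println(resp.Data["key_info"]) // map[a:map[ttl:60]]

	errResp := logical.ErrorResponse("role not found")
	fmt.Println(errResp.IsError(), errResp.Error()) // true role not found
}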
diff --git a/vendor/github.com/hashicorp/vault/logical/response_util.go b/vendor/github.com/hashicorp/vault/logical/response_util.go
deleted file mode 100644
index b4df6323..00000000
--- a/vendor/github.com/hashicorp/vault/logical/response_util.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package logical
-
-import (
- "errors"
- "fmt"
- "net/http"
-
- "github.com/hashicorp/errwrap"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/consts"
-)
-
- // RespondErrorCommon pulls most of the functionality from http's
- // respondErrorCommon and some of http's handleLogical and makes it
- // available both to the http package and to other callers.
-func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
- if err == nil && (resp == nil || !resp.IsError()) {
- switch {
- case req.Operation == ReadOperation:
- if resp == nil {
- return http.StatusNotFound, nil
- }
-
- // Basically: if we have empty "keys" or no keys at all, 404. This
- // provides consistency with GET.
- case req.Operation == ListOperation && resp.WrapInfo == nil:
- if resp == nil {
- return http.StatusNotFound, nil
- }
- if len(resp.Data) == 0 {
- if len(resp.Warnings) > 0 {
- return 0, nil
- }
- return http.StatusNotFound, nil
- }
- keysRaw, ok := resp.Data["keys"]
- if !ok || keysRaw == nil {
- // If we don't have keys but have other data, return as-is
- if len(resp.Data) > 0 || len(resp.Warnings) > 0 {
- return 0, nil
- }
- return http.StatusNotFound, nil
- }
-
- var keys []string
- switch keysRaw.(type) {
- case []interface{}:
- keys = make([]string, len(keysRaw.([]interface{})))
- for i, el := range keysRaw.([]interface{}) {
- s, ok := el.(string)
- if !ok {
- return http.StatusInternalServerError, nil
- }
- keys[i] = s
- }
-
- case []string:
- keys = keysRaw.([]string)
- default:
- return http.StatusInternalServerError, nil
- }
-
- if len(keys) == 0 {
- return http.StatusNotFound, nil
- }
- }
-
- return 0, nil
- }
-
- if errwrap.ContainsType(err, new(ReplicationCodedError)) {
- var allErrors error
- var codedErr *ReplicationCodedError
- errwrap.Walk(err, func(inErr error) {
- newErr, ok := inErr.(*ReplicationCodedError)
- if ok {
- codedErr = newErr
- } else {
- allErrors = multierror.Append(allErrors, inErr)
- }
- })
- if allErrors != nil {
- return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors)
- }
- return codedErr.Code, errors.New(codedErr.Msg)
- }
-
- // Start out with internal server error since in most of these cases there
- // won't be a response so this won't be overridden
- statusCode := http.StatusInternalServerError
- // If we actually have a response, start out with bad request
- if resp != nil {
- statusCode = http.StatusBadRequest
- }
-
- // Now, check the error itself; if it has a specific logical error, set the
- // appropriate code
- if err != nil {
- switch {
- case errwrap.ContainsType(err, new(StatusBadRequest)):
- statusCode = http.StatusBadRequest
- case errwrap.Contains(err, ErrPermissionDenied.Error()):
- statusCode = http.StatusForbidden
- case errwrap.Contains(err, ErrUnsupportedOperation.Error()):
- statusCode = http.StatusMethodNotAllowed
- case errwrap.Contains(err, ErrUnsupportedPath.Error()):
- statusCode = http.StatusNotFound
- case errwrap.Contains(err, ErrInvalidRequest.Error()):
- statusCode = http.StatusBadRequest
- case errwrap.Contains(err, ErrUpstreamRateLimited.Error()):
- statusCode = http.StatusBadGateway
- }
- }
-
- if resp != nil && resp.IsError() {
- err = fmt.Errorf("%s", resp.Data["error"].(string))
- }
-
- return statusCode, err
-}
-
-// AdjustErrorStatusCode adjusts the status that will be sent in error
-// conditions in a way that can be shared across http's respondError and other
-// locations.
-func AdjustErrorStatusCode(status *int, err error) {
- // Handle nested errors
- if t, ok := err.(*multierror.Error); ok {
- for _, e := range t.Errors {
- AdjustErrorStatusCode(status, e)
- }
- }
-
- // Adjust status code when sealed
- if errwrap.Contains(err, consts.ErrSealed.Error()) {
- *status = http.StatusServiceUnavailable
- }
-
- // Adjust status code when the request body is too large
- if errwrap.Contains(err, "http: request body too large") {
- *status = http.StatusRequestEntityTooLarge
- }
-
- // Allow HTTPCoded error passthrough to specify a code
- if t, ok := err.(HTTPCodedError); ok {
- *status = t.Code()
- }
-}
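
Reviewer note (annotation, not part of the diff): a sketch of how the removed helpers map logical errors onto HTTP status codes, assuming ErrUnsupportedPath and consts.ErrSealed from the same pre-removal tree.

package main

import (
	"fmt"
	"net/http"

	"github.com/hashicorp/vault/helper/consts"
	"github.com/hashicorp/vault/logical"
)

func main() {
	req := &logical.Request{Operation: logical.ReadOperation}

	// A nil response to a read maps to 404.
	code, _ := logical.RespondErrorCommon(req, nil, nil)
	fmt.Println(code == http.StatusNotFound) // true

	// A recognized sentinel error picks the matching code (here 404).
	code, _ = logical.RespondErrorCommon(req, nil, logical.ErrUnsupportedPath)
	fmt.Println(code) // 404

	// AdjustErrorStatusCode rewrites an existing status for sealed errors.
	status := http.StatusInternalServerError
	logical.AdjustErrorStatusCode(&status, consts.ErrSealed)
	fmt.Println(status) // 503
}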
diff --git a/vendor/github.com/hashicorp/vault/logical/secret.go b/vendor/github.com/hashicorp/vault/logical/secret.go
deleted file mode 100644
index a2128d86..00000000
--- a/vendor/github.com/hashicorp/vault/logical/secret.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package logical
-
-import "fmt"
-
-// Secret represents the secret part of a response.
-type Secret struct {
- LeaseOptions
-
- // InternalData is JSON-encodable data that is stored with the secret.
- // This will be sent back during a Renew/Revoke for storing internal data
- // used for those operations.
- InternalData map[string]interface{} `json:"internal_data" sentinel:""`
-
- // LeaseID is the ID returned to the user to manage this secret.
- // This is generated by Vault core. Any set value will be ignored.
- // For requests, this will always be blank.
- LeaseID string `sentinel:""`
-}
-
-func (s *Secret) Validate() error {
- if s.TTL < 0 {
- return fmt.Errorf("ttl duration must not be less than zero")
- }
-
- return nil
-}
-
-func (s *Secret) GoString() string {
- return fmt.Sprintf("*%#v", *s)
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/storage.go b/vendor/github.com/hashicorp/vault/logical/storage.go
deleted file mode 100644
index 116fd301..00000000
--- a/vendor/github.com/hashicorp/vault/logical/storage.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package logical
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-// ErrReadOnly is returned when a backend does not support
-// writing. This can be caused by a read-only replica or secondary
-// cluster operation.
-var ErrReadOnly = errors.New("cannot write to readonly storage")
-
-// ErrSetupReadOnly is returned when a write operation is attempted on a
- // storage while the backend is still being set up.
-var ErrSetupReadOnly = errors.New("cannot write to storage during setup")
-
- // Storage is the way that logical backends are able to read and write data.
-type Storage interface {
- List(context.Context, string) ([]string, error)
- Get(context.Context, string) (*StorageEntry, error)
- Put(context.Context, *StorageEntry) error
- Delete(context.Context, string) error
-}
-
-// StorageEntry is the entry for an item in a Storage implementation.
-type StorageEntry struct {
- Key string
- Value []byte
- SealWrap bool
-}
-
-// DecodeJSON decodes the 'Value' present in StorageEntry.
-func (e *StorageEntry) DecodeJSON(out interface{}) error {
- return jsonutil.DecodeJSON(e.Value, out)
-}
-
-// StorageEntryJSON creates a StorageEntry with a JSON-encoded value.
-func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) {
- encodedBytes, err := jsonutil.EncodeJSON(v)
- if err != nil {
- return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err)
- }
-
- return &StorageEntry{
- Key: k,
- Value: encodedBytes,
- }, nil
-}
-
-type ClearableView interface {
- List(context.Context, string) ([]string, error)
- Delete(context.Context, string) error
-}
-
-// ScanView is used to scan all the keys in a view iteratively
-func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error {
- frontier := []string{""}
- for len(frontier) > 0 {
- n := len(frontier)
- current := frontier[n-1]
- frontier = frontier[:n-1]
-
- // List the contents
- contents, err := view.List(ctx, current)
- if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err)
- }
-
- // Handle the contents in the directory
- for _, c := range contents {
- fullPath := current + c
- if strings.HasSuffix(c, "/") {
- frontier = append(frontier, fullPath)
- } else {
- cb(fullPath)
- }
- }
- }
- return nil
-}
-
-// CollectKeys is used to collect all the keys in a view
-func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) {
- // Accumulate the keys
- var existing []string
- cb := func(path string) {
- existing = append(existing, path)
- }
-
- // Scan for all the keys
- if err := ScanView(ctx, view, cb); err != nil {
- return nil, err
- }
- return existing, nil
-}
-
-// ClearView is used to delete all the keys in a view
-func ClearView(ctx context.Context, view ClearableView) error {
- if view == nil {
- return nil
- }
-
- // Collect all the keys
- keys, err := CollectKeys(ctx, view)
- if err != nil {
- return err
- }
-
- // Delete all the keys
- for _, key := range keys {
- if err := view.Delete(ctx, key); err != nil {
- return err
- }
- }
- return nil
-}
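
Reviewer note (annotation, not part of the diff): a sketch of the removed view helpers, backed by the InmemStorage from the next deleted file.

package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/logical"
)

func main() {
	ctx := context.Background()
	s := new(logical.InmemStorage)

	for _, k := range []string{"roles/a", "roles/b", "config"} {
		entry, _ := logical.StorageEntryJSON(k, map[string]string{"name": k})
		_ = s.Put(ctx, entry)
	}

	keys, _ := logical.CollectKeys(ctx, s) // walks nested prefixes via ScanView
	fmt.Println(keys)                      // e.g. [config roles/a roles/b]

	_ = logical.ClearView(ctx, s)
	remaining, _ := s.List(ctx, "")
	fmt.Println(len(remaining)) // 0
}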
diff --git a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
deleted file mode 100644
index e0ff75f1..00000000
--- a/vendor/github.com/hashicorp/vault/logical/storage_inmem.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package logical
-
-import (
- "context"
- "sync"
-
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/physical/inmem"
-)
-
-// InmemStorage implements Storage and stores all data in memory. It is
-// basically a straight copy of physical.Inmem, but it prevents backends from
-// having to load all of physical's dependencies (which are legion) just to
-// have some testing storage.
-type InmemStorage struct {
- underlying physical.Backend
- once sync.Once
-}
-
-func (s *InmemStorage) Get(ctx context.Context, key string) (*StorageEntry, error) {
- s.once.Do(s.init)
-
- entry, err := s.underlying.Get(ctx, key)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
- return &StorageEntry{
- Key: entry.Key,
- Value: entry.Value,
- SealWrap: entry.SealWrap,
- }, nil
-}
-
-func (s *InmemStorage) Put(ctx context.Context, entry *StorageEntry) error {
- s.once.Do(s.init)
-
- return s.underlying.Put(ctx, &physical.Entry{
- Key: entry.Key,
- Value: entry.Value,
- SealWrap: entry.SealWrap,
- })
-}
-
-func (s *InmemStorage) Delete(ctx context.Context, key string) error {
- s.once.Do(s.init)
-
- return s.underlying.Delete(ctx, key)
-}
-
-func (s *InmemStorage) List(ctx context.Context, prefix string) ([]string, error) {
- s.once.Do(s.init)
-
- return s.underlying.List(ctx, prefix)
-}
-
-func (s *InmemStorage) Underlying() *inmem.InmemBackend {
- s.once.Do(s.init)
-
- return s.underlying.(*inmem.InmemBackend)
-}
-
-func (s *InmemStorage) init() {
- s.underlying, _ = inmem.NewInmem(nil, nil)
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/system_view.go b/vendor/github.com/hashicorp/vault/logical/system_view.go
deleted file mode 100644
index dff258b1..00000000
--- a/vendor/github.com/hashicorp/vault/logical/system_view.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package logical
-
-import (
- "context"
- "errors"
- "time"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/license"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/helper/wrapping"
-)
-
-// SystemView exposes system configuration information in a safe way
-// for logical backends to consume
-type SystemView interface {
- // DefaultLeaseTTL returns the default lease TTL set in Vault configuration
- DefaultLeaseTTL() time.Duration
-
- // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
- // authors should take care not to issue credentials that last longer than
- // this value, as Vault will revoke them
- MaxLeaseTTL() time.Duration
-
- // SudoPrivilege returns true if given path has sudo privileges
- // for the given client token
- SudoPrivilege(ctx context.Context, path string, token string) bool
-
- // Returns true if the mount is tainted. A mount is tainted if it is in the
- // process of being unmounted. This should only be used in special
- // circumstances; a primary use-case is as a guard in revocation functions.
- // If revocation of a backend's leases fails it can keep the unmounting
- // process from being successful. If the reason for this failure is not
- // relevant when the mount is tainted (for instance, saving a CRL to disk
- // when the stored CRL will be removed during the unmounting process
- // anyway), we can ignore the errors to allow unmounting to complete.
- Tainted() bool
-
- // Returns true if caching is disabled. If true, no caches should be used,
- // despite known slowdowns.
- CachingDisabled() bool
-
- // When run from a system view attached to a request, indicates whether the
- // request is affecting a local mount or not
- LocalMount() bool
-
- // ReplicationState indicates the state of cluster replication
- ReplicationState() consts.ReplicationState
-
- // HasFeature returns true if the feature is currently enabled
- HasFeature(feature license.Features) bool
-
- // ResponseWrapData wraps the given data in a cubbyhole and returns the
- // token used to unwrap.
- ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
-
- // LookupPlugin looks into the plugin catalog for a plugin with the given
- // name. Returns a PluginRunner or an error if a plugin can not be found.
- LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error)
-
- // MlockEnabled returns the configuration setting for enabling mlock on
- // plugins.
- MlockEnabled() bool
-
- // EntityInfo returns a subset of information related to the identity entity
- // for the given entity id
- EntityInfo(entityID string) (*Entity, error)
-
- // PluginEnv returns Vault environment information used by plugins
- PluginEnv(context.Context) (*PluginEnvironment, error)
-}
-
-type StaticSystemView struct {
- DefaultLeaseTTLVal time.Duration
- MaxLeaseTTLVal time.Duration
- SudoPrivilegeVal bool
- TaintedVal bool
- CachingDisabledVal bool
- Primary bool
- EnableMlock bool
- LocalMountVal bool
- ReplicationStateVal consts.ReplicationState
- EntityVal *Entity
- Features license.Features
- VaultVersion string
- PluginEnvironment *PluginEnvironment
-}
-
-func (d StaticSystemView) DefaultLeaseTTL() time.Duration {
- return d.DefaultLeaseTTLVal
-}
-
-func (d StaticSystemView) MaxLeaseTTL() time.Duration {
- return d.MaxLeaseTTLVal
-}
-
-func (d StaticSystemView) SudoPrivilege(_ context.Context, path string, token string) bool {
- return d.SudoPrivilegeVal
-}
-
-func (d StaticSystemView) Tainted() bool {
- return d.TaintedVal
-}
-
-func (d StaticSystemView) CachingDisabled() bool {
- return d.CachingDisabledVal
-}
-
-func (d StaticSystemView) LocalMount() bool {
- return d.LocalMountVal
-}
-
-func (d StaticSystemView) ReplicationState() consts.ReplicationState {
- return d.ReplicationStateVal
-}
-
-func (d StaticSystemView) ResponseWrapData(_ context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
- return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView")
-}
-
-func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) {
- return nil, errors.New("LookupPlugin is not implemented in StaticSystemView")
-}
-
-func (d StaticSystemView) MlockEnabled() bool {
- return d.EnableMlock
-}
-
-func (d StaticSystemView) EntityInfo(entityID string) (*Entity, error) {
- return d.EntityVal, nil
-}
-
-func (d StaticSystemView) HasFeature(feature license.Features) bool {
- return d.Features.HasFeature(feature)
-}
-
-func (d StaticSystemView) PluginEnv(_ context.Context) (*PluginEnvironment, error) {
- return d.PluginEnvironment, nil
-}
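
Reviewer note (annotation, not part of the diff): a sketch of pinning TTLs with the removed StaticSystemView, as a backend test might do.

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/logical"
)

func main() {
	// Value receivers on StaticSystemView mean the value itself satisfies
	// the SystemView interface.
	var sys logical.SystemView = logical.StaticSystemView{
		DefaultLeaseTTLVal: 1 * time.Hour,
		MaxLeaseTTLVal:     24 * time.Hour,
	}

	fmt.Println(sys.DefaultLeaseTTL(), sys.MaxLeaseTTL()) // 1h0m0s 24h0m0s
}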
diff --git a/vendor/github.com/hashicorp/vault/logical/testing.go b/vendor/github.com/hashicorp/vault/logical/testing.go
deleted file mode 100644
index 7c773899..00000000
--- a/vendor/github.com/hashicorp/vault/logical/testing.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package logical
-
-import (
- "context"
- "reflect"
- "time"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/vault/helper/logging"
- "github.com/mitchellh/go-testing-interface"
-)
-
-// TestRequest is a helper to create a purely in-memory Request struct.
-func TestRequest(t testing.T, op Operation, path string) *Request {
- return &Request{
- Operation: op,
- Path: path,
- Data: make(map[string]interface{}),
- Storage: new(InmemStorage),
- }
-}
-
-// TestStorage is a helper that can be used from unit tests to verify
-// the behavior of a Storage impl.
-func TestStorage(t testing.T, s Storage) {
- keys, err := s.List(context.Background(), "")
- if err != nil {
- t.Fatalf("list error: %s", err)
- }
- if len(keys) > 0 {
- t.Fatalf("should have no keys to start: %#v", keys)
- }
-
- entry := &StorageEntry{Key: "foo", Value: []byte("bar")}
- if err := s.Put(context.Background(), entry); err != nil {
- t.Fatalf("put error: %s", err)
- }
-
- actual, err := s.Get(context.Background(), "foo")
- if err != nil {
- t.Fatalf("get error: %s", err)
- }
- if !reflect.DeepEqual(actual, entry) {
- t.Fatalf("wrong value. Expected: %#v\nGot: %#v", entry, actual)
- }
-
- keys, err = s.List(context.Background(), "")
- if err != nil {
- t.Fatalf("list error: %s", err)
- }
- if !reflect.DeepEqual(keys, []string{"foo"}) {
- t.Fatalf("bad keys: %#v", keys)
- }
-
- if err := s.Delete(context.Background(), "foo"); err != nil {
- t.Fatalf("put error: %s", err)
- }
-
- keys, err = s.List(context.Background(), "")
- if err != nil {
- t.Fatalf("list error: %s", err)
- }
- if len(keys) > 0 {
- t.Fatalf("should have no keys to start: %#v", keys)
- }
-}
-
-func TestSystemView() *StaticSystemView {
- defaultLeaseTTLVal := time.Hour * 24
- maxLeaseTTLVal := time.Hour * 24 * 2
- return &StaticSystemView{
- DefaultLeaseTTLVal: defaultLeaseTTLVal,
- MaxLeaseTTLVal: maxLeaseTTLVal,
- }
-}
-
-func TestBackendConfig() *BackendConfig {
- bc := &BackendConfig{
- Logger: logging.NewVaultLogger(log.Trace),
- System: TestSystemView(),
- }
-
- return bc
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/token.go b/vendor/github.com/hashicorp/vault/logical/token.go
deleted file mode 100644
index c6212a36..00000000
--- a/vendor/github.com/hashicorp/vault/logical/token.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package logical
-
-import (
- "time"
-
- sockaddr "github.com/hashicorp/go-sockaddr"
-)
-
-type TokenType uint8
-
-const (
- // TokenTypeDefault means "use the default, if any, that is currently set
- // on the mount". If not set, results in a Service token.
- TokenTypeDefault TokenType = iota
-
- // TokenTypeService is a "normal" Vault token for long-lived services
- TokenTypeService
-
- // TokenTypeBatch is a batch token
- TokenTypeBatch
-
- // TokenTypeDefaultService, configured on a mount, means that if
- // TokenTypeDefault is sent back by the mount, create Service tokens
- TokenTypeDefaultService
-
- // TokenTypeDefaultBatch, configured on a mount, means that if
- // TokenTypeDefault is sent back by the mount, create Batch tokens
- TokenTypeDefaultBatch
-)
-
-func (t TokenType) String() string {
- switch t {
- case TokenTypeDefault:
- return "default"
- case TokenTypeService:
- return "service"
- case TokenTypeBatch:
- return "batch"
- case TokenTypeDefaultService:
- return "default-service"
- case TokenTypeDefaultBatch:
- return "default-batch"
- default:
- panic("unreachable")
- }
-}
-
-// TokenEntry is used to represent a given token
-type TokenEntry struct {
- Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""`
-
- // ID of this entry, generally a random UUID
- ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""`
-
- // Accessor for this token, a random UUID
- Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""`
-
- // Parent token, used for revocation trees
- Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""`
-
- // Which named policies should be used
- Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`
-
- // Used for audit trails, this is something like "auth/user/login"
- Path string `json:"path" mapstructure:"path" structs:"path"`
-
- // Used for auditing. This could include things like "source", "user", "ip"
- Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"`
-
- // Used for operators to be able to associate with the source
- DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`
-
- // Used to restrict the number of uses (zero is unlimited). This is to
- // support one-time-tokens (generalized). There are a few special values:
- // if it's -1 it has run through its use counts and is executing its final
- // use; if it's -2 it is tainted, which means revocation is currently
- // running on it; and if it's -3 it's also tainted but revocation
- // previously ran and failed, so this hints the tidy function to try it
- // again.
- NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
-
- // Time of token creation
- CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""`
-
- // Duration set when token was created
- TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""`
-
- // Explicit maximum TTL on the token
- ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""`
-
- // If set, the role that was used for parameters at creation time
- Role string `json:"role" mapstructure:"role" structs:"role"`
-
- // If set, the period of the token. This is only used when created directly
- // through the create endpoint; periods managed by roles or other auth
- // backends are subject to those renewal rules.
- Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""`
-
- // These are the deprecated fields
- DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""`
- NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""`
- CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""`
- ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""`
-
- EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`
-
- // The set of CIDRs that this token can be used with
- BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"`
-
- // NamespaceID is the identifier of the namespace to which this token is
- // confined to. Do not return this value over the API when the token is
- // being looked up.
- NamespaceID string `json:"namespace_id" mapstructure:"namespace_id" structs:"namespace_id" sentinel:""`
-
- // CubbyholeID is the identifier of the cubbyhole storage belonging to this
- // token
- CubbyholeID string `json:"cubbyhole_id" mapstructure:"cubbyhole_id" structs:"cubbyhole_id" sentinel:""`
-}
-
-func (te *TokenEntry) SentinelGet(key string) (interface{}, error) {
- if te == nil {
- return nil, nil
- }
- switch key {
- case "period":
- return te.Period, nil
-
- case "period_seconds":
- return int64(te.Period.Seconds()), nil
-
- case "explicit_max_ttl":
- return te.ExplicitMaxTTL, nil
-
- case "explicit_max_ttl_seconds":
- return int64(te.ExplicitMaxTTL.Seconds()), nil
-
- case "creation_ttl":
- return te.TTL, nil
-
- case "creation_ttl_seconds":
- return int64(te.TTL.Seconds()), nil
-
- case "creation_time":
- return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil
-
- case "creation_time_unix":
- return time.Unix(te.CreationTime, 0), nil
-
- case "meta", "metadata":
- return te.Meta, nil
-
- case "type":
- teType := te.Type
- switch teType {
- case TokenTypeBatch, TokenTypeService:
- case TokenTypeDefault:
- teType = TokenTypeService
- default:
- return "unknown", nil
- }
- return teType.String(), nil
- }
-
- return nil, nil
-}
-
-func (te *TokenEntry) SentinelKeys() []string {
- return []string{
- "period",
- "period_seconds",
- "explicit_max_ttl",
- "explicit_max_ttl_seconds",
- "creation_ttl",
- "creation_ttl_seconds",
- "creation_time",
- "creation_time_unix",
- "meta",
- "metadata",
- "type",
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/logical/translate_response.go b/vendor/github.com/hashicorp/vault/logical/translate_response.go
deleted file mode 100644
index 11714c22..00000000
--- a/vendor/github.com/hashicorp/vault/logical/translate_response.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package logical
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "time"
-)
-
-// This logic was pulled from the http package so that it can be used for
-// encoding wrapped responses as well. It simply translates the logical
-// response to an http response, with the values we want and omitting the
-// values we don't.
-func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse {
- httpResp := &HTTPResponse{
- Data: input.Data,
- Warnings: input.Warnings,
- }
-
- if input.Secret != nil {
- httpResp.LeaseID = input.Secret.LeaseID
- httpResp.Renewable = input.Secret.Renewable
- httpResp.LeaseDuration = int(input.Secret.TTL.Seconds())
- }
-
- // If we have authentication information, then
- // set up the result structure.
- if input.Auth != nil {
- httpResp.Auth = &HTTPAuth{
- ClientToken: input.Auth.ClientToken,
- Accessor: input.Auth.Accessor,
- Policies: input.Auth.Policies,
- TokenPolicies: input.Auth.TokenPolicies,
- IdentityPolicies: input.Auth.IdentityPolicies,
- Metadata: input.Auth.Metadata,
- LeaseDuration: int(input.Auth.TTL.Seconds()),
- Renewable: input.Auth.Renewable,
- EntityID: input.Auth.EntityID,
- TokenType: input.Auth.TokenType.String(),
- }
- }
-
- return httpResp
-}
-
-func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response {
- logicalResp := &Response{
- Data: input.Data,
- Warnings: input.Warnings,
- }
-
- if input.LeaseID != "" {
- logicalResp.Secret = &Secret{
- LeaseID: input.LeaseID,
- }
- logicalResp.Secret.Renewable = input.Renewable
- logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration)
- }
-
- if input.Auth != nil {
- logicalResp.Auth = &Auth{
- ClientToken: input.Auth.ClientToken,
- Accessor: input.Auth.Accessor,
- Policies: input.Auth.Policies,
- TokenPolicies: input.Auth.TokenPolicies,
- IdentityPolicies: input.Auth.IdentityPolicies,
- Metadata: input.Auth.Metadata,
- EntityID: input.Auth.EntityID,
- }
- logicalResp.Auth.Renewable = input.Auth.Renewable
- logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration)
- switch input.Auth.TokenType {
- case "service":
- logicalResp.Auth.TokenType = TokenTypeService
- case "batch":
- logicalResp.Auth.TokenType = TokenTypeBatch
- }
- }
-
- return logicalResp
-}
-
-type HTTPResponse struct {
- RequestID string `json:"request_id"`
- LeaseID string `json:"lease_id"`
- Renewable bool `json:"renewable"`
- LeaseDuration int `json:"lease_duration"`
- Data map[string]interface{} `json:"data"`
- WrapInfo *HTTPWrapInfo `json:"wrap_info"`
- Warnings []string `json:"warnings"`
- Auth *HTTPAuth `json:"auth"`
-}
-
-type HTTPAuth struct {
- ClientToken string `json:"client_token"`
- Accessor string `json:"accessor"`
- Policies []string `json:"policies"`
- TokenPolicies []string `json:"token_policies,omitempty"`
- IdentityPolicies []string `json:"identity_policies,omitempty"`
- Metadata map[string]string `json:"metadata"`
- LeaseDuration int `json:"lease_duration"`
- Renewable bool `json:"renewable"`
- EntityID string `json:"entity_id"`
- TokenType string `json:"token_type"`
-}
-
-type HTTPWrapInfo struct {
- Token string `json:"token"`
- Accessor string `json:"accessor"`
- TTL int `json:"ttl"`
- CreationTime string `json:"creation_time"`
- CreationPath string `json:"creation_path"`
- WrappedAccessor string `json:"wrapped_accessor,omitempty"`
-}
-
-type HTTPSysInjector struct {
- Response *HTTPResponse
-}
-
-func (h HTTPSysInjector) MarshalJSON() ([]byte, error) {
- j, err := json.Marshal(h.Response)
- if err != nil {
- return nil, err
- }
- // Fast path: no data (len of a nil map is 0)
- if len(h.Response.Data) == 0 {
- return j, nil
- }
- // Marshaling a response always produces a JSON object, meaning it will
- // always start with '{', so we hijack this to prepend the data values.
- // Make a guess at the capacity, and write the object opener.
- buf := bytes.NewBuffer(make([]byte, 0, len(j)*2))
- buf.WriteRune('{')
- for k, v := range h.Response.Data {
- // Marshal each key/value individually
- mk, err := json.Marshal(k)
- if err != nil {
- return nil, err
- }
- mv, err := json.Marshal(v)
- if err != nil {
- return nil, err
- }
- // Write into the final buffer. We'll never have a valid response
- // without any fields so we can unconditionally add a comma after each.
- buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv))
- }
- // Add the rest, without the first '{'
- buf.Write(j[1:])
- return buf.Bytes(), nil
-}
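
Reviewer note (annotation, not part of the diff): a round-trip sketch of the removed translation helpers. TTLs survive only at whole-second granularity, since HTTPResponse exposes lease_duration as an integer.

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/logical"
)

func main() {
	resp := &logical.Response{
		Data:   map[string]interface{}{"zip": "zap"},
		Secret: &logical.Secret{LeaseID: "lease-123"},
	}
	resp.Secret.TTL = 90 * time.Second // TTL is promoted from LeaseOptions

	httpResp := logical.LogicalResponseToHTTPResponse(resp)
	fmt.Println(httpResp.LeaseID, httpResp.LeaseDuration) // lease-123 90

	back := logical.HTTPResponseToLogicalResponse(httpResp)
	fmt.Println(back.Secret.TTL) // 1m30s
}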
diff --git a/vendor/github.com/hashicorp/vault/physical/cache.go b/vendor/github.com/hashicorp/vault/physical/cache.go
deleted file mode 100644
index af6a39b8..00000000
--- a/vendor/github.com/hashicorp/vault/physical/cache.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package physical
-
-import (
- "context"
- "sync/atomic"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/golang-lru"
- "github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/helper/pathmanager"
-)
-
-const (
- // DefaultCacheSize is used if no cache size is specified for NewCache
- DefaultCacheSize = 128 * 1024
-)
-
-// These paths don't need to be cached by the LRU cache. This should
- // particularly help reduce memory pressure when unsealing.
-var cacheExceptionsPaths = []string{
- "wal/logs/",
- "index/pages/",
- "index-dr/pages/",
- "sys/expire/",
- "core/poison-pill",
-}
-
-// Cache is used to wrap an underlying physical backend
-// and provide an LRU cache layer on top. Most of the reads done by
-// Vault are for policy objects so there is a large read reduction
-// by using a simple write-through cache.
-type Cache struct {
- backend Backend
- lru *lru.TwoQueueCache
- locks []*locksutil.LockEntry
- logger log.Logger
- enabled *uint32
- cacheExceptions *pathmanager.PathManager
-}
-
-// TransactionalCache is a Cache that wraps the physical that is transactional
-type TransactionalCache struct {
- *Cache
- Transactional
-}
-
-// Verify Cache satisfies the correct interfaces
-var _ ToggleablePurgemonster = (*Cache)(nil)
-var _ ToggleablePurgemonster = (*TransactionalCache)(nil)
-var _ Backend = (*Cache)(nil)
-var _ Transactional = (*TransactionalCache)(nil)
-
-// NewCache returns a physical cache of the given size.
-// If no size is provided, the default size is used.
-func NewCache(b Backend, size int, logger log.Logger) *Cache {
- if logger.IsDebug() {
- logger.Debug("creating LRU cache", "size", size)
- }
- if size <= 0 {
- size = DefaultCacheSize
- }
-
- pm := pathmanager.New()
- pm.AddPaths(cacheExceptionsPaths)
-
- cache, _ := lru.New2Q(size)
- c := &Cache{
- backend: b,
- lru: cache,
- locks: locksutil.CreateLocks(),
- logger: logger,
- // This fails safe.
- enabled: new(uint32),
- cacheExceptions: pm,
- }
- return c
-}
-
-func NewTransactionalCache(b Backend, size int, logger log.Logger) *TransactionalCache {
- c := &TransactionalCache{
- Cache: NewCache(b, size, logger),
- Transactional: b.(Transactional),
- }
- return c
-}
-
-func (c *Cache) shouldCache(key string) bool {
- if atomic.LoadUint32(c.enabled) == 0 {
- return false
- }
-
- return !c.cacheExceptions.HasPath(key)
-}
-
-// SetEnabled is used to toggle whether the cache is on or off. It must be
-// called with true to actually activate the cache after creation.
-func (c *Cache) SetEnabled(enabled bool) {
- if enabled {
- atomic.StoreUint32(c.enabled, 1)
- return
- }
- atomic.StoreUint32(c.enabled, 0)
-}
-
-// Purge is used to clear the cache
-func (c *Cache) Purge(ctx context.Context) {
- // Lock the world
- for _, lock := range c.locks {
- lock.Lock()
- defer lock.Unlock()
- }
-
- c.lru.Purge()
-}
-
-func (c *Cache) Put(ctx context.Context, entry *Entry) error {
- if entry != nil && !c.shouldCache(entry.Key) {
- return c.backend.Put(ctx, entry)
- }
-
- lock := locksutil.LockForKey(c.locks, entry.Key)
- lock.Lock()
- defer lock.Unlock()
-
- err := c.backend.Put(ctx, entry)
- if err == nil {
- c.lru.Add(entry.Key, entry)
- }
- return err
-}
-
-func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) {
- if !c.shouldCache(key) {
- return c.backend.Get(ctx, key)
- }
-
- lock := locksutil.LockForKey(c.locks, key)
- lock.RLock()
- defer lock.RUnlock()
-
- // Check the LRU first
- if raw, ok := c.lru.Get(key); ok {
- if raw == nil {
- return nil, nil
- }
- return raw.(*Entry), nil
- }
-
- // Read from the underlying backend
- ent, err := c.backend.Get(ctx, key)
- if err != nil {
- return nil, err
- }
-
- // Cache the result
- c.lru.Add(key, ent)
-
- return ent, nil
-}
-
-func (c *Cache) Delete(ctx context.Context, key string) error {
- if !c.shouldCache(key) {
- return c.backend.Delete(ctx, key)
- }
-
- lock := locksutil.LockForKey(c.locks, key)
- lock.Lock()
- defer lock.Unlock()
-
- err := c.backend.Delete(ctx, key)
- if err == nil {
- c.lru.Remove(key)
- }
- return err
-}
-
-func (c *Cache) List(ctx context.Context, prefix string) ([]string, error) {
- // Always pass through, as this would be difficult to cache. For the same
- // reason we don't lock: we can't reasonably know which locks to read-lock
- // ahead of time.
- return c.backend.List(ctx, prefix)
-}
-
-func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) error {
- // Bypass the locking below
- if atomic.LoadUint32(c.enabled) == 0 {
- return c.Transactional.Transaction(ctx, txns)
- }
-
- // Collect keys that need to be locked
- var keys []string
- for _, curr := range txns {
- keys = append(keys, curr.Entry.Key)
- }
- // Lock the keys
- for _, l := range locksutil.LocksForKeys(c.locks, keys) {
- l.Lock()
- defer l.Unlock()
- }
-
- if err := c.Transactional.Transaction(ctx, txns); err != nil {
- return err
- }
-
- for _, txn := range txns {
- if !c.shouldCache(txn.Entry.Key) {
- continue
- }
-
- switch txn.Operation {
- case PutOperation:
- c.lru.Add(txn.Entry.Key, txn.Entry)
- case DeleteOperation:
- c.lru.Remove(txn.Entry.Key)
- }
- }
-
- return nil
-}
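
Reviewer note (annotation, not part of the diff): a sketch of layering the removed Cache over the in-memory physical backend. The cache fails safe — it starts disabled and must be enabled explicitly.

package main

import (
	"context"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/physical"
	"github.com/hashicorp/vault/physical/inmem"
)

func main() {
	ctx := context.Background()
	logger := log.Default()

	backend, _ := inmem.NewInmem(nil, logger)
	cache := physical.NewCache(backend, 0, logger) // size <= 0 falls back to DefaultCacheSize
	cache.SetEnabled(true)

	_ = cache.Put(ctx, &physical.Entry{Key: "core/policy/default", Value: []byte("...")})
	entry, _ := cache.Get(ctx, "core/policy/default") // now served from the LRU
	_ = entry
}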
diff --git a/vendor/github.com/hashicorp/vault/physical/encoding.go b/vendor/github.com/hashicorp/vault/physical/encoding.go
deleted file mode 100644
index d2f93478..00000000
--- a/vendor/github.com/hashicorp/vault/physical/encoding.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package physical
-
-import (
- "context"
- "errors"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-var ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters")
-var ErrNonPrintable = errors.New("key contains non-printable characters")
-
-// StorageEncoding is used to verify the key encoding of underlying physical
-// requests
-type StorageEncoding struct {
- Backend
-}
-
-// TransactionalStorageEncoding is the transactional version of the key
-// encoding verifier
-type TransactionalStorageEncoding struct {
- *StorageEncoding
- Transactional
-}
-
-// Verify StorageEncoding satisfies the correct interfaces
-var _ Backend = (*StorageEncoding)(nil)
-var _ Transactional = (*TransactionalStorageEncoding)(nil)
-
-// NewStorageEncoding returns a wrapped physical backend and verifies the key
-// encoding
-func NewStorageEncoding(b Backend) Backend {
- enc := &StorageEncoding{
- Backend: b,
- }
-
- if bTxn, ok := b.(Transactional); ok {
- return &TransactionalStorageEncoding{
- StorageEncoding: enc,
- Transactional: bTxn,
- }
- }
-
- return enc
-}
-
-func (e *StorageEncoding) containsNonPrintableChars(key string) bool {
- idx := strings.IndexFunc(key, func(c rune) bool {
- return !unicode.IsPrint(c)
- })
-
- return idx != -1
-}
-
-func (e *StorageEncoding) Put(ctx context.Context, entry *Entry) error {
- if !utf8.ValidString(entry.Key) {
- return ErrNonUTF8
- }
-
- if e.containsNonPrintableChars(entry.Key) {
- return ErrNonPrintable
- }
-
- return e.Backend.Put(ctx, entry)
-}
-
-func (e *StorageEncoding) Delete(ctx context.Context, key string) error {
- if !utf8.ValidString(key) {
- return ErrNonUTF8
- }
-
- if e.containsNonPrintableChars(key) {
- return ErrNonPrintable
- }
-
- return e.Backend.Delete(ctx, key)
-}
-
-func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []*TxnEntry) error {
- for _, txn := range txns {
- if !utf8.ValidString(txn.Entry.Key) {
- return ErrNonUTF8
- }
-
- if e.containsNonPrintableChars(txn.Entry.Key) {
- return ErrNonPrintable
- }
-
- }
-
- return e.Transactional.Transaction(ctx, txns)
-}
-
-func (e *StorageEncoding) Purge(ctx context.Context) {
- if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
- purgeable.Purge(ctx)
- }
-}
-
-func (e *StorageEncoding) SetEnabled(enabled bool) {
- if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
- purgeable.SetEnabled(enabled)
- }
-}
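
A short sketch of the validation in action (assuming `b` is any `physical.Backend` and `ctx` a `context.Context`): a key containing a NUL byte is valid UTF-8 but not printable, so it is rejected before it ever reaches storage.

```go
enc := physical.NewStorageEncoding(b)

err := enc.Put(ctx, &physical.Entry{
	Key:   "foo\x00bar", // NUL is valid UTF-8 but not printable
	Value: []byte("v"),
})
// err == physical.ErrNonPrintable; the entry never reaches the backend
```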
diff --git a/vendor/github.com/hashicorp/vault/physical/error.go b/vendor/github.com/hashicorp/vault/physical/error.go
deleted file mode 100644
index d4c6f80e..00000000
--- a/vendor/github.com/hashicorp/vault/physical/error.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package physical
-
-import (
- "context"
- "errors"
- "math/rand"
- "time"
-
- log "github.com/hashicorp/go-hclog"
-)
-
-const (
-	// DefaultErrorPercent is used to determine how often we error
- DefaultErrorPercent = 20
-)
-
-// ErrorInjector is used to add errors into underlying physical requests
-type ErrorInjector struct {
- backend Backend
- errorPercent int
- random *rand.Rand
-}
-
-// TransactionalErrorInjector is the transactional version of the error
-// injector
-type TransactionalErrorInjector struct {
- *ErrorInjector
- Transactional
-}
-
-// Verify ErrorInjector satisfies the correct interfaces
-var _ Backend = (*ErrorInjector)(nil)
-var _ Transactional = (*TransactionalErrorInjector)(nil)
-
-// NewErrorInjector returns a wrapped physical backend that injects errors
-func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector {
- if errorPercent < 0 || errorPercent > 100 {
- errorPercent = DefaultErrorPercent
- }
- logger.Info("creating error injector")
-
- return &ErrorInjector{
- backend: b,
- errorPercent: errorPercent,
- random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
- }
-}
-
-// NewTransactionalErrorInjector creates a new transactional ErrorInjector
-func NewTransactionalErrorInjector(b Backend, errorPercent int, logger log.Logger) *TransactionalErrorInjector {
- return &TransactionalErrorInjector{
- ErrorInjector: NewErrorInjector(b, errorPercent, logger),
- Transactional: b.(Transactional),
- }
-}
-
-func (e *ErrorInjector) SetErrorPercentage(p int) {
- e.errorPercent = p
-}
-
-func (e *ErrorInjector) addError() error {
- roll := e.random.Intn(100)
- if roll < e.errorPercent {
- return errors.New("random error")
- }
-
- return nil
-}
-
-func (e *ErrorInjector) Put(ctx context.Context, entry *Entry) error {
- if err := e.addError(); err != nil {
- return err
- }
- return e.backend.Put(ctx, entry)
-}
-
-func (e *ErrorInjector) Get(ctx context.Context, key string) (*Entry, error) {
- if err := e.addError(); err != nil {
- return nil, err
- }
- return e.backend.Get(ctx, key)
-}
-
-func (e *ErrorInjector) Delete(ctx context.Context, key string) error {
- if err := e.addError(); err != nil {
- return err
- }
- return e.backend.Delete(ctx, key)
-}
-
-func (e *ErrorInjector) List(ctx context.Context, prefix string) ([]string, error) {
- if err := e.addError(); err != nil {
- return nil, err
- }
- return e.backend.List(ctx, prefix)
-}
-
-func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*TxnEntry) error {
- if err := e.addError(); err != nil {
- return err
- }
- return e.Transactional.Transaction(ctx, txns)
-}
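
A usage sketch (assuming `b`, `ctx`, and `logger` as before): with an error percentage of 20, roughly one in five operations fails with "random error", which is handy for exercising retry paths in tests.

```go
inj := physical.NewErrorInjector(b, 20, logger)

if _, err := inj.Get(ctx, "foo"); err != nil {
	// ~20% of calls land here with "random error"
}
```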
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go
deleted file mode 100644
index d1433d8a..00000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/inmem.go
+++ /dev/null
@@ -1,263 +0,0 @@
-package inmem
-
-import (
- "context"
- "errors"
- "os"
- "strings"
- "sync"
- "sync/atomic"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/vault/physical"
-
- "github.com/armon/go-radix"
-)
-
-// Verify interfaces are satisfied
-var _ physical.Backend = (*InmemBackend)(nil)
-var _ physical.HABackend = (*InmemHABackend)(nil)
-var _ physical.HABackend = (*TransactionalInmemHABackend)(nil)
-var _ physical.Lock = (*InmemLock)(nil)
-var _ physical.Transactional = (*TransactionalInmemBackend)(nil)
-var _ physical.Transactional = (*TransactionalInmemHABackend)(nil)
-
-var (
- PutDisabledError = errors.New("put operations disabled in inmem backend")
- GetDisabledError = errors.New("get operations disabled in inmem backend")
- DeleteDisabledError = errors.New("delete operations disabled in inmem backend")
- ListDisabledError = errors.New("list operations disabled in inmem backend")
-)
-
-// InmemBackend is an in-memory only physical backend. It is useful
-// for testing and development situations where the data is not
-// expected to be durable.
-type InmemBackend struct {
- sync.RWMutex
- root *radix.Tree
- permitPool *physical.PermitPool
- logger log.Logger
- failGet *uint32
- failPut *uint32
- failDelete *uint32
- failList *uint32
- logOps bool
-}
-
-type TransactionalInmemBackend struct {
- InmemBackend
-}
-
-// NewInmem constructs a new in-memory backend
-func NewInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) {
- in := &InmemBackend{
- root: radix.New(),
- permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
- logger: logger,
- failGet: new(uint32),
- failPut: new(uint32),
- failDelete: new(uint32),
- failList: new(uint32),
- logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
- }
- return in, nil
-}
-
-// NewTransactionalInmem constructs a new transactional in-memory backend. For
-// now it simply creates a permit pool of size 1, so only one operation can run
-// at a time.
-func NewTransactionalInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) {
- in := &TransactionalInmemBackend{
- InmemBackend: InmemBackend{
- root: radix.New(),
- permitPool: physical.NewPermitPool(1),
- logger: logger,
- failGet: new(uint32),
- failPut: new(uint32),
- failDelete: new(uint32),
- failList: new(uint32),
- logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
- },
- }
- return in, nil
-}
-
-// Put is used to insert or update an entry
-func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error {
- i.permitPool.Acquire()
- defer i.permitPool.Release()
-
- i.Lock()
- defer i.Unlock()
-
- return i.PutInternal(ctx, entry)
-}
-
-func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) error {
- if i.logOps {
- i.logger.Trace("put", "key", entry.Key)
- }
- if atomic.LoadUint32(i.failPut) != 0 {
- return PutDisabledError
- }
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- i.root.Insert(entry.Key, entry.Value)
- return nil
-}
-
-func (i *InmemBackend) FailPut(fail bool) {
- var val uint32
- if fail {
- val = 1
- }
- atomic.StoreUint32(i.failPut, val)
-}
-
-// Get is used to fetch an entry
-func (i *InmemBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
- i.permitPool.Acquire()
- defer i.permitPool.Release()
-
- i.RLock()
- defer i.RUnlock()
-
- return i.GetInternal(ctx, key)
-}
-
-func (i *InmemBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) {
- if i.logOps {
- i.logger.Trace("get", "key", key)
- }
- if atomic.LoadUint32(i.failGet) != 0 {
- return nil, GetDisabledError
- }
-
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- if raw, ok := i.root.Get(key); ok {
- return &physical.Entry{
- Key: key,
- Value: raw.([]byte),
- }, nil
- }
- return nil, nil
-}
-
-func (i *InmemBackend) FailGet(fail bool) {
- var val uint32
- if fail {
- val = 1
- }
- atomic.StoreUint32(i.failGet, val)
-}
-
-// Delete is used to permanently delete an entry
-func (i *InmemBackend) Delete(ctx context.Context, key string) error {
- i.permitPool.Acquire()
- defer i.permitPool.Release()
-
- i.Lock()
- defer i.Unlock()
-
- return i.DeleteInternal(ctx, key)
-}
-
-func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error {
- if i.logOps {
- i.logger.Trace("delete", "key", key)
- }
- if atomic.LoadUint32(i.failDelete) != 0 {
- return DeleteDisabledError
- }
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- i.root.Delete(key)
- return nil
-}
-
-func (i *InmemBackend) FailDelete(fail bool) {
- var val uint32
- if fail {
- val = 1
- }
- atomic.StoreUint32(i.failDelete, val)
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (i *InmemBackend) List(ctx context.Context, prefix string) ([]string, error) {
- i.permitPool.Acquire()
- defer i.permitPool.Release()
-
- i.RLock()
- defer i.RUnlock()
-
- return i.ListInternal(ctx, prefix)
-}
-
-func (i *InmemBackend) ListInternal(ctx context.Context, prefix string) ([]string, error) {
- if i.logOps {
- i.logger.Trace("list", "prefix", prefix)
- }
- if atomic.LoadUint32(i.failList) != 0 {
- return nil, ListDisabledError
- }
-
- var out []string
- seen := make(map[string]interface{})
- walkFn := func(s string, v interface{}) bool {
- trimmed := strings.TrimPrefix(s, prefix)
- sep := strings.Index(trimmed, "/")
- if sep == -1 {
- out = append(out, trimmed)
- } else {
- trimmed = trimmed[:sep+1]
- if _, ok := seen[trimmed]; !ok {
- out = append(out, trimmed)
- seen[trimmed] = struct{}{}
- }
- }
- return false
- }
- i.root.WalkPrefix(prefix, walkFn)
-
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- return out, nil
-}
-
-func (i *InmemBackend) FailList(fail bool) {
- var val uint32
- if fail {
- val = 1
- }
- atomic.StoreUint32(i.failList, val)
-}
-
-// Implements the transaction interface
-func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
- t.permitPool.Acquire()
- defer t.permitPool.Release()
-
- t.Lock()
- defer t.Unlock()
-
- return physical.GenericTransactionHandler(ctx, t, txns)
-}
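
A sketch of how the failure toggles are meant to be used in tests; the returned `physical.Backend` can be asserted back to `*inmem.InmemBackend` to reach them.

```go
b, _ := inmem.NewInmem(nil, logger)
ib := b.(*inmem.InmemBackend)

ib.FailPut(true)
err := b.Put(ctx, &physical.Entry{Key: "k", Value: []byte("v")})
// err == inmem.PutDisabledError

ib.FailPut(false) // subsequent Puts succeed again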
diff --git a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go b/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go
deleted file mode 100644
index 67551007..00000000
--- a/vendor/github.com/hashicorp/vault/physical/inmem/inmem_ha.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package inmem
-
-import (
- "fmt"
- "sync"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/vault/physical"
-)
-
-type InmemHABackend struct {
- physical.Backend
- locks map[string]string
- l *sync.Mutex
- cond *sync.Cond
- logger log.Logger
-}
-
-type TransactionalInmemHABackend struct {
- physical.Transactional
- InmemHABackend
-}
-
-// NewInmemHA constructs a new in-memory HA backend. This is only for testing.
-func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
- be, err := NewInmem(nil, logger)
- if err != nil {
- return nil, err
- }
-
- in := &InmemHABackend{
- Backend: be,
- locks: make(map[string]string),
- logger: logger,
- l: new(sync.Mutex),
- }
- in.cond = sync.NewCond(in.l)
- return in, nil
-}
-
-func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) {
- transInmem, err := NewTransactionalInmem(nil, logger)
- if err != nil {
- return nil, err
- }
- inmemHA := InmemHABackend{
- Backend: transInmem,
- locks: make(map[string]string),
- logger: logger,
- l: new(sync.Mutex),
- }
-
- in := &TransactionalInmemHABackend{
- InmemHABackend: inmemHA,
- Transactional: transInmem.(physical.Transactional),
- }
- in.cond = sync.NewCond(in.l)
- return in, nil
-}
-
-// LockWith is used for mutual exclusion based on the given key.
-func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) {
- l := &InmemLock{
- in: i,
- key: key,
- value: value,
- }
- return l, nil
-}
-
-// LockMapSize is used in some tests to determine whether this backend has ever
-// been used for HA purposes rather than simply for storage
-func (i *InmemHABackend) LockMapSize() int {
- return len(i.locks)
-}
-
-// HAEnabled indicates whether the HA functionality should be exposed.
-// Currently always returns true.
-func (i *InmemHABackend) HAEnabled() bool {
- return true
-}
-
-// InmemLock is an in-memory Lock implementation for the HABackend
-type InmemLock struct {
- in *InmemHABackend
- key string
- value string
-
- held bool
- leaderCh chan struct{}
- l sync.Mutex
-}
-
-func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
- i.l.Lock()
- defer i.l.Unlock()
- if i.held {
- return nil, fmt.Errorf("lock already held")
- }
-
- // Attempt an async acquisition
- didLock := make(chan struct{})
- releaseCh := make(chan bool, 1)
- go func() {
- // Wait to acquire the lock
- i.in.l.Lock()
- _, ok := i.in.locks[i.key]
- for ok {
- i.in.cond.Wait()
- _, ok = i.in.locks[i.key]
- }
- i.in.locks[i.key] = i.value
- i.in.l.Unlock()
-
- // Signal that lock is held
- close(didLock)
-
- // Handle an early abort
- release := <-releaseCh
- if release {
- i.in.l.Lock()
- delete(i.in.locks, i.key)
- i.in.l.Unlock()
- i.in.cond.Broadcast()
- }
- }()
-
- // Wait for lock acquisition or shutdown
- select {
- case <-didLock:
- releaseCh <- false
- case <-stopCh:
- releaseCh <- true
- return nil, nil
- }
-
- // Create the leader channel
- i.held = true
- i.leaderCh = make(chan struct{})
- return i.leaderCh, nil
-}
-
-func (i *InmemLock) Unlock() error {
- i.l.Lock()
- defer i.l.Unlock()
-
- if !i.held {
- return nil
- }
-
- close(i.leaderCh)
- i.leaderCh = nil
- i.held = false
-
- i.in.l.Lock()
- delete(i.in.locks, i.key)
- i.in.l.Unlock()
- i.in.cond.Broadcast()
- return nil
-}
-
-func (i *InmemLock) Value() (bool, string, error) {
- i.in.l.Lock()
- val, ok := i.in.locks[i.key]
- i.in.l.Unlock()
- return ok, val, nil
-}
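
A minimal sketch of lock acquisition against this backend (the key and value names are hypothetical). `Lock(nil)` blocks until the key is free; the returned channel is closed when leadership is lost, and a closed `stopCh` aborts the attempt with a nil channel.

```go
be, _ := inmem.NewInmemHA(nil, logger)
ha := be.(physical.HABackend)

lock, _ := ha.LockWith("leader", "node-1")
leaderCh, err := lock.Lock(nil) // nil stopCh: block until acquired
if err == nil && leaderCh != nil {
	// we hold the lock; Unlock releases the key and wakes other waiters
	defer lock.Unlock()
}
```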
diff --git a/vendor/github.com/hashicorp/vault/physical/latency.go b/vendor/github.com/hashicorp/vault/physical/latency.go
deleted file mode 100644
index 18829714..00000000
--- a/vendor/github.com/hashicorp/vault/physical/latency.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package physical
-
-import (
- "context"
- "math/rand"
- "time"
-
- log "github.com/hashicorp/go-hclog"
-)
-
-const (
-	// DefaultJitterPercent is used when an out-of-range jitter percent is passed to NewLatencyInjector
- DefaultJitterPercent = 20
-)
-
-// LatencyInjector is used to add latency into underlying physical requests
-type LatencyInjector struct {
- backend Backend
- latency time.Duration
- jitterPercent int
- random *rand.Rand
-}
-
-// TransactionalLatencyInjector is the transactional version of the latency
-// injector
-type TransactionalLatencyInjector struct {
- *LatencyInjector
- Transactional
-}
-
-// Verify LatencyInjector satisfies the correct interfaces
-var _ Backend = (*LatencyInjector)(nil)
-var _ Transactional = (*TransactionalLatencyInjector)(nil)
-
-// NewLatencyInjector returns a wrapped physical backend to simulate latency
-func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
- if jitter < 0 || jitter > 100 {
- jitter = DefaultJitterPercent
- }
- logger.Info("creating latency injector")
-
- return &LatencyInjector{
- backend: b,
- latency: latency,
- jitterPercent: jitter,
- random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
- }
-}
-
-// NewTransactionalLatencyInjector creates a new transactional LatencyInjector
-func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
- return &TransactionalLatencyInjector{
- LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
- Transactional: b.(Transactional),
- }
-}
-
-func (l *LatencyInjector) addLatency() {
-	// Calculate a percentage multiplier in the range (100 - jitter)% to (100 + jitter)%
- percent := 100
- if l.jitterPercent > 0 {
- min := 100 - l.jitterPercent
- max := 100 + l.jitterPercent
- percent = l.random.Intn(max-min) + min
- }
- latencyDuration := time.Duration(int(l.latency) * percent / 100)
- time.Sleep(latencyDuration)
-}
-
-// Put is a latent put request
-func (l *LatencyInjector) Put(ctx context.Context, entry *Entry) error {
- l.addLatency()
- return l.backend.Put(ctx, entry)
-}
-
-// Get is a latent get request
-func (l *LatencyInjector) Get(ctx context.Context, key string) (*Entry, error) {
- l.addLatency()
- return l.backend.Get(ctx, key)
-}
-
-// Delete is a latent delete request
-func (l *LatencyInjector) Delete(ctx context.Context, key string) error {
- l.addLatency()
- return l.backend.Delete(ctx, key)
-}
-
-// List is a latent list request
-func (l *LatencyInjector) List(ctx context.Context, prefix string) ([]string, error) {
- l.addLatency()
- return l.backend.List(ctx, prefix)
-}
-
-// Transaction is a latent transaction request
-func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []*TxnEntry) error {
- l.addLatency()
- return l.Transactional.Transaction(ctx, txns)
-}
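
A worked sketch of the jitter arithmetic above: with a base latency of 50ms and a jitter of 20, `addLatency` draws a multiplier in [80, 120), so every operation sleeps between 40ms and 60ms on top of the backend's own cost.

```go
li := physical.NewLatencyInjector(b, 50*time.Millisecond, 20, logger)

start := time.Now()
_, _ = li.Get(ctx, "foo")
elapsed := time.Since(start) // >= 40ms, plus the real backend latency
_ = elapsed
```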
diff --git a/vendor/github.com/hashicorp/vault/physical/physical.go b/vendor/github.com/hashicorp/vault/physical/physical.go
deleted file mode 100644
index 0f4b0002..00000000
--- a/vendor/github.com/hashicorp/vault/physical/physical.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package physical
-
-import (
- "context"
- "strings"
- "sync"
-
- log "github.com/hashicorp/go-hclog"
-)
-
-const DefaultParallelOperations = 128
-
-// The operation type
-type Operation string
-
-const (
- DeleteOperation Operation = "delete"
- GetOperation = "get"
- ListOperation = "list"
- PutOperation = "put"
-)
-
-// ShutdownChannel is the channel that is closed to signal a shutdown
-type ShutdownChannel chan struct{}
-
-// Backend is the interface required for a physical
-// backend. A physical backend is used to durably store
-// data outside of Vault. As such, it is completely untrusted,
-// and is only accessed via a security barrier. The backends
-// must represent keys in a hierarchical manner. All methods
-// are expected to be thread safe.
-type Backend interface {
- // Put is used to insert or update an entry
- Put(ctx context.Context, entry *Entry) error
-
- // Get is used to fetch an entry
- Get(ctx context.Context, key string) (*Entry, error)
-
- // Delete is used to permanently delete an entry
- Delete(ctx context.Context, key string) error
-
- // List is used to list all the keys under a given
- // prefix, up to the next prefix.
- List(ctx context.Context, prefix string) ([]string, error)
-}
-
-// HABackend is an extension to the standard physical
-// backend to support high-availability. Vault only expects to
-// use mutual exclusion to allow multiple instances to act as a
-// hot standby for a leader that services all requests.
-type HABackend interface {
- // LockWith is used for mutual exclusion based on the given key.
- LockWith(key, value string) (Lock, error)
-
- // Whether or not HA functionality is enabled
- HAEnabled() bool
-}
-
-// ToggleablePurgemonster is an interface for backends that can toggle on or
-// off special functionality and/or support purging. This is only used for the
-// cache, don't use it for other things.
-type ToggleablePurgemonster interface {
- Purge(ctx context.Context)
- SetEnabled(bool)
-}
-
-// RedirectDetect is an optional interface that an HABackend
-// can implement. If they do, a redirect address can be automatically
-// detected.
-type RedirectDetect interface {
- // DetectHostAddr is used to detect the host address
- DetectHostAddr() (string, error)
-}
-
-// Callback signatures for RunServiceDiscovery
-type ActiveFunction func() bool
-type SealedFunction func() bool
-type PerformanceStandbyFunction func() bool
-
-// ServiceDiscovery is an optional interface that an HABackend can implement.
-// If they do, the state of a backend is advertised to the service discovery
-// network.
-type ServiceDiscovery interface {
- // NotifyActiveStateChange is used by Core to notify a backend
- // capable of ServiceDiscovery that this Vault instance has changed
- // its status to active or standby.
- NotifyActiveStateChange() error
-
- // NotifySealedStateChange is used by Core to notify a backend
- // capable of ServiceDiscovery that Vault has changed its Sealed
- // status to sealed or unsealed.
- NotifySealedStateChange() error
-
- // NotifyPerformanceStandbyStateChange is used by Core to notify a backend
-	// capable of ServiceDiscovery that this Vault instance has changed its
- // status to performance standby or standby.
- NotifyPerformanceStandbyStateChange() error
-
- // Run executes any background service discovery tasks until the
- // shutdown channel is closed.
- RunServiceDiscovery(waitGroup *sync.WaitGroup, shutdownCh ShutdownChannel, redirectAddr string, activeFunc ActiveFunction, sealedFunc SealedFunction, perfStandbyFunc PerformanceStandbyFunction) error
-}
-
-type Lock interface {
- // Lock is used to acquire the given lock
- // The stopCh is optional and if closed should interrupt the lock
- // acquisition attempt. The return struct should be closed when
- // leadership is lost.
- Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
-
- // Unlock is used to release the lock
- Unlock() error
-
- // Returns the value of the lock and if it is held
- Value() (bool, string, error)
-}
-
-// Factory is the factory function to create a physical backend.
-type Factory func(config map[string]string, logger log.Logger) (Backend, error)
-
-// PermitPool is used to limit maximum outstanding requests
-type PermitPool struct {
- sem chan int
-}
-
-// NewPermitPool returns a new permit pool with the provided
-// number of permits
-func NewPermitPool(permits int) *PermitPool {
- if permits < 1 {
- permits = DefaultParallelOperations
- }
- return &PermitPool{
- sem: make(chan int, permits),
- }
-}
-
-// Acquire returns when a permit has been acquired
-func (c *PermitPool) Acquire() {
- c.sem <- 1
-}
-
-// Release returns a permit to the pool
-func (c *PermitPool) Release() {
- <-c.sem
-}
-
-// Prefixes is a shared helper function that returns all parent 'folders' for a
-// given vault key.
-// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar']
-func Prefixes(s string) []string {
- components := strings.Split(s, "/")
- result := []string{}
- for i := 1; i < len(components); i++ {
- result = append(result, strings.Join(components[:i], "/"))
- }
- return result
-}
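
Two small sketches of the helpers above. `PermitPool` is a counting semaphore built on a buffered channel; `Prefixes` enumerates a key's parent "folders".

```go
pool := physical.NewPermitPool(2)
pool.Acquire() // blocks once two permits are outstanding
defer pool.Release()

parents := physical.Prefixes("foo/bar/baz")
// parents == []string{"foo", "foo/bar"}
```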
diff --git a/vendor/github.com/hashicorp/vault/physical/physical_access.go b/vendor/github.com/hashicorp/vault/physical/physical_access.go
deleted file mode 100644
index 7497313a..00000000
--- a/vendor/github.com/hashicorp/vault/physical/physical_access.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package physical
-
-import (
- "context"
-)
-
-// PhysicalAccess is a wrapper around physical.Backend that allows Core to
-// expose its physical storage operations through PhysicalAccess() while
-// restricting the ability to modify Core.physical itself.
-type PhysicalAccess struct {
- physical Backend
-}
-
-var _ Backend = (*PhysicalAccess)(nil)
-
-func NewPhysicalAccess(physical Backend) *PhysicalAccess {
- return &PhysicalAccess{physical: physical}
-}
-
-func (p *PhysicalAccess) Put(ctx context.Context, entry *Entry) error {
- return p.physical.Put(ctx, entry)
-}
-
-func (p *PhysicalAccess) Get(ctx context.Context, key string) (*Entry, error) {
- return p.physical.Get(ctx, key)
-}
-
-func (p *PhysicalAccess) Delete(ctx context.Context, key string) error {
- return p.physical.Delete(ctx, key)
-}
-
-func (p *PhysicalAccess) List(ctx context.Context, prefix string) ([]string, error) {
- return p.physical.List(ctx, prefix)
-}
-
-func (p *PhysicalAccess) Purge(ctx context.Context) {
- if purgeable, ok := p.physical.(ToggleablePurgemonster); ok {
- purgeable.Purge(ctx)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/physical_util.go b/vendor/github.com/hashicorp/vault/physical/physical_util.go
deleted file mode 100644
index c4863339..00000000
--- a/vendor/github.com/hashicorp/vault/physical/physical_util.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !enterprise
-
-package physical
-
-// Entry is used to represent data stored by the physical backend
-type Entry struct {
- Key string
- Value []byte
- SealWrap bool `json:"seal_wrap,omitempty"`
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/physical_view.go b/vendor/github.com/hashicorp/vault/physical/physical_view.go
deleted file mode 100644
index da505a4f..00000000
--- a/vendor/github.com/hashicorp/vault/physical/physical_view.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package physical
-
-import (
- "context"
- "errors"
- "strings"
-)
-
-var (
- ErrRelativePath = errors.New("relative paths not supported")
-)
-
-// View represents a prefixed view of a physical backend
-type View struct {
- backend Backend
- prefix string
-}
-
-// Verify View satisfies the correct interfaces
-var _ Backend = (*View)(nil)
-
-// NewView takes an underlying physical backend and returns
-// a view of it that can only operate with the given prefix.
-func NewView(backend Backend, prefix string) *View {
- return &View{
- backend: backend,
- prefix: prefix,
- }
-}
-
-// List the contents of the prefixed view
-func (v *View) List(ctx context.Context, prefix string) ([]string, error) {
- if err := v.sanityCheck(prefix); err != nil {
- return nil, err
- }
- return v.backend.List(ctx, v.expandKey(prefix))
-}
-
-// Get the key of the prefixed view
-func (v *View) Get(ctx context.Context, key string) (*Entry, error) {
- if err := v.sanityCheck(key); err != nil {
- return nil, err
- }
- entry, err := v.backend.Get(ctx, v.expandKey(key))
- if err != nil {
- return nil, err
- }
-	if entry == nil {
-		return nil, nil
-	}
-	entry.Key = v.truncateKey(entry.Key)
-
- return &Entry{
- Key: entry.Key,
- Value: entry.Value,
- }, nil
-}
-
-// Put the entry into the prefix view
-func (v *View) Put(ctx context.Context, entry *Entry) error {
- if err := v.sanityCheck(entry.Key); err != nil {
- return err
- }
-
- nested := &Entry{
- Key: v.expandKey(entry.Key),
- Value: entry.Value,
- }
- return v.backend.Put(ctx, nested)
-}
-
-// Delete the entry from the prefix view
-func (v *View) Delete(ctx context.Context, key string) error {
- if err := v.sanityCheck(key); err != nil {
- return err
- }
- return v.backend.Delete(ctx, v.expandKey(key))
-}
-
-// sanityCheck is used to perform a sanity check on a key
-func (v *View) sanityCheck(key string) error {
- if strings.Contains(key, "..") {
- return ErrRelativePath
- }
- return nil
-}
-
-// expandKey is used to expand to the full key path with the prefix
-func (v *View) expandKey(suffix string) string {
- return v.prefix + suffix
-}
-
-// truncateKey is used to remove the prefix of the key
-func (v *View) truncateKey(full string) string {
- return strings.TrimPrefix(full, v.prefix)
-}
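
A sketch of the prefixed view in use (assuming `b` and `ctx` as before): keys written through the view are transparently namespaced, and reads strip the prefix back off.

```go
v := physical.NewView(b, "logical/")

_ = v.Put(ctx, &physical.Entry{Key: "foo", Value: []byte("bar")})
// the underlying backend now holds "logical/foo"

entry, _ := v.Get(ctx, "foo")
// entry.Key == "foo": the prefix is stripped on the way out
```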
diff --git a/vendor/github.com/hashicorp/vault/physical/testing.go b/vendor/github.com/hashicorp/vault/physical/testing.go
deleted file mode 100644
index 6bff9d22..00000000
--- a/vendor/github.com/hashicorp/vault/physical/testing.go
+++ /dev/null
@@ -1,488 +0,0 @@
-package physical
-
-import (
- "context"
- "reflect"
- "sort"
- "testing"
- "time"
-)
-
-func ExerciseBackend(t testing.TB, b Backend) {
- t.Helper()
-
- // Should be empty
- keys, err := b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("initial list failed: %v", err)
- }
- if len(keys) != 0 {
- t.Errorf("initial not empty: %v", keys)
- }
-
-	// Delete should succeed even if the key does not exist
- err = b.Delete(context.Background(), "foo")
- if err != nil {
- t.Fatalf("idempotent delete: %v", err)
- }
-
- // Get should not fail, but be nil
- out, err := b.Get(context.Background(), "foo")
- if err != nil {
- t.Fatalf("initial get failed: %v", err)
- }
- if out != nil {
- t.Errorf("initial get was not nil: %v", out)
- }
-
- // Make an entry
- e := &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(context.Background(), e)
- if err != nil {
- t.Fatalf("put failed: %v", err)
- }
-
- // Get should work
- out, err = b.Get(context.Background(), "foo")
- if err != nil {
- t.Fatalf("get failed: %v", err)
- }
- if !reflect.DeepEqual(out, e) {
- t.Errorf("bad: %v expected: %v", out, e)
- }
-
- // List should not be empty
- keys, err = b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("list failed: %v", err)
- }
- if len(keys) != 1 || keys[0] != "foo" {
- t.Errorf("keys[0] did not equal foo: %v", keys)
- }
-
- // Delete should work
- err = b.Delete(context.Background(), "foo")
- if err != nil {
- t.Fatalf("delete: %v", err)
- }
-
- // Should be empty
- keys, err = b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("list after delete: %v", err)
- }
- if len(keys) != 0 {
- t.Errorf("list after delete not empty: %v", keys)
- }
-
-	// Get should again return nil after the delete
- out, err = b.Get(context.Background(), "foo")
- if err != nil {
- t.Fatalf("get after delete: %v", err)
- }
- if out != nil {
- t.Errorf("get after delete not nil: %v", out)
- }
-
- // Multiple Puts should work; GH-189
- e = &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(context.Background(), e)
- if err != nil {
- t.Fatalf("multi put 1 failed: %v", err)
- }
- e = &Entry{Key: "foo", Value: []byte("test")}
- err = b.Put(context.Background(), e)
- if err != nil {
- t.Fatalf("multi put 2 failed: %v", err)
- }
-
- // Make a nested entry
- e = &Entry{Key: "foo/bar", Value: []byte("baz")}
- err = b.Put(context.Background(), e)
- if err != nil {
- t.Fatalf("nested put failed: %v", err)
- }
-
- keys, err = b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("list multi failed: %v", err)
- }
- sort.Strings(keys)
- if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" {
- t.Errorf("expected 2 keys [foo, foo/]: %v", keys)
- }
-
- // Delete with children should work
- err = b.Delete(context.Background(), "foo")
- if err != nil {
- t.Fatalf("delete after multi: %v", err)
- }
-
- // Get should return the child
- out, err = b.Get(context.Background(), "foo/bar")
- if err != nil {
- t.Fatalf("get after multi delete: %v", err)
- }
- if out == nil {
- t.Errorf("get after multi delete not nil: %v", out)
- }
-
- // Removal of nested secret should not leave artifacts
- e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")}
- err = b.Put(context.Background(), e)
- if err != nil {
- t.Fatalf("deep nest: %v", err)
- }
-
- err = b.Delete(context.Background(), "foo/nested1/nested2/nested3")
- if err != nil {
- t.Fatalf("failed to remove deep nest: %v", err)
- }
-
- keys, err = b.List(context.Background(), "foo/")
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if len(keys) != 1 || keys[0] != "bar" {
- t.Errorf("should be exactly 1 key == bar: %v", keys)
- }
-
- // Make a second nested entry to test prefix removal
- e = &Entry{Key: "foo/zip", Value: []byte("zap")}
- err = b.Put(context.Background(), e)
- if err != nil {
- t.Fatalf("failed to create second nested: %v", err)
- }
-
- // Delete should not remove the prefix
- err = b.Delete(context.Background(), "foo/bar")
- if err != nil {
- t.Fatalf("failed to delete nested prefix: %v", err)
- }
-
- keys, err = b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("list nested prefix: %v", err)
- }
- if len(keys) != 1 || keys[0] != "foo/" {
- t.Errorf("should be exactly 1 key == foo/: %v", keys)
- }
-
- // Delete should remove the prefix
- err = b.Delete(context.Background(), "foo/zip")
- if err != nil {
- t.Fatalf("failed to delete second prefix: %v", err)
- }
-
- keys, err = b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("listing after second delete failed: %v", err)
- }
- if len(keys) != 0 {
- t.Errorf("should be empty at end: %v", keys)
- }
-
- // When the root path is empty, adding and removing deep nested values should not break listing
- e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")}
- err = b.Put(context.Background(), e)
- if err != nil {
- t.Fatalf("deep nest: %v", err)
- }
-
- e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")}
- err = b.Put(context.Background(), e)
- if err != nil {
- t.Fatalf("deep nest: %v", err)
- }
-
- err = b.Delete(context.Background(), "foo/nested1/nested2/value2")
- if err != nil {
- t.Fatalf("failed to remove deep nest: %v", err)
- }
-
- keys, err = b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("listing of root failed after deletion: %v", err)
- }
- if len(keys) == 0 {
- t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys)
- keys, err = b.List(context.Background(), "foo/nested1")
- if err != nil {
- t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err)
- }
- // prove that the root should not be empty and that foo/nested1 exists
- if len(keys) != 0 {
- t.Logf(" keys can still be listed from nested1/ so it's not empty, expected nested2/: %v", keys)
- }
- }
-
- // cleanup left over listing bug test value
- err = b.Delete(context.Background(), "foo/nested1/nested2/value1")
- if err != nil {
- t.Fatalf("failed to remove deep nest: %v", err)
- }
-
- keys, err = b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("listing of root failed after delete of deep nest: %v", err)
- }
- if len(keys) != 0 {
- t.Errorf("should be empty at end: %v", keys)
- }
-}
-
-func ExerciseBackend_ListPrefix(t testing.TB, b Backend) {
- t.Helper()
-
- e1 := &Entry{Key: "foo", Value: []byte("test")}
- e2 := &Entry{Key: "foo/bar", Value: []byte("test")}
- e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")}
-
- defer func() {
- b.Delete(context.Background(), "foo")
- b.Delete(context.Background(), "foo/bar")
- b.Delete(context.Background(), "foo/bar/baz")
- }()
-
- err := b.Put(context.Background(), e1)
- if err != nil {
- t.Fatalf("failed to put entry 1: %v", err)
- }
- err = b.Put(context.Background(), e2)
- if err != nil {
- t.Fatalf("failed to put entry 2: %v", err)
- }
- err = b.Put(context.Background(), e3)
- if err != nil {
- t.Fatalf("failed to put entry 3: %v", err)
- }
-
- // Scan the root
- keys, err := b.List(context.Background(), "")
- if err != nil {
- t.Fatalf("list root: %v", err)
- }
- sort.Strings(keys)
- if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" {
- t.Errorf("root expected [foo foo/]: %v", keys)
- }
-
- // Scan foo/
- keys, err = b.List(context.Background(), "foo/")
- if err != nil {
- t.Fatalf("list level 1: %v", err)
- }
- sort.Strings(keys)
- if len(keys) != 2 || keys[0] != "bar" || keys[1] != "bar/" {
- t.Errorf("level 1 expected [bar bar/]: %v", keys)
- }
-
- // Scan foo/bar/
- keys, err = b.List(context.Background(), "foo/bar/")
- if err != nil {
- t.Fatalf("list level 2: %v", err)
- }
- sort.Strings(keys)
- if len(keys) != 1 || keys[0] != "baz" {
- t.Errorf("level 1 expected [baz]: %v", keys)
- }
-}
-
-func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) {
- t.Helper()
-
- // Get the lock
- lock, err := b.LockWith("foo", "bar")
- if err != nil {
- t.Fatalf("initial lock: %v", err)
- }
-
- // Attempt to lock
- leaderCh, err := lock.Lock(nil)
- if err != nil {
- t.Fatalf("lock attempt 1: %v", err)
- }
- if leaderCh == nil {
- t.Fatalf("missing leaderCh")
- }
-
- // Check the value
- held, val, err := lock.Value()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- if !held {
- t.Errorf("should be held")
- }
- if val != "bar" {
- t.Errorf("expected value bar: %v", err)
- }
-
- // Second acquisition should fail
- lock2, err := b2.LockWith("foo", "baz")
- if err != nil {
- t.Fatalf("lock 2: %v", err)
- }
-
- // Cancel attempt in 50 msec
- stopCh := make(chan struct{})
- time.AfterFunc(50*time.Millisecond, func() {
- close(stopCh)
- })
-
- // Attempt to lock
- leaderCh2, err := lock2.Lock(stopCh)
- if err != nil {
- t.Fatalf("stop lock 2: %v", err)
- }
- if leaderCh2 != nil {
- t.Errorf("should not have gotten leaderCh: %v", leaderCh2)
- }
-
- // Release the first lock
- lock.Unlock()
-
- // Attempt to lock should work
- leaderCh2, err = lock2.Lock(nil)
- if err != nil {
- t.Fatalf("lock 2 lock: %v", err)
- }
- if leaderCh2 == nil {
- t.Errorf("should get leaderCh")
- }
-
- // Check the value
- held, val, err = lock2.Value()
- if err != nil {
- t.Fatalf("value: %v", err)
- }
- if !held {
- t.Errorf("should still be held")
- }
- if val != "baz" {
- t.Errorf("expected: baz, got: %v", val)
- }
-
- // Cleanup
- lock2.Unlock()
-}
-
-func ExerciseTransactionalBackend(t testing.TB, b Backend) {
- t.Helper()
- tb, ok := b.(Transactional)
- if !ok {
- t.Fatal("Not a transactional backend")
- }
-
- txns := SetupTestingTransactions(t, b)
-
- if err := tb.Transaction(context.Background(), txns); err != nil {
- t.Fatal(err)
- }
-
- keys, err := b.List(context.Background(), "")
- if err != nil {
- t.Fatal(err)
- }
-
- expected := []string{"foo", "zip"}
-
- sort.Strings(keys)
- sort.Strings(expected)
- if !reflect.DeepEqual(keys, expected) {
- t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
- }
-
- entry, err := b.Get(context.Background(), "foo")
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatal("got nil entry")
- }
- if entry.Value == nil {
- t.Fatal("got nil value")
- }
- if string(entry.Value) != "bar3" {
- t.Fatal("updates did not apply correctly")
- }
-
- entry, err = b.Get(context.Background(), "zip")
- if err != nil {
- t.Fatal(err)
- }
- if entry == nil {
- t.Fatal("got nil entry")
- }
- if entry.Value == nil {
- t.Fatal("got nil value")
- }
- if string(entry.Value) != "zap3" {
- t.Fatal("updates did not apply correctly")
- }
-}
-
-func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry {
- t.Helper()
- // Add a few keys so that we test rollback with deletion
- if err := b.Put(context.Background(), &Entry{
- Key: "foo",
- Value: []byte("bar"),
- }); err != nil {
- t.Fatal(err)
- }
- if err := b.Put(context.Background(), &Entry{
- Key: "zip",
- Value: []byte("zap"),
- }); err != nil {
- t.Fatal(err)
- }
- if err := b.Put(context.Background(), &Entry{
- Key: "deleteme",
- }); err != nil {
- t.Fatal(err)
- }
- if err := b.Put(context.Background(), &Entry{
- Key: "deleteme2",
- }); err != nil {
- t.Fatal(err)
- }
-
- txns := []*TxnEntry{
- &TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "foo",
- Value: []byte("bar2"),
- },
- },
- &TxnEntry{
- Operation: DeleteOperation,
- Entry: &Entry{
- Key: "deleteme",
- },
- },
- &TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "foo",
- Value: []byte("bar3"),
- },
- },
- &TxnEntry{
- Operation: DeleteOperation,
- Entry: &Entry{
- Key: "deleteme2",
- },
- },
- &TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: "zip",
- Value: []byte("zap3"),
- },
- },
- }
-
- return txns
-}
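
These helpers are intended to be driven from a backend package's own tests; a typical (hypothetical) harness looks roughly like this:

```go
func TestInmemBackend(t *testing.T) {
	b, err := inmem.NewInmem(nil, hclog.NewNullLogger())
	if err != nil {
		t.Fatal(err)
	}
	physical.ExerciseBackend(t, b)
	physical.ExerciseBackend_ListPrefix(t, b)
}
```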
diff --git a/vendor/github.com/hashicorp/vault/physical/transactions.go b/vendor/github.com/hashicorp/vault/physical/transactions.go
deleted file mode 100644
index 19f0d2cb..00000000
--- a/vendor/github.com/hashicorp/vault/physical/transactions.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package physical
-
-import (
- "context"
-
- multierror "github.com/hashicorp/go-multierror"
-)
-
-// TxnEntry is an operation that takes place atomically as part of
-// a transactional update. Only supported by Transactional backends.
-type TxnEntry struct {
- Operation Operation
- Entry *Entry
-}
-
-// Transactional is an optional interface for backends that
-// support doing transactional updates of multiple keys. This is
-// required for some features such as replication.
-type Transactional interface {
- // The function to run a transaction
- Transaction(context.Context, []*TxnEntry) error
-}
-
-type TransactionalBackend interface {
- Backend
- Transactional
-}
-
-type PseudoTransactional interface {
-	// The internal functions must do no locking or permit pool acquisition.
-	// Depending on the backend, and whether it natively supports transactions, these
- // may simply chain to the normal backend functions.
- GetInternal(context.Context, string) (*Entry, error)
- PutInternal(context.Context, *Entry) error
- DeleteInternal(context.Context, string) error
-}
-
-// GenericTransactionHandler implements the transaction interface on top of a
-// PseudoTransactional backend
-func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns []*TxnEntry) (retErr error) {
- rollbackStack := make([]*TxnEntry, 0, len(txns))
- var dirty bool
-
- // We walk the transactions in order; each successful operation goes into a
- // LIFO for rollback if we hit an error along the way
-TxnWalk:
- for _, txn := range txns {
- switch txn.Operation {
- case DeleteOperation:
- entry, err := t.GetInternal(ctx, txn.Entry.Key)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- dirty = true
- break TxnWalk
- }
- if entry == nil {
- // Nothing to delete or roll back
- continue
- }
- rollbackEntry := &TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: entry.Key,
- Value: entry.Value,
- },
- }
- err = t.DeleteInternal(ctx, txn.Entry.Key)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- dirty = true
- break TxnWalk
- }
- rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...)
-
- case PutOperation:
- entry, err := t.GetInternal(ctx, txn.Entry.Key)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- dirty = true
- break TxnWalk
- }
-			// If nothing existed beforehand, rolling back requires a delete; otherwise we restore the prior value
- var rollbackEntry *TxnEntry
- if entry == nil {
- rollbackEntry = &TxnEntry{
- Operation: DeleteOperation,
- Entry: &Entry{
- Key: txn.Entry.Key,
- },
- }
- } else {
- rollbackEntry = &TxnEntry{
- Operation: PutOperation,
- Entry: &Entry{
- Key: entry.Key,
- Value: entry.Value,
- },
- }
- }
-
- err = t.PutInternal(ctx, txn.Entry)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- dirty = true
- break TxnWalk
- }
- rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...)
- }
- }
-
- // Need to roll back because we hit an error along the way
- if dirty {
- // While traversing this, if we get an error, we continue anyways in
- // best-effort fashion
- for _, txn := range rollbackStack {
- switch txn.Operation {
- case DeleteOperation:
- err := t.DeleteInternal(ctx, txn.Entry.Key)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- }
- case PutOperation:
- err := t.PutInternal(ctx, txn.Entry)
- if err != nil {
- retErr = multierror.Append(retErr, err)
- }
- }
- }
- }
-
- return
-}
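
A sketch of the rollback semantics (assuming `pt` is some `PseudoTransactional` backend and `ctx` a context): if any operation fails partway, the handler replays the LIFO rollback stack so earlier writes are undone, and all errors are aggregated via multierror.

```go
txns := []*physical.TxnEntry{
	{Operation: physical.PutOperation, Entry: &physical.Entry{Key: "foo", Value: []byte("new")}},
	{Operation: physical.DeleteOperation, Entry: &physical.Entry{Key: "bar"}},
}

// If the delete of "bar" fails, "foo" is restored to its prior value
// (or deleted, if it did not exist before the transaction).
err := physical.GenericTransactionHandler(ctx, pt, txns)
_ = err
```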
diff --git a/vendor/github.com/hashicorp/vault/physical/types.pb.go b/vendor/github.com/hashicorp/vault/physical/types.pb.go
deleted file mode 100644
index 91fbb0e8..00000000
--- a/vendor/github.com/hashicorp/vault/physical/types.pb.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: physical/types.proto
-
-package physical
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type EncryptedBlobInfo struct {
- Ciphertext []byte `protobuf:"bytes,1,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"`
- IV []byte `protobuf:"bytes,2,opt,name=iv,proto3" json:"iv,omitempty"`
- HMAC []byte `protobuf:"bytes,3,opt,name=hmac,proto3" json:"hmac,omitempty"`
- Wrapped bool `protobuf:"varint,4,opt,name=wrapped,proto3" json:"wrapped,omitempty"`
- KeyInfo *SealKeyInfo `protobuf:"bytes,5,opt,name=key_info,json=keyInfo,proto3" json:"key_info,omitempty"`
- // Key is the Key value for the entry that corresponds to
- // physical.Entry.Key's value
- Key string `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EncryptedBlobInfo) Reset() { *m = EncryptedBlobInfo{} }
-func (m *EncryptedBlobInfo) String() string { return proto.CompactTextString(m) }
-func (*EncryptedBlobInfo) ProtoMessage() {}
-func (*EncryptedBlobInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_deea33bd14ea5328, []int{0}
-}
-
-func (m *EncryptedBlobInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EncryptedBlobInfo.Unmarshal(m, b)
-}
-func (m *EncryptedBlobInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EncryptedBlobInfo.Marshal(b, m, deterministic)
-}
-func (m *EncryptedBlobInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EncryptedBlobInfo.Merge(m, src)
-}
-func (m *EncryptedBlobInfo) XXX_Size() int {
- return xxx_messageInfo_EncryptedBlobInfo.Size(m)
-}
-func (m *EncryptedBlobInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_EncryptedBlobInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EncryptedBlobInfo proto.InternalMessageInfo
-
-func (m *EncryptedBlobInfo) GetCiphertext() []byte {
- if m != nil {
- return m.Ciphertext
- }
- return nil
-}
-
-func (m *EncryptedBlobInfo) GetIV() []byte {
- if m != nil {
- return m.IV
- }
- return nil
-}
-
-func (m *EncryptedBlobInfo) GetHMAC() []byte {
- if m != nil {
- return m.HMAC
- }
- return nil
-}
-
-func (m *EncryptedBlobInfo) GetWrapped() bool {
- if m != nil {
- return m.Wrapped
- }
- return false
-}
-
-func (m *EncryptedBlobInfo) GetKeyInfo() *SealKeyInfo {
- if m != nil {
- return m.KeyInfo
- }
- return nil
-}
-
-func (m *EncryptedBlobInfo) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-// SealKeyInfo contains information regarding the seal used to encrypt the entry.
-type SealKeyInfo struct {
- // Mechanism is the method used by the seal to encrypt and sign the
- // data as defined by the seal.
- Mechanism uint64 `protobuf:"varint,1,opt,name=Mechanism,proto3" json:"Mechanism,omitempty"`
- HMACMechanism uint64 `protobuf:"varint,2,opt,name=HMACMechanism,proto3" json:"HMACMechanism,omitempty"`
- // This is an opaque ID used by the seal to identify the specific
- // key to use as defined by the seal. This could be a version, key
- // label, or something else.
- KeyID string `protobuf:"bytes,3,opt,name=KeyID,proto3" json:"KeyID,omitempty"`
- HMACKeyID string `protobuf:"bytes,4,opt,name=HMACKeyID,proto3" json:"HMACKeyID,omitempty"`
-	// These values are used when generating our own data encryption keys
- // and encrypting them using the autoseal
- WrappedKey []byte `protobuf:"bytes,5,opt,name=WrappedKey,proto3" json:"WrappedKey,omitempty"`
- // Mechanism specific flags
- Flags uint64 `protobuf:"varint,6,opt,name=Flags,proto3" json:"Flags,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SealKeyInfo) Reset() { *m = SealKeyInfo{} }
-func (m *SealKeyInfo) String() string { return proto.CompactTextString(m) }
-func (*SealKeyInfo) ProtoMessage() {}
-func (*SealKeyInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_deea33bd14ea5328, []int{1}
-}
-
-func (m *SealKeyInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SealKeyInfo.Unmarshal(m, b)
-}
-func (m *SealKeyInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SealKeyInfo.Marshal(b, m, deterministic)
-}
-func (m *SealKeyInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SealKeyInfo.Merge(m, src)
-}
-func (m *SealKeyInfo) XXX_Size() int {
- return xxx_messageInfo_SealKeyInfo.Size(m)
-}
-func (m *SealKeyInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_SealKeyInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SealKeyInfo proto.InternalMessageInfo
-
-func (m *SealKeyInfo) GetMechanism() uint64 {
- if m != nil {
- return m.Mechanism
- }
- return 0
-}
-
-func (m *SealKeyInfo) GetHMACMechanism() uint64 {
- if m != nil {
- return m.HMACMechanism
- }
- return 0
-}
-
-func (m *SealKeyInfo) GetKeyID() string {
- if m != nil {
- return m.KeyID
- }
- return ""
-}
-
-func (m *SealKeyInfo) GetHMACKeyID() string {
- if m != nil {
- return m.HMACKeyID
- }
- return ""
-}
-
-func (m *SealKeyInfo) GetWrappedKey() []byte {
- if m != nil {
- return m.WrappedKey
- }
- return nil
-}
-
-func (m *SealKeyInfo) GetFlags() uint64 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*EncryptedBlobInfo)(nil), "physical.EncryptedBlobInfo")
- proto.RegisterType((*SealKeyInfo)(nil), "physical.SealKeyInfo")
-}
-
-func init() { proto.RegisterFile("physical/types.proto", fileDescriptor_deea33bd14ea5328) }
-
-var fileDescriptor_deea33bd14ea5328 = []byte{
- // 312 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x5f, 0x4b, 0xc3, 0x30,
- 0x14, 0xc5, 0x69, 0xd7, 0xfd, 0xbb, 0x9b, 0xa2, 0x61, 0x42, 0x1e, 0x44, 0xca, 0x54, 0xe8, 0x53,
- 0x2b, 0xfa, 0x09, 0x9c, 0x7f, 0x50, 0xc6, 0x5e, 0xe2, 0x83, 0xe0, 0x8b, 0x64, 0x59, 0xb6, 0x84,
- 0x75, 0x4d, 0x68, 0xb3, 0x69, 0x3e, 0x98, 0x4f, 0x7e, 0x39, 0x49, 0x4a, 0xd9, 0x7c, 0xbb, 0xf7,
- 0x97, 0xc3, 0xe1, 0x9c, 0x1b, 0x18, 0x69, 0x61, 0x2b, 0xc9, 0x68, 0x9e, 0x19, 0xab, 0x79, 0x95,
- 0xea, 0x52, 0x19, 0x85, 0x7a, 0x0d, 0x1d, 0xff, 0x04, 0x70, 0xfa, 0x54, 0xb0, 0xd2, 0x6a, 0xc3,
- 0x17, 0x93, 0x5c, 0xcd, 0x5f, 0x8b, 0xa5, 0x42, 0x17, 0x00, 0x4c, 0x6a, 0xc1, 0x4b, 0xc3, 0xbf,
- 0x0d, 0x0e, 0xe2, 0x20, 0x19, 0x92, 0x03, 0x82, 0x8e, 0x21, 0x94, 0x3b, 0x1c, 0x7a, 0x1e, 0xca,
- 0x1d, 0x42, 0x10, 0x89, 0x0d, 0x65, 0xb8, 0xe5, 0x89, 0x9f, 0x11, 0x86, 0xee, 0x57, 0x49, 0xb5,
- 0xe6, 0x0b, 0x1c, 0xc5, 0x41, 0xd2, 0x23, 0xcd, 0x8a, 0x6e, 0xa0, 0xb7, 0xe6, 0xf6, 0x53, 0x16,
- 0x4b, 0x85, 0xdb, 0x71, 0x90, 0x0c, 0x6e, 0xcf, 0xd2, 0x26, 0x50, 0xfa, 0xc6, 0x69, 0x3e, 0xe5,
- 0xd6, 0xc5, 0x20, 0xdd, 0x75, 0x3d, 0xa0, 0x13, 0x68, 0xad, 0xb9, 0xc5, 0x9d, 0x38, 0x48, 0xfa,
- 0xc4, 0x8d, 0xe3, 0xdf, 0x00, 0x06, 0x07, 0x52, 0x74, 0x0e, 0xfd, 0x19, 0x67, 0x82, 0x16, 0xb2,
- 0xda, 0xf8, 0xc0, 0x11, 0xd9, 0x03, 0x74, 0x05, 0x47, 0x2f, 0xb3, 0xfb, 0x87, 0xbd, 0x22, 0xf4,
- 0x8a, 0xff, 0x10, 0x8d, 0xa0, 0xed, 0xec, 0x1e, 0x7d, 0x8d, 0x3e, 0xa9, 0x17, 0xe7, 0xec, 0x64,
- 0xf5, 0x4b, 0xe4, 0x5f, 0xf6, 0xc0, 0x5d, 0xea, 0xbd, 0xae, 0x35, 0xe5, 0xd6, 0xb7, 0x19, 0x92,
- 0x03, 0xe2, 0x3c, 0x9f, 0x73, 0xba, 0xaa, 0x7c, 0xf6, 0x88, 0xd4, 0xcb, 0xe4, 0xfa, 0xe3, 0x72,
- 0x25, 0x8d, 0xd8, 0xce, 0x53, 0xa6, 0x36, 0x99, 0xa0, 0x95, 0x90, 0x4c, 0x95, 0x3a, 0xdb, 0xd1,
- 0x6d, 0x6e, 0xb2, 0xe6, 0x16, 0xf3, 0x8e, 0xff, 0xad, 0xbb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0x01, 0x95, 0xea, 0x9d, 0xc5, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/hashicorp/vault/physical/types.proto b/vendor/github.com/hashicorp/vault/physical/types.proto
deleted file mode 100644
index 0cc2eb53..00000000
--- a/vendor/github.com/hashicorp/vault/physical/types.proto
+++ /dev/null
@@ -1,38 +0,0 @@
-syntax = "proto3";
-
-option go_package = "github.com/hashicorp/vault/physical";
-
-package physical;
-
-message EncryptedBlobInfo {
- bytes ciphertext = 1;
- bytes iv = 2;
- bytes hmac = 3;
- bool wrapped = 4;
- SealKeyInfo key_info = 5;
-
- // Key is the Key value for the entry that corresponds to
- // physical.Entry.Key's value
- string key = 6;
-}
-
-// SealKeyInfo contains information regarding the seal used to encrypt the entry.
-message SealKeyInfo {
- // Mechanism is the method used by the seal to encrypt and sign the
- // data as defined by the seal.
- uint64 Mechanism = 1;
- uint64 HMACMechanism = 2;
-
- // This is an opaque ID used by the seal to identify the specific
- // key to use as defined by the seal. This could be a version, key
- // label, or something else.
- string KeyID = 3;
- string HMACKeyID = 4;
-
-  // These values are used when generating our own data encryption keys
- // and encrypting them using the autoseal
- bytes WrappedKey = 5;
-
- // Mechanism specific flags
- uint64 Flags = 6;
-}
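
For orientation, a sketch of populating the generated Go message that mirrors this definition (`ciphertext`, `iv`, and `mac` are hypothetical byte slices, and the key name is made up):

```go
blob := &physical.EncryptedBlobInfo{
	Ciphertext: ciphertext,
	IV:         iv,
	HMAC:       mac,
	Wrapped:    true,
	KeyInfo:    &physical.SealKeyInfo{Mechanism: 1, KeyID: "v1"},
	Key:        "foo/bar",
}

data, err := proto.Marshal(blob) // ready for storage alongside the entry
_, _ = data, err
```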
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go b/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go
deleted file mode 100644
index a36f1a86..00000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/mysql/mysql.go
+++ /dev/null
@@ -1,317 +0,0 @@
-package mysql
-
-import (
- "context"
- "database/sql"
- "errors"
- "strings"
- "time"
-
- stdmysql "github.com/go-sql-driver/mysql"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/dbtxn"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
-)
-
-const (
- defaultMysqlRevocationStmts = `
- REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
- DROP USER '{{name}}'@'%'
- `
-
- defaultMySQLRotateRootCredentialsSQL = `
- ALTER USER '{{username}}'@'%' IDENTIFIED BY '{{password}}';
- `
-
- mySQLTypeName = "mysql"
-)
-
-var (
- MetadataLen int = 10
- LegacyMetadataLen int = 4
- UsernameLen int = 32
- LegacyUsernameLen int = 16
-)
-
-var _ dbplugin.Database = &MySQL{}
-
-type MySQL struct {
- *connutil.SQLConnectionProducer
- credsutil.CredentialsProducer
-}
-
-// New implements builtinplugins.BuiltinFactory
-func New(displayNameLen, roleNameLen, usernameLen int) func() (interface{}, error) {
- return func() (interface{}, error) {
- db := new(displayNameLen, roleNameLen, usernameLen)
- // Wrap the plugin with middleware to sanitize errors
- dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.SecretValues)
-
- return dbType, nil
- }
-}
-
-func new(displayNameLen, roleNameLen, usernameLen int) *MySQL {
- connProducer := &connutil.SQLConnectionProducer{}
- connProducer.Type = mySQLTypeName
-
- credsProducer := &credsutil.SQLCredentialsProducer{
- DisplayNameLen: displayNameLen,
- RoleNameLen: roleNameLen,
- UsernameLen: usernameLen,
- Separator: "-",
- }
-
- return &MySQL{
- SQLConnectionProducer: connProducer,
- CredentialsProducer: credsProducer,
- }
-}
-
-// Run instantiates a MySQL object, and runs the RPC server for the plugin
-func Run(apiTLSConfig *api.TLSConfig) error {
- return runCommon(false, apiTLSConfig)
-}
-
-// RunLegacy instantiates a MySQL object, and runs the RPC server for the plugin in legacy mode
-func RunLegacy(apiTLSConfig *api.TLSConfig) error {
- return runCommon(true, apiTLSConfig)
-}
-
-func runCommon(legacy bool, apiTLSConfig *api.TLSConfig) error {
- var f func() (interface{}, error)
- if legacy {
- f = New(credsutil.NoneLength, LegacyMetadataLen, LegacyUsernameLen)
- } else {
- f = New(MetadataLen, MetadataLen, UsernameLen)
- }
- dbType, err := f()
- if err != nil {
- return err
- }
-
- plugins.Serve(dbType.(dbplugin.Database), apiTLSConfig)
-
- return nil
-}
-
-func (m *MySQL) Type() (string, error) {
- return mySQLTypeName, nil
-}
-
-func (m *MySQL) getConnection(ctx context.Context) (*sql.DB, error) {
- db, err := m.Connection(ctx)
- if err != nil {
- return nil, err
- }
-
- return db.(*sql.DB), nil
-}
-
-func (m *MySQL) CreateUser(ctx context.Context, statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- // Grab the lock
- m.Lock()
- defer m.Unlock()
-
- statements = dbutil.StatementCompatibilityHelper(statements)
-
- // Get the connection
- db, err := m.getConnection(ctx)
- if err != nil {
- return "", "", err
- }
-
- if len(statements.Creation) == 0 {
- return "", "", dbutil.ErrEmptyCreationStatement
- }
-
- username, err = m.GenerateUsername(usernameConfig)
- if err != nil {
- return "", "", err
- }
-
- password, err = m.GeneratePassword()
- if err != nil {
- return "", "", err
- }
-
- expirationStr, err := m.GenerateExpiration(expiration)
- if err != nil {
- return "", "", err
- }
-
- // Start a transaction
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return "", "", err
- }
- defer tx.Rollback()
-
- // Execute each query
- for _, stmt := range statements.Creation {
- for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
- query = dbutil.QueryHelper(query, map[string]string{
- "name": username,
- "password": password,
- "expiration": expirationStr,
- })
-
- stmt, err := tx.PrepareContext(ctx, query)
- if err != nil {
-				// If we get back MySQL Error 1295 ("This command is not
-				// supported in the prepared statement protocol yet"), we
-				// execute the statement without preparing it. This allows the
-				// caller to manually prepare statements, as well as run other
-				// commands that the prepared statement protocol does not yet
-				// support. If that execution succeeds, continue to the next statement.
- if e, ok := err.(*stdmysql.MySQLError); ok && e.Number == 1295 {
- _, err = tx.ExecContext(ctx, query)
- if err != nil {
- return "", "", err
- }
- continue
- }
-
- return "", "", err
- }
- if _, err := stmt.ExecContext(ctx); err != nil {
- stmt.Close()
- return "", "", err
- }
- stmt.Close()
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return "", "", err
- }
-
- return username, password, nil
-}
-
-// RenewUser is a NOOP; renewal requires no action against MySQL.
-func (m *MySQL) RenewUser(ctx context.Context, statements dbplugin.Statements, username string, expiration time.Time) error {
- return nil
-}
-
-func (m *MySQL) RevokeUser(ctx context.Context, statements dbplugin.Statements, username string) error {
-	// Grab the lock
- m.Lock()
- defer m.Unlock()
-
- statements = dbutil.StatementCompatibilityHelper(statements)
-
- // Get the connection
- db, err := m.getConnection(ctx)
- if err != nil {
- return err
- }
-
- revocationStmts := statements.Revocation
- // Use a default SQL statement for revocation if one cannot be fetched from the role
- if len(revocationStmts) == 0 {
- revocationStmts = []string{defaultMysqlRevocationStmts}
- }
-
- // Start a transaction
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- for _, stmt := range revocationStmts {
- for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
-			// This is not a prepared statement because not all commands are
-			// supported by the prepared statement protocol (MySQL Error 1295:
-			// "This command is not supported in the prepared statement protocol yet").
-			// Reference: https://mariadb.com/kb/en/mariadb/prepare-statement/
- query = strings.Replace(query, "{{name}}", username, -1)
- _, err = tx.ExecContext(ctx, query)
- if err != nil {
- return err
- }
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *MySQL) RotateRootCredentials(ctx context.Context, statements []string) (map[string]interface{}, error) {
- m.Lock()
- defer m.Unlock()
-
- if len(m.Username) == 0 || len(m.Password) == 0 {
- return nil, errors.New("username and password are required to rotate")
- }
-
-	rotateStatements := statements
-	if len(rotateStatements) == 0 {
-		rotateStatements = []string{defaultMySQLRotateRootCredentialsSQL}
- }
-
- db, err := m.getConnection(ctx)
- if err != nil {
- return nil, err
- }
-
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return nil, err
- }
- defer func() {
- tx.Rollback()
- }()
-
- password, err := m.GeneratePassword()
- if err != nil {
- return nil, err
- }
-
-	for _, stmt := range rotateStatements {
- for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- m := map[string]string{
- "username": m.Username,
- "password": password,
- }
- if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil {
- return nil, err
- }
- }
- }
-
- if err := tx.Commit(); err != nil {
- return nil, err
- }
-
- if err := db.Close(); err != nil {
- return nil, err
- }
-
- m.RawConfig["password"] = password
- return m.RawConfig, nil
-}
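
The CreateUser flow above renders each role statement by substituting {{name}}, {{password}}, and {{expiration}} before executing it inside a transaction, with a fallback to an unprepared ExecContext on MySQL Error 1295. A minimal standalone sketch of the substitution step (renderStatement is a hypothetical local stand-in for dbutil.QueryHelper, not the plugin itself):

```go
package main

import (
	"fmt"
	"strings"
)

// renderStatement substitutes {{key}} placeholders, mirroring how the
// plugin templates creation statements before execution.
func renderStatement(tpl string, data map[string]string) string {
	for k, v := range data {
		tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
	}
	return tpl
}

func main() {
	stmt := "CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';"
	fmt.Println(renderStatement(stmt, map[string]string{
		"name":     "v-token-readonly-x7k2",
		"password": "A1a-examplepassword",
	}))
}
```
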
diff --git a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go b/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go
deleted file mode 100644
index 36dd0036..00000000
--- a/vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql.go
+++ /dev/null
@@ -1,427 +0,0 @@
-package postgresql
-
-import (
- "context"
- "database/sql"
- "errors"
- "fmt"
- "strings"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/dbtxn"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/plugins"
- "github.com/hashicorp/vault/plugins/helper/database/connutil"
- "github.com/hashicorp/vault/plugins/helper/database/credsutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
- "github.com/lib/pq"
-)
-
-const (
- postgreSQLTypeName = "postgres"
- defaultPostgresRenewSQL = `
-ALTER ROLE "{{name}}" VALID UNTIL '{{expiration}}';
-`
- defaultPostgresRotateRootCredentialsSQL = `
-ALTER ROLE "{{username}}" WITH PASSWORD '{{password}}';
-`
-)
-
-var _ dbplugin.Database = &PostgreSQL{}
-
-// New implements builtinplugins.BuiltinFactory
-func New() (interface{}, error) {
- db := new()
- // Wrap the plugin with middleware to sanitize errors
- dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.SecretValues)
- return dbType, nil
-}
-
-func new() *PostgreSQL {
- connProducer := &connutil.SQLConnectionProducer{}
- connProducer.Type = postgreSQLTypeName
-
- credsProducer := &credsutil.SQLCredentialsProducer{
- DisplayNameLen: 8,
- RoleNameLen: 8,
- UsernameLen: 63,
- Separator: "-",
- }
-
- db := &PostgreSQL{
- SQLConnectionProducer: connProducer,
- CredentialsProducer: credsProducer,
- }
-
- return db
-}
-
-// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin
-func Run(apiTLSConfig *api.TLSConfig) error {
- dbType, err := New()
- if err != nil {
- return err
- }
-
- plugins.Serve(dbType.(dbplugin.Database), apiTLSConfig)
-
- return nil
-}
-
-type PostgreSQL struct {
- *connutil.SQLConnectionProducer
- credsutil.CredentialsProducer
-}
-
-func (p *PostgreSQL) Type() (string, error) {
- return postgreSQLTypeName, nil
-}
-
-func (p *PostgreSQL) getConnection(ctx context.Context) (*sql.DB, error) {
- db, err := p.Connection(ctx)
- if err != nil {
- return nil, err
- }
-
- return db.(*sql.DB), nil
-}
-
-func (p *PostgreSQL) CreateUser(ctx context.Context, statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
- statements = dbutil.StatementCompatibilityHelper(statements)
-
- if len(statements.Creation) == 0 {
- return "", "", dbutil.ErrEmptyCreationStatement
- }
-
- // Grab the lock
- p.Lock()
- defer p.Unlock()
-
- username, err = p.GenerateUsername(usernameConfig)
- if err != nil {
- return "", "", err
- }
-
- password, err = p.GeneratePassword()
- if err != nil {
- return "", "", err
- }
-
- expirationStr, err := p.GenerateExpiration(expiration)
- if err != nil {
- return "", "", err
- }
-
- // Get the connection
- db, err := p.getConnection(ctx)
- if err != nil {
- return "", "", err
- }
-
- // Start a transaction
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return "", "", err
-
- }
- defer func() {
- tx.Rollback()
- }()
-
- // Execute each query
- for _, stmt := range statements.Creation {
- for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- m := map[string]string{
- "name": username,
- "password": password,
- "expiration": expirationStr,
- }
- if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil {
- return "", "", err
- }
- }
- }
-
- // Commit the transaction
- if err := tx.Commit(); err != nil {
- return "", "", err
- }
-
- return username, password, nil
-}
-
-func (p *PostgreSQL) RenewUser(ctx context.Context, statements dbplugin.Statements, username string, expiration time.Time) error {
- p.Lock()
- defer p.Unlock()
-
- statements = dbutil.StatementCompatibilityHelper(statements)
-
- renewStmts := statements.Renewal
- if len(renewStmts) == 0 {
- renewStmts = []string{defaultPostgresRenewSQL}
- }
-
- db, err := p.getConnection(ctx)
- if err != nil {
- return err
- }
-
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return err
- }
- defer func() {
- tx.Rollback()
- }()
-
- expirationStr, err := p.GenerateExpiration(expiration)
- if err != nil {
- return err
- }
-
- for _, stmt := range renewStmts {
- for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- m := map[string]string{
- "name": username,
- "expiration": expirationStr,
- }
- if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil {
- return err
- }
- }
- }
-
- return tx.Commit()
-}
-
-func (p *PostgreSQL) RevokeUser(ctx context.Context, statements dbplugin.Statements, username string) error {
- // Grab the lock
- p.Lock()
- defer p.Unlock()
-
- statements = dbutil.StatementCompatibilityHelper(statements)
-
- if len(statements.Revocation) == 0 {
- return p.defaultRevokeUser(ctx, username)
- }
-
- return p.customRevokeUser(ctx, username, statements.Revocation)
-}
-
-func (p *PostgreSQL) customRevokeUser(ctx context.Context, username string, revocationStmts []string) error {
- db, err := p.getConnection(ctx)
- if err != nil {
- return err
- }
-
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return err
- }
- defer func() {
- tx.Rollback()
- }()
-
- for _, stmt := range revocationStmts {
- for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
-
- m := map[string]string{
- "name": username,
- }
- if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil {
- return err
- }
- }
- }
-
- return tx.Commit()
-}
-
-func (p *PostgreSQL) defaultRevokeUser(ctx context.Context, username string) error {
- db, err := p.getConnection(ctx)
- if err != nil {
- return err
- }
-
- // Check if the role exists
- var exists bool
- err = db.QueryRowContext(ctx, "SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists)
- if err != nil && err != sql.ErrNoRows {
- return err
- }
-
-	if !exists {
- return nil
- }
-
- // Query for permissions; we need to revoke permissions before we can drop
- // the role
- // This isn't done in a transaction because even if we fail along the way,
- // we want to remove as much access as possible
- stmt, err := db.PrepareContext(ctx, "SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;")
- if err != nil {
- return err
- }
- defer stmt.Close()
-
- rows, err := stmt.QueryContext(ctx, username)
- if err != nil {
- return err
- }
- defer rows.Close()
-
- const initialNumRevocations = 16
- revocationStmts := make([]string, 0, initialNumRevocations)
- for rows.Next() {
- var schema string
- err = rows.Scan(&schema)
- if err != nil {
- // keep going; remove as many permissions as possible right now
- continue
- }
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`,
- pq.QuoteIdentifier(schema),
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE USAGE ON SCHEMA %s FROM %s;`,
- pq.QuoteIdentifier(schema),
- pq.QuoteIdentifier(username)))
- }
-
- // for good measure, revoke all privileges and usage on schema public
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`,
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;",
- pq.QuoteIdentifier(username)))
-
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- "REVOKE USAGE ON SCHEMA public FROM %s;",
- pq.QuoteIdentifier(username)))
-
- // get the current database name so we can issue a REVOKE CONNECT for
- // this username
- var dbname sql.NullString
- if err := db.QueryRowContext(ctx, "SELECT current_database();").Scan(&dbname); err != nil {
- return err
- }
-
- if dbname.Valid {
- revocationStmts = append(revocationStmts, fmt.Sprintf(
- `REVOKE CONNECT ON DATABASE %s FROM %s;`,
- pq.QuoteIdentifier(dbname.String),
- pq.QuoteIdentifier(username)))
- }
-
- // again, here, we do not stop on error, as we want to remove as
- // many permissions as possible right now
- var lastStmtError error
- for _, query := range revocationStmts {
- if err := dbtxn.ExecuteDBQuery(ctx, db, nil, query); err != nil {
- lastStmtError = err
- }
- }
-
- // can't drop if not all privileges are revoked
- if rows.Err() != nil {
- return errwrap.Wrapf("could not generate revocation statements for all rows: {{err}}", rows.Err())
- }
- if lastStmtError != nil {
- return errwrap.Wrapf("could not perform all revocation statements: {{err}}", lastStmtError)
- }
-
- // Drop this user
- stmt, err = db.PrepareContext(ctx, fmt.Sprintf(
- `DROP ROLE IF EXISTS %s;`, pq.QuoteIdentifier(username)))
- if err != nil {
- return err
- }
- defer stmt.Close()
- if _, err := stmt.ExecContext(ctx); err != nil {
- return err
- }
-
- return nil
-}
-
-func (p *PostgreSQL) RotateRootCredentials(ctx context.Context, statements []string) (map[string]interface{}, error) {
- p.Lock()
- defer p.Unlock()
-
- if len(p.Username) == 0 || len(p.Password) == 0 {
- return nil, errors.New("username and password are required to rotate")
- }
-
-	rotateStatements := statements
-	if len(rotateStatements) == 0 {
-		rotateStatements = []string{defaultPostgresRotateRootCredentialsSQL}
- }
-
- db, err := p.getConnection(ctx)
- if err != nil {
- return nil, err
- }
-
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return nil, err
- }
- defer func() {
- tx.Rollback()
- }()
-
- password, err := p.GeneratePassword()
- if err != nil {
- return nil, err
- }
-
-	for _, stmt := range rotateStatements {
- for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
- query = strings.TrimSpace(query)
- if len(query) == 0 {
- continue
- }
- m := map[string]string{
- "username": p.Username,
- "password": password,
- }
- if err := dbtxn.ExecuteTxQuery(ctx, tx, m, query); err != nil {
- return nil, err
- }
- }
- }
-
- if err := tx.Commit(); err != nil {
- return nil, err
- }
-
- // Close the database connection to ensure no new connections come in
- if err := db.Close(); err != nil {
- return nil, err
- }
-
- p.RawConfig["password"] = password
- return p.RawConfig, nil
-}
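
defaultRevokeUser above builds its REVOKE statements with pq.QuoteIdentifier so schema and role names are quoted safely. A minimal sketch of that construction, assuming github.com/lib/pq is available on the module path:

```go
package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	schema, username := "public", "v-token-readonly-x7k2"
	// Quoting prevents identifier injection and preserves case and
	// special characters in schema and role names.
	stmt := fmt.Sprintf(
		`REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`,
		pq.QuoteIdentifier(schema),
		pq.QuoteIdentifier(username))
	fmt.Println(stmt)
}
```
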
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go
deleted file mode 100644
index 45f6fa0a..00000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/connutil.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package connutil
-
-import (
- "context"
- "errors"
- "sync"
-)
-
-var (
-	ErrNotInitialized = errors.New("connection has not been initialized")
-)
-
-// ConnectionProducer can be used as an embedded interface in the Database
-// definition. It implements the methods dealing with individual database
-// connections and is used in all the builtin database types.
-type ConnectionProducer interface {
- Close() error
- Init(context.Context, map[string]interface{}, bool) (map[string]interface{}, error)
- Connection(context.Context) (interface{}, error)
-
- sync.Locker
-
- // DEPRECATED, will be removed in 0.12
- Initialize(context.Context, map[string]interface{}, bool) error
-}
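
ConnectionProducer is designed to be embedded, so a database type picks up Close, Init, Connection, and the sync.Locker methods without reimplementing them. A sketch of that embedding, assuming the vendored import path (myDatabase is hypothetical):

```go
package mydb

import (
	"github.com/hashicorp/vault/plugins/helper/database/connutil"
)

// myDatabase satisfies ConnectionProducer (and sync.Locker) by embedding
// the generic SQL producer, just as the builtin MySQL and PostgreSQL
// types above do.
type myDatabase struct {
	*connutil.SQLConnectionProducer
}
```
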
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go
deleted file mode 100644
index 38685d0b..00000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/connutil/sql.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package connutil
-
-import (
- "context"
- "database/sql"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/plugins/helper/database/dbutil"
- "github.com/mitchellh/mapstructure"
-)
-
-var _ ConnectionProducer = &SQLConnectionProducer{}
-
-// SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases
-type SQLConnectionProducer struct {
- ConnectionURL string `json:"connection_url" mapstructure:"connection_url" structs:"connection_url"`
- MaxOpenConnections int `json:"max_open_connections" mapstructure:"max_open_connections" structs:"max_open_connections"`
- MaxIdleConnections int `json:"max_idle_connections" mapstructure:"max_idle_connections" structs:"max_idle_connections"`
- MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" mapstructure:"max_connection_lifetime" structs:"max_connection_lifetime"`
- Username string `json:"username" mapstructure:"username" structs:"username"`
- Password string `json:"password" mapstructure:"password" structs:"password"`
-
- Type string
- RawConfig map[string]interface{}
- maxConnectionLifetime time.Duration
- Initialized bool
- db *sql.DB
- sync.Mutex
-}
-
-func (c *SQLConnectionProducer) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error {
- _, err := c.Init(ctx, conf, verifyConnection)
- return err
-}
-
-func (c *SQLConnectionProducer) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (map[string]interface{}, error) {
- c.Lock()
- defer c.Unlock()
-
- c.RawConfig = conf
-
- err := mapstructure.WeakDecode(conf, &c)
- if err != nil {
- return nil, err
- }
-
- if len(c.ConnectionURL) == 0 {
- return nil, fmt.Errorf("connection_url cannot be empty")
- }
-
- c.ConnectionURL = dbutil.QueryHelper(c.ConnectionURL, map[string]string{
- "username": c.Username,
- "password": c.Password,
- })
-
- if c.MaxOpenConnections == 0 {
- c.MaxOpenConnections = 2
- }
-
- if c.MaxIdleConnections == 0 {
- c.MaxIdleConnections = c.MaxOpenConnections
- }
- if c.MaxIdleConnections > c.MaxOpenConnections {
- c.MaxIdleConnections = c.MaxOpenConnections
- }
- if c.MaxConnectionLifetimeRaw == nil {
- c.MaxConnectionLifetimeRaw = "0s"
- }
-
- c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw)
- if err != nil {
- return nil, errwrap.Wrapf("invalid max_connection_lifetime: {{err}}", err)
- }
-
- // Set initialized to true at this point since all fields are set,
- // and the connection can be established at a later time.
- c.Initialized = true
-
- if verifyConnection {
- if _, err := c.Connection(ctx); err != nil {
- return nil, errwrap.Wrapf("error verifying connection: {{err}}", err)
- }
-
- if err := c.db.PingContext(ctx); err != nil {
- return nil, errwrap.Wrapf("error verifying connection: {{err}}", err)
- }
- }
-
- return c.RawConfig, nil
-}
-
-func (c *SQLConnectionProducer) Connection(ctx context.Context) (interface{}, error) {
- if !c.Initialized {
- return nil, ErrNotInitialized
- }
-
- // If we already have a DB, test it and return
- if c.db != nil {
- if err := c.db.PingContext(ctx); err == nil {
- return c.db, nil
- }
- // If the ping was unsuccessful, close it and ignore errors as we'll be
- // reestablishing anyways
- c.db.Close()
- }
-
- // For mssql backend, switch to sqlserver instead
- dbType := c.Type
- if c.Type == "mssql" {
- dbType = "sqlserver"
- }
-
- // Otherwise, attempt to make connection
- conn := c.ConnectionURL
-
- // Ensure timezone is set to UTC for all the connections
- if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") {
- if strings.Contains(conn, "?") {
- conn += "&timezone=utc"
- } else {
- conn += "?timezone=utc"
- }
- }
-
- var err error
- c.db, err = sql.Open(dbType, conn)
- if err != nil {
- return nil, err
- }
-
- // Set some connection pool settings. We don't need much of this,
- // since the request rate shouldn't be high.
- c.db.SetMaxOpenConns(c.MaxOpenConnections)
- c.db.SetMaxIdleConns(c.MaxIdleConnections)
- c.db.SetConnMaxLifetime(c.maxConnectionLifetime)
-
- return c.db, nil
-}
-
-func (c *SQLConnectionProducer) SecretValues() map[string]interface{} {
- return map[string]interface{}{
- c.Password: "[password]",
- }
-}
-
-// Close attempts to close the connection
-func (c *SQLConnectionProducer) Close() error {
- // Grab the write lock
- c.Lock()
- defer c.Unlock()
-
- if c.db != nil {
- c.db.Close()
- }
-
- c.db = nil
-
- return nil
-}
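
Connection above normalizes Postgres URLs so every session runs in UTC, appending timezone=utc with ? or & depending on whether a query string already exists. A standalone sketch of that normalization:

```go
package main

import (
	"fmt"
	"strings"
)

// withUTC mirrors the URL adjustment Connection applies to postgres://
// and postgresql:// connection strings.
func withUTC(conn string) string {
	if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") {
		if strings.Contains(conn, "?") {
			return conn + "&timezone=utc"
		}
		return conn + "?timezone=utc"
	}
	return conn
}

func main() {
	fmt.Println(withUTC("postgres://localhost:5432/app?sslmode=disable"))
	fmt.Println(withUTC("postgres://localhost:5432/app"))
}
```
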
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go
deleted file mode 100644
index 65046028..00000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/credsutil.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package credsutil
-
-import (
- "time"
-
- "fmt"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/base62"
-)
-
-// CredentialsProducer can be used as an embedded interface in the Database
-// definition. It implements the methods for generating user information for a
-// particular database type and is used in all the builtin database types.
-type CredentialsProducer interface {
- GenerateUsername(usernameConfig dbplugin.UsernameConfig) (string, error)
- GeneratePassword() (string, error)
- GenerateExpiration(ttl time.Time) (string, error)
-}
-
-const (
- reqStr = `A1a-`
- minStrLen = 10
-)
-
-// RandomAlphaNumeric returns a random string of characters [A-Za-z0-9-]
-// of the provided length. When prependA1a is true, the first 4 characters
-// are the fixed prefix "A1a-", which guarantees the character classes
-// some password policies require. A minimum length of 10 characters is
-// enforced.
-func RandomAlphaNumeric(length int, prependA1a bool) (string, error) {
- if length < minStrLen {
- return "", fmt.Errorf("minimum length of %d is required", minStrLen)
- }
-
- var prefix string
- if prependA1a {
- prefix = reqStr
- }
-
- randomStr, err := base62.Random(length-len(prefix), true)
- if err != nil {
- return "", err
- }
-
- return prefix + randomStr, nil
-}
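
The generated credential is thus a fixed "A1a-" prefix plus random base62 characters. A self-contained sketch of the same idea using crypto/rand directly (the real helper delegates to helper/base62):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

const charset = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

// randomAlphaNumeric sketches RandomAlphaNumeric: an optional "A1a-" prefix
// guarantees character-class requirements, and the rest is random base62.
func randomAlphaNumeric(length int, prependA1a bool) (string, error) {
	if length < 10 {
		return "", fmt.Errorf("minimum length of %d is required", 10)
	}
	out := []byte{}
	if prependA1a {
		out = append(out, []byte("A1a-")...)
	}
	for len(out) < length {
		n, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset))))
		if err != nil {
			return "", err
		}
		out = append(out, charset[n.Int64()])
	}
	return string(out), nil
}

func main() {
	s, err := randomAlphaNumeric(20, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}
```
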
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go
deleted file mode 100644
index 2f9cc7d1..00000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/credsutil/sql.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package credsutil
-
-import (
- "fmt"
- "time"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
-)
-
-const (
- NoneLength int = -1
-)
-
-// SQLCredentialsProducer implements CredentialsProducer and provides a generic credentials producer for most sql database types.
-type SQLCredentialsProducer struct {
- DisplayNameLen int
- RoleNameLen int
- UsernameLen int
- Separator string
-}
-
-func (scp *SQLCredentialsProducer) GenerateUsername(config dbplugin.UsernameConfig) (string, error) {
- username := "v"
-
- displayName := config.DisplayName
- if scp.DisplayNameLen > 0 && len(displayName) > scp.DisplayNameLen {
- displayName = displayName[:scp.DisplayNameLen]
- } else if scp.DisplayNameLen == NoneLength {
- displayName = ""
- }
-
- if len(displayName) > 0 {
- username = fmt.Sprintf("%s%s%s", username, scp.Separator, displayName)
- }
-
- roleName := config.RoleName
- if scp.RoleNameLen > 0 && len(roleName) > scp.RoleNameLen {
- roleName = roleName[:scp.RoleNameLen]
- } else if scp.RoleNameLen == NoneLength {
- roleName = ""
- }
-
- if len(roleName) > 0 {
- username = fmt.Sprintf("%s%s%s", username, scp.Separator, roleName)
- }
-
- userUUID, err := RandomAlphaNumeric(20, false)
- if err != nil {
- return "", err
- }
-
- username = fmt.Sprintf("%s%s%s", username, scp.Separator, userUUID)
- username = fmt.Sprintf("%s%s%s", username, scp.Separator, fmt.Sprint(time.Now().Unix()))
- if scp.UsernameLen > 0 && len(username) > scp.UsernameLen {
- username = username[:scp.UsernameLen]
- }
-
- return username, nil
-}
-
-func (scp *SQLCredentialsProducer) GeneratePassword() (string, error) {
- password, err := RandomAlphaNumeric(20, true)
- if err != nil {
- return "", err
- }
-
- return password, nil
-}
-
-func (scp *SQLCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) {
- return ttl.Format("2006-01-02 15:04:05-0700"), nil
-}
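
GenerateUsername above joins up to five dash-separated pieces (the "v" marker, truncated display name, truncated role name, a 20-character random segment, and a Unix timestamp) and then truncates the result to UsernameLen. A sketch of the resulting shape, using the 32-character MySQL limit from earlier in this diff:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	pieces := []string{
		"v",
		"token",                // truncated display name
		"readonly",             // truncated role name
		"WqJ8x0kYr3PbNdT2LmZa", // 20-char random segment
		fmt.Sprint(time.Now().Unix()),
	}
	username := strings.Join(pieces, "-")
	if len(username) > 32 { // UsernameLen for modern MySQL
		username = username[:32]
	}
	fmt.Println(username)
}
```
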
diff --git a/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go b/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go
deleted file mode 100644
index 42257053..00000000
--- a/vendor/github.com/hashicorp/vault/plugins/helper/database/dbutil/dbutil.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package dbutil
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
-)
-
-var (
- ErrEmptyCreationStatement = errors.New("empty creation statements")
-)
-
-// QueryHelper templates the given query, substituting {{key}} placeholders with values from data.
-func QueryHelper(tpl string, data map[string]string) string {
- for k, v := range data {
- tpl = strings.Replace(tpl, fmt.Sprintf("{{%s}}", k), v, -1)
- }
-
- return tpl
-}
-
-// StatementCompatibilityHelper will populate the statements fields to support
-// compatibility
-func StatementCompatibilityHelper(statements dbplugin.Statements) dbplugin.Statements {
- switch {
- case len(statements.Creation) > 0 && len(statements.CreationStatements) == 0:
- statements.CreationStatements = strings.Join(statements.Creation, ";")
- case len(statements.CreationStatements) > 0:
- statements.Creation = []string{statements.CreationStatements}
- }
- switch {
- case len(statements.Revocation) > 0 && len(statements.RevocationStatements) == 0:
- statements.RevocationStatements = strings.Join(statements.Revocation, ";")
- case len(statements.RevocationStatements) > 0:
- statements.Revocation = []string{statements.RevocationStatements}
- }
- switch {
- case len(statements.Renewal) > 0 && len(statements.RenewStatements) == 0:
- statements.RenewStatements = strings.Join(statements.Renewal, ";")
- case len(statements.RenewStatements) > 0:
- statements.Renewal = []string{statements.RenewStatements}
- }
- switch {
- case len(statements.Rollback) > 0 && len(statements.RollbackStatements) == 0:
- statements.RollbackStatements = strings.Join(statements.Rollback, ";")
- case len(statements.RollbackStatements) > 0:
- statements.Rollback = []string{statements.RollbackStatements}
- }
- return statements
-}
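
StatementCompatibilityHelper keeps each plural statement slice and its legacy singular string in sync, joining in one direction and wrapping in the other. A sketch of both directions for the creation pair:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// New-style slice populated: join into the legacy string field.
	creation := []string{`CREATE ROLE "{{name}}"`, `GRANT SELECT ON app.* TO "{{name}}"`}
	creationStatements := strings.Join(creation, ";")
	fmt.Println(creationStatements)

	// Legacy string populated: wrap it as a one-element slice.
	legacy := `CREATE ROLE "{{name}}"`
	fmt.Println([]string{legacy})
}
```
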
diff --git a/vendor/github.com/hashicorp/vault/plugins/serve.go b/vendor/github.com/hashicorp/vault/plugins/serve.go
deleted file mode 100644
index 0bc3bc4e..00000000
--- a/vendor/github.com/hashicorp/vault/plugins/serve.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package plugins
-
-import (
- "fmt"
-
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/pluginutil"
-)
-
-// Serve is used to start a plugin's RPC server. It takes an interface that must
-// implement a known plugin interface to vault and an optional api.TLSConfig for
-// use during the initial unwrap request to vault. The api config is particularly
-// useful when vault is set up to require client cert checking.
-func Serve(plugin interface{}, tlsConfig *api.TLSConfig) {
- tlsProvider := pluginutil.VaultPluginTLSProvider(tlsConfig)
-
- err := pluginutil.OptionallyEnableMlock()
- if err != nil {
- fmt.Println(err)
- return
- }
-
- switch p := plugin.(type) {
- case dbplugin.Database:
- dbplugin.Serve(p, tlsProvider)
- default:
- fmt.Println("Unsupported plugin type")
- }
-
-}
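
A standalone plugin binary only needs a main that calls the package's Run helper; Serve then handles mlock and the RPC plumbing. A minimal sketch, assuming the vendored import paths and an empty TLS config (a real plugin would populate it from its flags):

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/plugins/database/mysql"
)

func main() {
	// Hypothetical minimal wiring; production plugins parse TLS settings
	// from command-line flags before calling Run.
	if err := mysql.Run(&api.TLSConfig{}); err != nil {
		log.Println(err)
		os.Exit(1)
	}
}
```
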
diff --git a/vendor/github.com/hashicorp/vault/shamir/shamir.go b/vendor/github.com/hashicorp/vault/shamir/shamir.go
deleted file mode 100644
index 04650868..00000000
--- a/vendor/github.com/hashicorp/vault/shamir/shamir.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package shamir
-
-import (
- "crypto/rand"
- "crypto/subtle"
- "fmt"
- mathrand "math/rand"
- "time"
-
- "github.com/hashicorp/errwrap"
-)
-
-const (
- // ShareOverhead is the byte size overhead of each share
- // when using Split on a secret. This is caused by appending
- // a one byte tag to the share.
- ShareOverhead = 1
-)
-
-// polynomial represents a polynomial of arbitrary degree
-type polynomial struct {
- coefficients []uint8
-}
-
-// makePolynomial constructs a random polynomial of the given
-// degree but with the provided intercept value.
-func makePolynomial(intercept, degree uint8) (polynomial, error) {
- // Create a wrapper
- p := polynomial{
- coefficients: make([]byte, degree+1),
- }
-
- // Ensure the intercept is set
- p.coefficients[0] = intercept
-
-	// Assign random coefficients to the polynomial
- if _, err := rand.Read(p.coefficients[1:]); err != nil {
- return p, err
- }
-
- return p, nil
-}
-
-// evaluate returns the value of the polynomial for the given x
-func (p *polynomial) evaluate(x uint8) uint8 {
- // Special case the origin
- if x == 0 {
- return p.coefficients[0]
- }
-
- // Compute the polynomial value using Horner's method.
- degree := len(p.coefficients) - 1
- out := p.coefficients[degree]
- for i := degree - 1; i >= 0; i-- {
- coeff := p.coefficients[i]
- out = add(mult(out, x), coeff)
- }
- return out
-}
-
-// interpolatePolynomial takes N sample points and returns
-// the value at a given x using a lagrange interpolation.
-func interpolatePolynomial(x_samples, y_samples []uint8, x uint8) uint8 {
- limit := len(x_samples)
- var result, basis uint8
- for i := 0; i < limit; i++ {
- basis = 1
- for j := 0; j < limit; j++ {
- if i == j {
- continue
- }
- num := add(x, x_samples[j])
- denom := add(x_samples[i], x_samples[j])
- term := div(num, denom)
- basis = mult(basis, term)
- }
- group := mult(y_samples[i], basis)
- result = add(result, group)
- }
- return result
-}
-
-// div divides two numbers in GF(2^8)
-func div(a, b uint8) uint8 {
- if b == 0 {
-		// leaks some timing information but we don't care anyway as this
-		// should never happen, hence the panic
- panic("divide by zero")
- }
-
- var goodVal, zero uint8
- log_a := logTable[a]
- log_b := logTable[b]
- diff := (int(log_a) - int(log_b)) % 255
- if diff < 0 {
- diff += 255
- }
-
- ret := expTable[diff]
-
-	// Ensure we return zero if a is zero, without being subject to timing attacks
- goodVal = ret
-
- if subtle.ConstantTimeByteEq(a, 0) == 1 {
- ret = zero
- } else {
- ret = goodVal
- }
-
- return ret
-}
-
-// mult multiplies two numbers in GF(2^8)
-func mult(a, b uint8) (out uint8) {
- var goodVal, zero uint8
- log_a := logTable[a]
- log_b := logTable[b]
- sum := (int(log_a) + int(log_b)) % 255
-
- ret := expTable[sum]
-
-	// Ensure we return zero if either a or b is zero but aren't subject to
-	// timing attacks
- goodVal = ret
-
- if subtle.ConstantTimeByteEq(a, 0) == 1 {
- ret = zero
- } else {
- ret = goodVal
- }
-
- if subtle.ConstantTimeByteEq(b, 0) == 1 {
- ret = zero
- } else {
- // This operation does not do anything logically useful. It
- // only ensures a constant number of assignments to thwart
- // timing attacks.
- goodVal = zero
- }
-
- return ret
-}
-
-// add combines two numbers in GF(2^8)
-// This can also be used for subtraction since it is symmetric.
-func add(a, b uint8) uint8 {
- return a ^ b
-}
-
-// Split takes an arbitrarily long secret and generates a `parts`
-// number of shares, `threshold` of which are required to reconstruct
-// the secret. The parts and threshold must be at least 2, and less
-// than 256. The returned shares are each one byte longer than the secret
-// as they attach a tag used to reconstruct the secret.
-func Split(secret []byte, parts, threshold int) ([][]byte, error) {
- // Sanity check the input
- if parts < threshold {
- return nil, fmt.Errorf("parts cannot be less than threshold")
- }
- if parts > 255 {
- return nil, fmt.Errorf("parts cannot exceed 255")
- }
- if threshold < 2 {
- return nil, fmt.Errorf("threshold must be at least 2")
- }
- if threshold > 255 {
- return nil, fmt.Errorf("threshold cannot exceed 255")
- }
- if len(secret) == 0 {
- return nil, fmt.Errorf("cannot split an empty secret")
- }
-
- // Generate random list of x coordinates
- mathrand.Seed(time.Now().UnixNano())
- xCoordinates := mathrand.Perm(255)
-
- // Allocate the output array, initialize the final byte
- // of the output with the offset. The representation of each
- // output is {y1, y2, .., yN, x}.
- out := make([][]byte, parts)
- for idx := range out {
- out[idx] = make([]byte, len(secret)+1)
- out[idx][len(secret)] = uint8(xCoordinates[idx]) + 1
- }
-
- // Construct a random polynomial for each byte of the secret.
- // Because we are using a field of size 256, we can only represent
- // a single byte as the intercept of the polynomial, so we must
- // use a new polynomial for each byte.
- for idx, val := range secret {
- p, err := makePolynomial(val, uint8(threshold-1))
- if err != nil {
- return nil, errwrap.Wrapf("failed to generate polynomial: {{err}}", err)
- }
-
- // Generate a `parts` number of (x,y) pairs
- // We cheat by encoding the x value once as the final index,
- // so that it only needs to be stored once.
- for i := 0; i < parts; i++ {
- x := uint8(xCoordinates[i]) + 1
- y := p.evaluate(x)
- out[i][idx] = y
- }
- }
-
- // Return the encoded secrets
- return out, nil
-}
-
-// Combine is used to reverse a Split and reconstruct a secret
-// once a `threshold` number of parts are available.
-func Combine(parts [][]byte) ([]byte, error) {
- // Verify enough parts provided
- if len(parts) < 2 {
- return nil, fmt.Errorf("less than two parts cannot be used to reconstruct the secret")
- }
-
- // Verify the parts are all the same length
- firstPartLen := len(parts[0])
- if firstPartLen < 2 {
- return nil, fmt.Errorf("parts must be at least two bytes")
- }
- for i := 1; i < len(parts); i++ {
- if len(parts[i]) != firstPartLen {
- return nil, fmt.Errorf("all parts must be the same length")
- }
- }
-
- // Create a buffer to store the reconstructed secret
- secret := make([]byte, firstPartLen-1)
-
- // Buffer to store the samples
- x_samples := make([]uint8, len(parts))
- y_samples := make([]uint8, len(parts))
-
- // Set the x value for each sample and ensure no x_sample values are the same,
- // otherwise div() can be unhappy
- checkMap := map[byte]bool{}
- for i, part := range parts {
- samp := part[firstPartLen-1]
- if exists := checkMap[samp]; exists {
- return nil, fmt.Errorf("duplicate part detected")
- }
- checkMap[samp] = true
- x_samples[i] = samp
- }
-
- // Reconstruct each byte
- for idx := range secret {
- // Set the y value for each sample
- for i, part := range parts {
- y_samples[i] = part[idx]
- }
-
- // Interpolate the polynomial and compute the value at 0
- val := interpolatePolynomial(x_samples, y_samples, 0)
-
- // Evaluate the 0th value to get the intercept
- secret[idx] = val
- }
- return secret, nil
-}
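
Split and Combine round-trip as long as at least threshold distinct shares are supplied. A usage sketch against the API above (the import path follows the vendored layout and is illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/shamir"
)

func main() {
	secret := []byte("master key material")

	// 5 shares, any 3 of which reconstruct the secret.
	parts, err := shamir.Split(secret, 5, 3)
	if err != nil {
		panic(err)
	}

	recovered, err := shamir.Combine(parts[:3])
	if err != nil {
		panic(err)
	}
	fmt.Println(string(recovered)) // "master key material"
}
```
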
diff --git a/vendor/github.com/hashicorp/vault/shamir/tables.go b/vendor/github.com/hashicorp/vault/shamir/tables.go
deleted file mode 100644
index 76c245e7..00000000
--- a/vendor/github.com/hashicorp/vault/shamir/tables.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package shamir
-
-// Tables taken from http://www.samiam.org/galois.html
-// They use 0xe5 (229) as the generator
-
-var (
- // logTable provides the log(X)/log(g) at each index X
- logTable = [256]uint8{
- 0x00, 0xff, 0xc8, 0x08, 0x91, 0x10, 0xd0, 0x36,
- 0x5a, 0x3e, 0xd8, 0x43, 0x99, 0x77, 0xfe, 0x18,
- 0x23, 0x20, 0x07, 0x70, 0xa1, 0x6c, 0x0c, 0x7f,
- 0x62, 0x8b, 0x40, 0x46, 0xc7, 0x4b, 0xe0, 0x0e,
- 0xeb, 0x16, 0xe8, 0xad, 0xcf, 0xcd, 0x39, 0x53,
- 0x6a, 0x27, 0x35, 0x93, 0xd4, 0x4e, 0x48, 0xc3,
- 0x2b, 0x79, 0x54, 0x28, 0x09, 0x78, 0x0f, 0x21,
- 0x90, 0x87, 0x14, 0x2a, 0xa9, 0x9c, 0xd6, 0x74,
- 0xb4, 0x7c, 0xde, 0xed, 0xb1, 0x86, 0x76, 0xa4,
- 0x98, 0xe2, 0x96, 0x8f, 0x02, 0x32, 0x1c, 0xc1,
- 0x33, 0xee, 0xef, 0x81, 0xfd, 0x30, 0x5c, 0x13,
- 0x9d, 0x29, 0x17, 0xc4, 0x11, 0x44, 0x8c, 0x80,
- 0xf3, 0x73, 0x42, 0x1e, 0x1d, 0xb5, 0xf0, 0x12,
- 0xd1, 0x5b, 0x41, 0xa2, 0xd7, 0x2c, 0xe9, 0xd5,
- 0x59, 0xcb, 0x50, 0xa8, 0xdc, 0xfc, 0xf2, 0x56,
- 0x72, 0xa6, 0x65, 0x2f, 0x9f, 0x9b, 0x3d, 0xba,
- 0x7d, 0xc2, 0x45, 0x82, 0xa7, 0x57, 0xb6, 0xa3,
- 0x7a, 0x75, 0x4f, 0xae, 0x3f, 0x37, 0x6d, 0x47,
- 0x61, 0xbe, 0xab, 0xd3, 0x5f, 0xb0, 0x58, 0xaf,
- 0xca, 0x5e, 0xfa, 0x85, 0xe4, 0x4d, 0x8a, 0x05,
- 0xfb, 0x60, 0xb7, 0x7b, 0xb8, 0x26, 0x4a, 0x67,
- 0xc6, 0x1a, 0xf8, 0x69, 0x25, 0xb3, 0xdb, 0xbd,
- 0x66, 0xdd, 0xf1, 0xd2, 0xdf, 0x03, 0x8d, 0x34,
- 0xd9, 0x92, 0x0d, 0x63, 0x55, 0xaa, 0x49, 0xec,
- 0xbc, 0x95, 0x3c, 0x84, 0x0b, 0xf5, 0xe6, 0xe7,
- 0xe5, 0xac, 0x7e, 0x6e, 0xb9, 0xf9, 0xda, 0x8e,
- 0x9a, 0xc9, 0x24, 0xe1, 0x0a, 0x15, 0x6b, 0x3a,
- 0xa0, 0x51, 0xf4, 0xea, 0xb2, 0x97, 0x9e, 0x5d,
- 0x22, 0x88, 0x94, 0xce, 0x19, 0x01, 0x71, 0x4c,
- 0xa5, 0xe3, 0xc5, 0x31, 0xbb, 0xcc, 0x1f, 0x2d,
- 0x3b, 0x52, 0x6f, 0xf6, 0x2e, 0x89, 0xf7, 0xc0,
- 0x68, 0x1b, 0x64, 0x04, 0x06, 0xbf, 0x83, 0x38}
-
- // expTable provides the anti-log or exponentiation value
- // for the equivalent index
- expTable = [256]uint8{
- 0x01, 0xe5, 0x4c, 0xb5, 0xfb, 0x9f, 0xfc, 0x12,
- 0x03, 0x34, 0xd4, 0xc4, 0x16, 0xba, 0x1f, 0x36,
- 0x05, 0x5c, 0x67, 0x57, 0x3a, 0xd5, 0x21, 0x5a,
- 0x0f, 0xe4, 0xa9, 0xf9, 0x4e, 0x64, 0x63, 0xee,
- 0x11, 0x37, 0xe0, 0x10, 0xd2, 0xac, 0xa5, 0x29,
- 0x33, 0x59, 0x3b, 0x30, 0x6d, 0xef, 0xf4, 0x7b,
- 0x55, 0xeb, 0x4d, 0x50, 0xb7, 0x2a, 0x07, 0x8d,
- 0xff, 0x26, 0xd7, 0xf0, 0xc2, 0x7e, 0x09, 0x8c,
- 0x1a, 0x6a, 0x62, 0x0b, 0x5d, 0x82, 0x1b, 0x8f,
- 0x2e, 0xbe, 0xa6, 0x1d, 0xe7, 0x9d, 0x2d, 0x8a,
- 0x72, 0xd9, 0xf1, 0x27, 0x32, 0xbc, 0x77, 0x85,
- 0x96, 0x70, 0x08, 0x69, 0x56, 0xdf, 0x99, 0x94,
- 0xa1, 0x90, 0x18, 0xbb, 0xfa, 0x7a, 0xb0, 0xa7,
- 0xf8, 0xab, 0x28, 0xd6, 0x15, 0x8e, 0xcb, 0xf2,
- 0x13, 0xe6, 0x78, 0x61, 0x3f, 0x89, 0x46, 0x0d,
- 0x35, 0x31, 0x88, 0xa3, 0x41, 0x80, 0xca, 0x17,
- 0x5f, 0x53, 0x83, 0xfe, 0xc3, 0x9b, 0x45, 0x39,
- 0xe1, 0xf5, 0x9e, 0x19, 0x5e, 0xb6, 0xcf, 0x4b,
- 0x38, 0x04, 0xb9, 0x2b, 0xe2, 0xc1, 0x4a, 0xdd,
- 0x48, 0x0c, 0xd0, 0x7d, 0x3d, 0x58, 0xde, 0x7c,
- 0xd8, 0x14, 0x6b, 0x87, 0x47, 0xe8, 0x79, 0x84,
- 0x73, 0x3c, 0xbd, 0x92, 0xc9, 0x23, 0x8b, 0x97,
- 0x95, 0x44, 0xdc, 0xad, 0x40, 0x65, 0x86, 0xa2,
- 0xa4, 0xcc, 0x7f, 0xec, 0xc0, 0xaf, 0x91, 0xfd,
- 0xf7, 0x4f, 0x81, 0x2f, 0x5b, 0xea, 0xa8, 0x1c,
- 0x02, 0xd1, 0x98, 0x71, 0xed, 0x25, 0xe3, 0x24,
- 0x06, 0x68, 0xb3, 0x93, 0x2c, 0x6f, 0x3e, 0x6c,
- 0x0a, 0xb8, 0xce, 0xae, 0x74, 0xb1, 0x42, 0xb4,
- 0x1e, 0xd3, 0x49, 0xe9, 0x9c, 0xc8, 0xc6, 0xc7,
- 0x22, 0x6e, 0xdb, 0x20, 0xbf, 0x43, 0x51, 0x52,
- 0x66, 0xb2, 0x76, 0x60, 0xda, 0xc5, 0xf3, 0xf6,
- 0xaa, 0xcd, 0x9a, 0xa0, 0x75, 0x54, 0x0e, 0x01}
-)
diff --git a/vendor/github.com/hashicorp/vault/vault/acl.go b/vendor/github.com/hashicorp/vault/vault/acl.go
deleted file mode 100644
index e16cf1ae..00000000
--- a/vendor/github.com/hashicorp/vault/vault/acl.go
+++ /dev/null
@@ -1,523 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "reflect"
- "strings"
-
- radix "github.com/armon/go-radix"
- "github.com/hashicorp/errwrap"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/copystructure"
-)
-
-// ACL is used to wrap a set of policies to provide
-// an efficient interface for access control.
-type ACL struct {
- // exactRules contains the path policies that are exact
- exactRules *radix.Tree
-
- // globRules contains the path policies that glob
- globRules *radix.Tree
-
- // root is enabled if the "root" named policy is present.
- root bool
-
- // Stores policies that are actually RGPs for later fetching
- rgpPolicies []*Policy
-}
-
-type PolicyCheckOpts struct {
- RootPrivsRequired bool
- Unauth bool
-}
-
-type AuthResults struct {
- ACLResults *ACLResults
- Allowed bool
- RootPrivs bool
- DeniedError bool
- Error *multierror.Error
-}
-
-type ACLResults struct {
- Allowed bool
- RootPrivs bool
- IsRoot bool
- MFAMethods []string
- ControlGroup *ControlGroup
- CapabilitiesBitmap uint32
-}
-
-// NewACL is used to construct a policy based ACL from a set of policies.
-func NewACL(ctx context.Context, policies []*Policy) (*ACL, error) {
- // Initialize
- a := &ACL{
- exactRules: radix.New(),
- globRules: radix.New(),
- root: false,
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- // Inject each policy
- for _, policy := range policies {
- // Ignore a nil policy object
- if policy == nil {
- continue
- }
-
- switch policy.Type {
- case PolicyTypeACL:
- case PolicyTypeRGP:
- a.rgpPolicies = append(a.rgpPolicies, policy)
- continue
- default:
- return nil, fmt.Errorf("unable to parse policy (wrong type)")
- }
-
- // Check if this is root
- if policy.Name == "root" {
- if ns.ID != namespace.RootNamespaceID {
- return nil, fmt.Errorf("root policy is only allowed in root namespace")
- }
-
- if len(policies) != 1 {
- return nil, fmt.Errorf("other policies present along with root")
- }
- a.root = true
- }
-
- for _, pc := range policy.Paths {
- // Check which tree to use
- tree := a.exactRules
- if pc.Glob {
- tree = a.globRules
- }
-
- // Check for an existing policy
- raw, ok := tree.Get(pc.Prefix)
- if !ok {
- clonedPerms, err := pc.Permissions.Clone()
- if err != nil {
- return nil, errwrap.Wrapf("error cloning ACL permissions: {{err}}", err)
- }
- tree.Insert(pc.Prefix, clonedPerms)
- continue
- }
-
- // these are the ones already in the tree
- existingPerms := raw.(*ACLPermissions)
-
- switch {
- case existingPerms.CapabilitiesBitmap&DenyCapabilityInt > 0:
- // If we are explicitly denied in the existing capability set,
- // don't save anything else
- continue
-
- case pc.Permissions.CapabilitiesBitmap&DenyCapabilityInt > 0:
- // If this new policy explicitly denies, only save the deny value
- existingPerms.CapabilitiesBitmap = DenyCapabilityInt
- existingPerms.AllowedParameters = nil
- existingPerms.DeniedParameters = nil
- goto INSERT
-
- default:
- // Insert the capabilities in this new policy into the existing
- // value
- existingPerms.CapabilitiesBitmap = existingPerms.CapabilitiesBitmap | pc.Permissions.CapabilitiesBitmap
- }
-
- // Note: In these stanzas, we're preferring minimum lifetimes. So
- // we take the lesser of two specified max values, or we take the
- // lesser of two specified min values, the idea being, allowing
- // token lifetime to be minimum possible.
- //
- // If we have an existing max, and we either don't have a current
- // max, or the current is greater than the previous, use the
- // existing.
- if pc.Permissions.MaxWrappingTTL > 0 &&
- (existingPerms.MaxWrappingTTL == 0 ||
- pc.Permissions.MaxWrappingTTL < existingPerms.MaxWrappingTTL) {
- existingPerms.MaxWrappingTTL = pc.Permissions.MaxWrappingTTL
- }
- // If we have an existing min, and we either don't have a current
- // min, or the current is greater than the previous, use the
- // existing
- if pc.Permissions.MinWrappingTTL > 0 &&
- (existingPerms.MinWrappingTTL == 0 ||
- pc.Permissions.MinWrappingTTL < existingPerms.MinWrappingTTL) {
- existingPerms.MinWrappingTTL = pc.Permissions.MinWrappingTTL
- }
-
- if len(pc.Permissions.AllowedParameters) > 0 {
- if existingPerms.AllowedParameters == nil {
- clonedAllowed, err := copystructure.Copy(pc.Permissions.AllowedParameters)
- if err != nil {
- return nil, err
- }
- existingPerms.AllowedParameters = clonedAllowed.(map[string][]interface{})
- } else {
- for key, value := range pc.Permissions.AllowedParameters {
- pcValue, ok := existingPerms.AllowedParameters[key]
-					// If an empty array exists it should overwrite any other
-					// value.
- if len(value) == 0 || (ok && len(pcValue) == 0) {
- existingPerms.AllowedParameters[key] = []interface{}{}
- } else {
- // Merge the two maps, appending values on key conflict.
- existingPerms.AllowedParameters[key] = append(value, existingPerms.AllowedParameters[key]...)
- }
- }
- }
- }
-
- if len(pc.Permissions.DeniedParameters) > 0 {
- if existingPerms.DeniedParameters == nil {
- clonedDenied, err := copystructure.Copy(pc.Permissions.DeniedParameters)
- if err != nil {
- return nil, err
- }
- existingPerms.DeniedParameters = clonedDenied.(map[string][]interface{})
- } else {
- for key, value := range pc.Permissions.DeniedParameters {
- pcValue, ok := existingPerms.DeniedParameters[key]
-					// If an empty array exists it should overwrite any other
-					// value.
- if len(value) == 0 || (ok && len(pcValue) == 0) {
- existingPerms.DeniedParameters[key] = []interface{}{}
- } else {
- // Merge the two maps, appending values on key conflict.
- existingPerms.DeniedParameters[key] = append(value, existingPerms.DeniedParameters[key]...)
- }
- }
- }
- }
-
- if len(pc.Permissions.RequiredParameters) > 0 {
- if len(existingPerms.RequiredParameters) == 0 {
- existingPerms.RequiredParameters = pc.Permissions.RequiredParameters
- } else {
- for _, v := range pc.Permissions.RequiredParameters {
- if !strutil.StrListContains(existingPerms.RequiredParameters, v) {
- existingPerms.RequiredParameters = append(existingPerms.RequiredParameters, v)
- }
- }
- }
- }
-
- if len(pc.Permissions.MFAMethods) > 0 {
- if existingPerms.MFAMethods == nil {
- existingPerms.MFAMethods = pc.Permissions.MFAMethods
- } else {
- for _, method := range pc.Permissions.MFAMethods {
- existingPerms.MFAMethods = append(existingPerms.MFAMethods, method)
- }
- }
- existingPerms.MFAMethods = strutil.RemoveDuplicates(existingPerms.MFAMethods, false)
- }
-
- // No need to dedupe this list since any authorization can satisfy any factor
- if pc.Permissions.ControlGroup != nil {
- if len(pc.Permissions.ControlGroup.Factors) > 0 {
- if existingPerms.ControlGroup == nil {
- existingPerms.ControlGroup = pc.Permissions.ControlGroup
- } else {
- for _, authz := range pc.Permissions.ControlGroup.Factors {
- existingPerms.ControlGroup.Factors = append(existingPerms.ControlGroup.Factors, authz)
- }
- }
- }
- }
-
- INSERT:
- tree.Insert(pc.Prefix, existingPerms)
- }
- }
- return a, nil
-}
-
-func (a *ACL) Capabilities(ctx context.Context, path string) (pathCapabilities []string) {
- req := &logical.Request{
- Path: path,
- // doesn't matter, but use List to trigger fallback behavior so we can
- // model real behavior
- Operation: logical.ListOperation,
- }
-
- res := a.AllowOperation(ctx, req, true)
- if res.IsRoot {
- return []string{RootCapability}
- }
-
- capabilities := res.CapabilitiesBitmap
-
- if capabilities&SudoCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, SudoCapability)
- }
- if capabilities&ReadCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, ReadCapability)
- }
- if capabilities&ListCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, ListCapability)
- }
- if capabilities&UpdateCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, UpdateCapability)
- }
- if capabilities&DeleteCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, DeleteCapability)
- }
- if capabilities&CreateCapabilityInt > 0 {
- pathCapabilities = append(pathCapabilities, CreateCapability)
- }
-
- // If "deny" is explicitly set or if the path has no capabilities at all,
- // set the path capabilities to "deny"
- if capabilities&DenyCapabilityInt > 0 || len(pathCapabilities) == 0 {
- pathCapabilities = []string{DenyCapability}
- }
- return
-}
-
-// AllowOperation is used to check if the given operation is permitted.
-func (a *ACL) AllowOperation(ctx context.Context, req *logical.Request, capCheckOnly bool) (ret *ACLResults) {
- ret = new(ACLResults)
-
- // Fast-path root
- if a.root {
- ret.Allowed = true
- ret.RootPrivs = true
- ret.IsRoot = true
- return
- }
- op := req.Operation
-
- // Help is always allowed
- if op == logical.HelpOperation {
- ret.Allowed = true
- return
- }
-
- var permissions *ACLPermissions
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return
- }
- path := ns.Path + req.Path
-
- // Find an exact matching rule, look for glob if no match
- var capabilities uint32
- raw, ok := a.exactRules.Get(path)
- if ok {
- permissions = raw.(*ACLPermissions)
- capabilities = permissions.CapabilitiesBitmap
- goto CHECK
- }
- if op == logical.ListOperation {
- raw, ok = a.exactRules.Get(strings.TrimSuffix(path, "/"))
- if ok {
- permissions = raw.(*ACLPermissions)
- capabilities = permissions.CapabilitiesBitmap
- goto CHECK
- }
- }
-
- // Find a glob rule, default deny if no match
- _, raw, ok = a.globRules.LongestPrefix(path)
- if !ok {
- return
- }
- permissions = raw.(*ACLPermissions)
- capabilities = permissions.CapabilitiesBitmap
-
-CHECK:
- // Check if the minimum permissions are met
- // If "deny" has been explicitly set, only deny will be in the map, so we
- // only need to check for the existence of other values
- ret.RootPrivs = capabilities&SudoCapabilityInt > 0
-
- // This is after the RootPrivs check so we can gate on it being from sudo
- // rather than policy root
- if capCheckOnly {
- ret.CapabilitiesBitmap = capabilities
- return ret
- }
-
- ret.MFAMethods = permissions.MFAMethods
- ret.ControlGroup = permissions.ControlGroup
-
- operationAllowed := false
- switch op {
- case logical.ReadOperation:
- operationAllowed = capabilities&ReadCapabilityInt > 0
- case logical.ListOperation:
- operationAllowed = capabilities&ListCapabilityInt > 0
- case logical.UpdateOperation:
- operationAllowed = capabilities&UpdateCapabilityInt > 0
- case logical.DeleteOperation:
- operationAllowed = capabilities&DeleteCapabilityInt > 0
- case logical.CreateOperation:
- operationAllowed = capabilities&CreateCapabilityInt > 0
-
- // These three re-use UpdateCapabilityInt since that's the most appropriate
- // capability/operation mapping
- case logical.RevokeOperation, logical.RenewOperation, logical.RollbackOperation:
- operationAllowed = capabilities&UpdateCapabilityInt > 0
-
- default:
- return
- }
-
- if !operationAllowed {
- return
- }
-
- if permissions.MaxWrappingTTL > 0 {
- if req.WrapInfo == nil || req.WrapInfo.TTL > permissions.MaxWrappingTTL {
- return
- }
- }
- if permissions.MinWrappingTTL > 0 {
- if req.WrapInfo == nil || req.WrapInfo.TTL < permissions.MinWrappingTTL {
- return
- }
- }
- // This situation can happen because of merging, even though in a single
- // path statement we check on ingress
- if permissions.MinWrappingTTL != 0 &&
- permissions.MaxWrappingTTL != 0 &&
- permissions.MaxWrappingTTL < permissions.MinWrappingTTL {
- return
- }
-
- // Only check parameter permissions for operations that can modify
- // parameters.
- if op == logical.ReadOperation || op == logical.UpdateOperation || op == logical.CreateOperation {
- for _, parameter := range permissions.RequiredParameters {
- if _, ok := req.Data[strings.ToLower(parameter)]; !ok {
- return
- }
- }
-
- // If there are no data fields, allow
- if len(req.Data) == 0 {
- ret.Allowed = true
- return
- }
-
- if len(permissions.DeniedParameters) == 0 {
- goto ALLOWED_PARAMETERS
- }
-
- // Check if all parameters have been denied
- if _, ok := permissions.DeniedParameters["*"]; ok {
- return
- }
-
- for parameter, value := range req.Data {
- // Check if parameter has been explicitly denied
- if valueSlice, ok := permissions.DeniedParameters[strings.ToLower(parameter)]; ok {
- // If the value exists in denied values slice, deny
- if valueInParameterList(value, valueSlice) {
- return
- }
- }
- }
-
- ALLOWED_PARAMETERS:
- // If we don't have any allowed parameters set, allow
- if len(permissions.AllowedParameters) == 0 {
- ret.Allowed = true
- return
- }
-
- _, allowedAll := permissions.AllowedParameters["*"]
- if len(permissions.AllowedParameters) == 1 && allowedAll {
- ret.Allowed = true
- return
- }
-
- for parameter, value := range req.Data {
- valueSlice, ok := permissions.AllowedParameters[strings.ToLower(parameter)]
- // Requested parameter is not in allowed list
- if !ok && !allowedAll {
- return
- }
-
- // If the value doesn't exists in the allowed values slice,
- // deny
- if ok && !valueInParameterList(value, valueSlice) {
- return
- }
- }
- }
-
- ret.Allowed = true
- return
-}
-
-func (c *Core) performPolicyChecks(ctx context.Context, acl *ACL, te *logical.TokenEntry, req *logical.Request, inEntity *identity.Entity, opts *PolicyCheckOpts) *AuthResults {
- ret := new(AuthResults)
-
- // First, perform normal ACL checks if requested. The only time no ACL
- // should be applied is if we are only processing EGPs against a login
- // path in which case opts.Unauth will be set.
- if acl != nil && !opts.Unauth {
- ret.ACLResults = acl.AllowOperation(ctx, req, false)
- ret.RootPrivs = ret.ACLResults.RootPrivs
- // Root is always allowed; skip Sentinel/MFA checks
- if ret.ACLResults.IsRoot {
- //logger.Warn("token is root, skipping checks")
- ret.Allowed = true
- return ret
- }
- if !ret.ACLResults.Allowed {
- return ret
- }
- if !ret.RootPrivs && opts.RootPrivsRequired {
- return ret
- }
- }
-
- c.performEntPolicyChecks(ctx, acl, te, req, inEntity, opts, ret)
-
- return ret
-}
-
-func valueInParameterList(v interface{}, list []interface{}) bool {
- // Empty list is equivalent to the item always existing in the list
- if len(list) == 0 {
- return true
- }
-
- return valueInSlice(v, list)
-}
-
-func valueInSlice(v interface{}, list []interface{}) bool {
- for _, el := range list {
- if reflect.TypeOf(el).String() == "string" && reflect.TypeOf(v).String() == "string" {
- item := el.(string)
- val := v.(string)
-
- if strutil.GlobbedStringsMatch(item, val) {
- return true
- }
- } else if reflect.DeepEqual(el, v) {
- return true
- }
- }
-
- return false
-}
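
AllowOperation resolves a request path against the exact-match tree first and only falls back to the longest glob prefix, defaulting to deny. A sketch of that lookup order with github.com/armon/go-radix (string values stand in for *ACLPermissions, and namespace prefixing is omitted):

```go
package main

import (
	"fmt"

	radix "github.com/armon/go-radix"
)

func main() {
	exactRules := radix.New()
	globRules := radix.New()
	exactRules.Insert("secret/app/config", "read")
	globRules.Insert("secret/app/", "list")

	path := "secret/app/db-creds"
	if v, ok := exactRules.Get(path); ok {
		fmt.Println("exact match:", v)
		return
	}
	if prefix, v, ok := globRules.LongestPrefix(path); ok {
		fmt.Println("glob match via", prefix, "->", v)
		return
	}
	fmt.Println("default deny")
}
```
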
diff --git a/vendor/github.com/hashicorp/vault/vault/acl_util.go b/vendor/github.com/hashicorp/vault/vault/acl_util.go
deleted file mode 100644
index ade4c724..00000000
--- a/vendor/github.com/hashicorp/vault/vault/acl_util.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
-
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/logical"
-)
-
-func (c *Core) performEntPolicyChecks(ctx context.Context, acl *ACL, te *logical.TokenEntry, req *logical.Request, inEntity *identity.Entity, opts *PolicyCheckOpts, ret *AuthResults) {
- ret.Allowed = true
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/audit.go b/vendor/github.com/hashicorp/vault/vault/audit.go
deleted file mode 100644
index cc4c8d8d..00000000
--- a/vendor/github.com/hashicorp/vault/vault/audit.go
+++ /dev/null
@@ -1,502 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/sha256"
- "errors"
- "fmt"
- "strings"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // coreAuditConfigPath is used to store the audit configuration.
- // Audit configuration is protected within the Vault itself, which means it
- // can only be viewed or modified after an unseal.
- coreAuditConfigPath = "core/audit"
-
- // coreLocalAuditConfigPath is used to store audit information for local
- // (non-replicated) mounts
- coreLocalAuditConfigPath = "core/local-audit"
-
- // auditBarrierPrefix is the prefix to the UUID used in the
- // barrier view for the audit backends.
- auditBarrierPrefix = "audit/"
-
- // auditTableType is the value we expect to find for the audit table and
- // corresponding entries
- auditTableType = "audit"
-)
-
-var (
-	// errLoadAuditFailed is returned if loading the audit tables encounters an error
- errLoadAuditFailed = errors.New("failed to setup audit table")
-)
-
-// enableAudit is used to enable a new audit backend
-func (c *Core) enableAudit(ctx context.Context, entry *MountEntry, updateStorage bool) error {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(entry.Path, "/") {
- entry.Path += "/"
- }
-
- // Ensure there is a name
- if entry.Path == "/" {
- return fmt.Errorf("backend path must be specified")
- }
-
- // Update the audit table
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- // Look for matching name
- for _, ent := range c.audit.Entries {
- switch {
-		// Reject overlap in either direction: existing is sql/mysql/ and
-		// new is sql/, or existing is sql/ and new is sql/mysql/
- case strings.HasPrefix(ent.Path, entry.Path):
- fallthrough
- case strings.HasPrefix(entry.Path, ent.Path):
- return fmt.Errorf("path already in use")
- }
- }
-
- // Generate a new UUID and view
- if entry.UUID == "" {
- entryUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.UUID = entryUUID
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor("audit_" + entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- }
- viewPath := entry.ViewPath()
- view := NewBarrierView(c.barrier, viewPath)
- addAuditPathChecker(c, entry, view, viewPath)
- origViewReadOnlyErr := view.getReadOnlyErr()
-
- // Mark the view as read-only until the mounting is complete and
- // ensure that it is reset after. This ensures that there will be no
- // writes during the construction of the backend.
- view.setReadOnlyErr(logical.ErrSetupReadOnly)
- defer view.setReadOnlyErr(origViewReadOnlyErr)
-
- // Lookup the new backend
- backend, err := c.newAuditBackend(ctx, entry, view, entry.Options)
- if err != nil {
- return err
- }
- if backend == nil {
- return fmt.Errorf("nil audit backend of type %q returned from factory", entry.Type)
- }
-
- newTable := c.audit.shallowClone()
- newTable.Entries = append(newTable.Entries, entry)
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- entry.NamespaceID = ns.ID
- entry.namespace = ns
-
- if updateStorage {
- if err := c.persistAudit(ctx, newTable, entry.Local); err != nil {
- return errors.New("failed to update audit table")
- }
- }
-
- c.audit = newTable
-
- // Register the backend
- c.auditBroker.Register(entry.Path, backend, view, entry.Local)
- if c.logger.IsInfo() {
- c.logger.Info("enabled audit backend", "path", entry.Path, "type", entry.Type)
- }
-
- return nil
-}
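-
-// exampleAuditPathConflict is a hypothetical sketch, not part of the
-// original source, showing the prefix check above: once "file/" is
-// mounted, both an exact duplicate and a nested path are rejected.
-func exampleAuditPathConflict(ctx context.Context, c *Core) {
- _ = c.enableAudit(ctx, &MountEntry{Path: "file/", Type: "file"}, true) // succeeds the first time
- err := c.enableAudit(ctx, &MountEntry{Path: "file/archive/", Type: "file"}, true)
- _ = err // "path already in use": "file/archive/" nests under the existing "file/"
-}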
-
-// disableAudit is used to disable an existing audit backend
-func (c *Core) disableAudit(ctx context.Context, path string, updateStorage bool) (bool, error) {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
-
- // Remove the entry from the mount table
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- newTable := c.audit.shallowClone()
- entry, err := newTable.remove(ctx, path)
- if err != nil {
- return false, err
- }
-
- // Ensure there was a match
- if entry == nil {
- return false, fmt.Errorf("no matching backend")
- }
-
- c.removeAuditReloadFunc(entry)
-
- // When unmounting all entries, the JSON code will load back up from storage
- // as a nil slice, which breaks tests; just set it to nil explicitly.
- if len(newTable.Entries) == 0 {
- newTable.Entries = nil
- }
-
- if updateStorage {
- // Update the audit table
- if err := c.persistAudit(ctx, newTable, entry.Local); err != nil {
- return true, errors.New("failed to update audit table")
- }
- }
-
- c.audit = newTable
-
- // Unmount the backend
- c.auditBroker.Deregister(path)
- if c.logger.IsInfo() {
- c.logger.Info("disabled audit backend", "path", path)
- }
-
- removeAuditPathChecker(c, entry)
-
- return true, nil
-}
-
-// loadAudits is invoked as part of postUnseal to load the audit table
-func (c *Core) loadAudits(ctx context.Context) error {
- auditTable := &MountTable{}
- localAuditTable := &MountTable{}
-
- // Load the existing audit table
- raw, err := c.barrier.Get(ctx, coreAuditConfigPath)
- if err != nil {
- c.logger.Error("failed to read audit table", "error", err)
- return errLoadAuditFailed
- }
- rawLocal, err := c.barrier.Get(ctx, coreLocalAuditConfigPath)
- if err != nil {
- c.logger.Error("failed to read local audit table", "error", err)
- return errLoadAuditFailed
- }
-
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- if raw != nil {
- if err := jsonutil.DecodeJSON(raw.Value, auditTable); err != nil {
- c.logger.Error("failed to decode audit table", "error", err)
- return errLoadAuditFailed
- }
- c.audit = auditTable
- }
-
- var needPersist bool
- if c.audit == nil {
- c.audit = defaultAuditTable()
- needPersist = true
- }
-
- if rawLocal != nil {
- if err := jsonutil.DecodeJSON(rawLocal.Value, localAuditTable); err != nil {
- c.logger.Error("failed to decode local audit table", "error", err)
- return errLoadAuditFailed
- }
- if localAuditTable != nil && len(localAuditTable.Entries) > 0 {
- c.audit.Entries = append(c.audit.Entries, localAuditTable.Entries...)
- }
- }
-
- // Upgrade to typed audit table
- if c.audit.Type == "" {
- c.audit.Type = auditTableType
- needPersist = true
- }
-
- // Upgrade to table-scoped entries
- for _, entry := range c.audit.Entries {
- if entry.Table == "" {
- entry.Table = c.audit.Type
- needPersist = true
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor("audit_" + entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- needPersist = true
- }
-
- if entry.NamespaceID == "" {
- entry.NamespaceID = namespace.RootNamespaceID
- needPersist = true
- }
- // Get the namespace from the namespace ID and load it in memory
- ns, err := NamespaceByID(ctx, entry.NamespaceID, c)
- if err != nil {
- return err
- }
- if ns == nil {
- return namespace.ErrNoNamespace
- }
- entry.namespace = ns
- }
-
- if !needPersist || c.perfStandby {
- return nil
- }
-
- if err := c.persistAudit(ctx, c.audit, false); err != nil {
- return errLoadAuditFailed
- }
- return nil
-}
-
-// persistAudit is used to persist the audit table after modification
-func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bool) error {
- if table.Type != auditTableType {
- c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", auditTableType)
- return fmt.Errorf("invalid table type given, not persisting")
- }
-
- for _, entry := range table.Entries {
- if entry.Table != table.Type {
- c.logger.Error("given entry to persist in audit table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
- return fmt.Errorf("invalid audit entry found, not persisting")
- }
- }
-
- nonLocalAudit := &MountTable{
- Type: auditTableType,
- }
-
- localAudit := &MountTable{
- Type: auditTableType,
- }
-
- for _, entry := range table.Entries {
- if entry.Local {
- localAudit.Entries = append(localAudit.Entries, entry)
- } else {
- nonLocalAudit.Entries = append(nonLocalAudit.Entries, entry)
- }
- }
-
- if !localOnly {
- // Marshal the table
- compressedBytes, err := jsonutil.EncodeJSONAndCompress(nonLocalAudit, nil)
- if err != nil {
- c.logger.Error("failed to encode and/or compress audit table", "error", err)
- return err
- }
-
- // Create an entry
- entry := &Entry{
- Key: coreAuditConfigPath,
- Value: compressedBytes,
- }
-
- // Write to the physical backend
- if err := c.barrier.Put(ctx, entry); err != nil {
- c.logger.Error("failed to persist audit table", "error", err)
- return err
- }
- }
-
- // Repeat with local audit
- compressedBytes, err := jsonutil.EncodeJSONAndCompress(localAudit, nil)
- if err != nil {
- c.logger.Error("failed to encode and/or compress local audit table", "error", err)
- return err
- }
-
- entry := &Entry{
- Key: coreLocalAuditConfigPath,
- Value: compressedBytes,
- }
-
- if err := c.barrier.Put(ctx, entry); err != nil {
- c.logger.Error("failed to persist local audit table", "error", err)
- return err
- }
-
- return nil
-}
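-
-// Illustrative sketch, not part of the original source: persistAudit always
-// partitions the table before writing, so a mixed table such as
-//
-//   {Path: "file/", Local: false}  -> core/audit
-//   {Path: "stdout/", Local: true} -> core/local-audit
-//
-// lands in two separate barrier keys. With localOnly=true only the second,
-// non-replicated write is performed.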
-
-// setupAudits is invoked after we've loaded the audit table to
-// initialize the audit backends
-func (c *Core) setupAudits(ctx context.Context) error {
- brokerLogger := c.baseLogger.Named("audit")
- c.AddLogger(brokerLogger)
- broker := NewAuditBroker(brokerLogger)
-
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- var successCount int
-
- for _, entry := range c.audit.Entries {
- // Create a barrier view using the UUID
- viewPath := entry.ViewPath()
- view := NewBarrierView(c.barrier, viewPath)
- addAuditPathChecker(c, entry, view, viewPath)
- origViewReadOnlyErr := view.getReadOnlyErr()
-
- // Mark the view as read-only until the mounting is complete and
- // ensure that it is reset after. This ensures that there will be no
- // writes during the construction of the backend.
- view.setReadOnlyErr(logical.ErrSetupReadOnly)
- c.postUnsealFuncs = append(c.postUnsealFuncs, func() {
- view.setReadOnlyErr(origViewReadOnlyErr)
- })
-
- // Initialize the backend
- backend, err := c.newAuditBackend(ctx, entry, view, entry.Options)
- if err != nil {
- c.logger.Error("failed to create audit entry", "path", entry.Path, "error", err)
- continue
- }
- if backend == nil {
- c.logger.Error("created audit entry was nil", "path", entry.Path, "type", entry.Type)
- continue
- }
-
- // Mount the backend
- broker.Register(entry.Path, backend, view, entry.Local)
-
- successCount++
- }
-
- if len(c.audit.Entries) > 0 && successCount == 0 {
- return errLoadAuditFailed
- }
-
- c.auditBroker = broker
- return nil
-}
-
-// teardownAudit is used before we seal the vault to reset the audit
-// backends to their unloaded state. This is reversed by loadAudits.
-func (c *Core) teardownAudits() error {
- c.auditLock.Lock()
- defer c.auditLock.Unlock()
-
- if c.audit != nil {
- for _, entry := range c.audit.Entries {
- c.removeAuditReloadFunc(entry)
- removeAuditPathChecker(c, entry)
- }
- }
-
- c.audit = nil
- c.auditBroker = nil
- return nil
-}
-
-// removeAuditReloadFunc removes the reload func from the working set. The
-// audit lock needs to be held before calling this.
-func (c *Core) removeAuditReloadFunc(entry *MountEntry) {
- switch entry.Type {
- case "file":
- key := "audit_file|" + entry.Path
- c.reloadFuncsLock.Lock()
-
- if c.logger.IsDebug() {
- c.baseLogger.Named("audit").Debug("removing reload function", "path", entry.Path)
- }
-
- delete(c.reloadFuncs, key)
-
- c.reloadFuncsLock.Unlock()
- }
-}
-
-// newAuditBackend is used to create and configure a new audit backend by name
-func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logical.Storage, conf map[string]string) (audit.Backend, error) {
- f, ok := c.auditBackends[entry.Type]
- if !ok {
- return nil, fmt.Errorf("unknown backend type: %q", entry.Type)
- }
- saltConfig := &salt.Config{
- HMAC: sha256.New,
- HMACType: "hmac-sha256",
- Location: salt.DefaultLocation,
- }
-
- be, err := f(ctx, &audit.BackendConfig{
- SaltView: view,
- SaltConfig: saltConfig,
- Config: conf,
- })
- if err != nil {
- return nil, err
- }
- if be == nil {
- return nil, fmt.Errorf("nil backend returned from %q factory function", entry.Type)
- }
-
- auditLogger := c.baseLogger.Named("audit")
- c.AddLogger(auditLogger)
-
- switch entry.Type {
- case "file":
- key := "audit_file|" + entry.Path
-
- c.reloadFuncsLock.Lock()
-
- if auditLogger.IsDebug() {
- auditLogger.Debug("adding reload function", "path", entry.Path)
- if entry.Options != nil {
- auditLogger.Debug("file backend options", "path", entry.Path, "file_path", entry.Options["file_path"])
- }
- }
-
- c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]interface{}) error {
- if auditLogger.IsInfo() {
- auditLogger.Info("reloading file audit backend", "path", entry.Path)
- }
- return be.Reload(ctx)
- })
-
- c.reloadFuncsLock.Unlock()
- case "socket":
- if auditLogger.IsDebug() {
- if entry.Options != nil {
- auditLogger.Debug("socket backend options", "path", entry.Path, "address", entry.Options["address"], "socket type", entry.Options["socket_type"])
- }
- }
- case "syslog":
- if auditLogger.IsDebug() {
- if entry.Options != nil {
- auditLogger.Debug("syslog backend options", "path", entry.Path, "facility", entry.Options["facility"], "tag", entry.Options["tag"])
- }
- }
- }
-
- return be, err
-}
-
-// defaultAuditTable creates a default audit table
-func defaultAuditTable() *MountTable {
- table := &MountTable{
- Type: auditTableType,
- }
- return table
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/audit_broker.go b/vendor/github.com/hashicorp/vault/vault/audit_broker.go
deleted file mode 100644
index c5b56527..00000000
--- a/vendor/github.com/hashicorp/vault/vault/audit_broker.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- metrics "github.com/armon/go-metrics"
- log "github.com/hashicorp/go-hclog"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/audit"
-)
-
-type backendEntry struct {
- backend audit.Backend
- view *BarrierView
- local bool
-}
-
-// AuditBroker is used to provide a single ingest interface to auditable
-// events given that multiple backends may be configured.
-type AuditBroker struct {
- sync.RWMutex
- backends map[string]backendEntry
- logger log.Logger
-}
-
-// NewAuditBroker creates a new audit broker
-func NewAuditBroker(log log.Logger) *AuditBroker {
- b := &AuditBroker{
- backends: make(map[string]backendEntry),
- logger: log,
- }
- return b
-}
-
-// Register is used to add new audit backend to the broker
-func (a *AuditBroker) Register(name string, b audit.Backend, v *BarrierView, local bool) {
- a.Lock()
- defer a.Unlock()
- a.backends[name] = backendEntry{
- backend: b,
- view: v,
- local: local,
- }
-}
-
-// Deregister is used to remove an audit backend from the broker
-func (a *AuditBroker) Deregister(name string) {
- a.Lock()
- defer a.Unlock()
- delete(a.backends, name)
-}
-
-// IsRegistered is used to check if a given audit backend is registered
-func (a *AuditBroker) IsRegistered(name string) bool {
- a.RLock()
- defer a.RUnlock()
- _, ok := a.backends[name]
- return ok
-}
-
-// IsLocal is used to check if a given audit backend is marked as local
-func (a *AuditBroker) IsLocal(name string) (bool, error) {
- a.RLock()
- defer a.RUnlock()
- be, ok := a.backends[name]
- if ok {
- return be.local, nil
- }
- return false, fmt.Errorf("unknown audit backend %q", name)
-}
-
-// GetHash returns a hash using the salt of the given backend
-func (a *AuditBroker) GetHash(ctx context.Context, name string, input string) (string, error) {
- a.RLock()
- defer a.RUnlock()
- be, ok := a.backends[name]
- if !ok {
- return "", fmt.Errorf("unknown audit backend %q", name)
- }
-
- return be.backend.GetHash(ctx, input)
-}
-
-// LogRequest is used to ensure all the audit backends have an opportunity to
-// log the given request and that *at least one* succeeds.
-func (a *AuditBroker) LogRequest(ctx context.Context, in *audit.LogInput, headersConfig *AuditedHeadersConfig) (ret error) {
- defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now())
- a.RLock()
- defer a.RUnlock()
-
- var retErr *multierror.Error
-
- defer func() {
- if r := recover(); r != nil {
- a.logger.Error("panic during logging", "request_path", in.Request.Path, "error", r)
- retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
- }
-
- ret = retErr.ErrorOrNil()
- failure := float32(0.0)
- if ret != nil {
- failure = 1.0
- }
- metrics.IncrCounter([]string{"audit", "log_request_failure"}, failure)
- }()
-
- // All logged requests must have an identifier
- //if req.ID == "" {
- // a.logger.Error("missing identifier in request object", "request_path", req.Path)
- // retErr = multierror.Append(retErr, fmt.Errorf("missing identifier in request object: %s", req.Path))
- // return
- //}
-
- headers := in.Request.Headers
- defer func() {
- in.Request.Headers = headers
- }()
-
- // Ensure at least one backend logs
- anyLogged := false
- for name, be := range a.backends {
- in.Request.Headers = nil
- transHeaders, thErr := headersConfig.ApplyConfig(ctx, headers, be.backend.GetHash)
- if thErr != nil {
- a.logger.Error("backend failed to include headers", "backend", name, "error", thErr)
- continue
- }
- in.Request.Headers = transHeaders
-
- start := time.Now()
- lrErr := be.backend.LogRequest(ctx, in)
- metrics.MeasureSince([]string{"audit", name, "log_request"}, start)
- if lrErr != nil {
- a.logger.Error("backend failed to log request", "backend", name, "error", lrErr)
- } else {
- anyLogged = true
- }
- }
- if !anyLogged && len(a.backends) > 0 {
- retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the request"))
- }
-
- return retErr.ErrorOrNil()
-}
-
-// LogResponse is used to ensure all the audit backends have an opportunity to
-// log the given response and that *at least one* succeeds.
-func (a *AuditBroker) LogResponse(ctx context.Context, in *audit.LogInput, headersConfig *AuditedHeadersConfig) (ret error) {
- defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now())
- a.RLock()
- defer a.RUnlock()
-
- var retErr *multierror.Error
-
- defer func() {
- if r := recover(); r != nil {
- a.logger.Error("panic during logging", "request_path", in.Request.Path, "error", r)
- retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
- }
-
- ret = retErr.ErrorOrNil()
-
- failure := float32(0.0)
- if ret != nil {
- failure = 1.0
- }
- metrics.IncrCounter([]string{"audit", "log_response_failure"}, failure)
- }()
-
- headers := in.Request.Headers
- defer func() {
- in.Request.Headers = headers
- }()
-
- // Ensure at least one backend logs
- anyLogged := false
- for name, be := range a.backends {
- in.Request.Headers = nil
- transHeaders, thErr := headersConfig.ApplyConfig(ctx, headers, be.backend.GetHash)
- if thErr != nil {
- a.logger.Error("backend failed to include headers", "backend", name, "error", thErr)
- continue
- }
- in.Request.Headers = transHeaders
-
- start := time.Now()
- lrErr := be.backend.LogResponse(ctx, in)
- metrics.MeasureSince([]string{"audit", name, "log_response"}, start)
- if lrErr != nil {
- a.logger.Error("backend failed to log response", "backend", name, "error", lrErr)
- } else {
- anyLogged = true
- }
- }
- if !anyLogged && len(a.backends) > 0 {
- retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the response"))
- }
-
- return retErr.ErrorOrNil()
-}
-
-func (a *AuditBroker) Invalidate(ctx context.Context, key string) {
- // For now we ignore the key as this would only apply to salts. We just
- // sort of brute force it on each one.
- a.Lock()
- defer a.Unlock()
- for _, be := range a.backends {
- be.backend.Invalidate(ctx)
- }
-}
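-
-// exampleBrokerFanOut is a hypothetical usage sketch, not part of the
-// original source: register a backend, then fan a request out to every
-// registered backend. LogRequest returns an error only when no backend at
-// all managed to write the entry.
-func exampleBrokerFanOut(ctx context.Context, b audit.Backend, v *BarrierView, hc *AuditedHeadersConfig, in *audit.LogInput) error {
- broker := NewAuditBroker(log.Default())
- broker.Register("file/", b, v, false)
- return broker.LogRequest(ctx, in, hc)
-}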
diff --git a/vendor/github.com/hashicorp/vault/vault/audited_headers.go b/vendor/github.com/hashicorp/vault/vault/audited_headers.go
deleted file mode 100644
index ca8383ea..00000000
--- a/vendor/github.com/hashicorp/vault/vault/audited_headers.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/logical"
-)
-
-// N.B.: While we could use textproto to get the canonical mime header, HTTP/2
-// requires all headers to be converted to lower case, so we just do that.
-
-const (
- // Key used in the BarrierView to store and retrieve the header config
- auditedHeadersEntry = "audited-headers"
- // Path used to create a sub view off of BarrierView
- auditedHeadersSubPath = "audited-headers-config/"
-)
-
-type auditedHeaderSettings struct {
- HMAC bool `json:"hmac"`
-}
-
-// AuditedHeadersConfig is used by the Audit Broker to write only approved
-// headers to the audit logs. It uses a BarrierView to persist the settings.
-type AuditedHeadersConfig struct {
- Headers map[string]*auditedHeaderSettings
-
- view *BarrierView
- sync.RWMutex
-}
-
-// add adds or overwrites a header in the config and updates the barrier view
-func (a *AuditedHeadersConfig) add(ctx context.Context, header string, hmac bool) error {
- if header == "" {
- return fmt.Errorf("header value cannot be empty")
- }
-
- // Grab a write lock
- a.Lock()
- defer a.Unlock()
-
- if a.Headers == nil {
- a.Headers = make(map[string]*auditedHeaderSettings, 1)
- }
-
- a.Headers[strings.ToLower(header)] = &auditedHeaderSettings{hmac}
- entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers)
- if err != nil {
- return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err)
- }
-
- if err := a.view.Put(ctx, entry); err != nil {
- return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err)
- }
-
- return nil
-}
-
-// remove deletes a header out of the header config and updates the barrier view
-func (a *AuditedHeadersConfig) remove(ctx context.Context, header string) error {
- if header == "" {
- return fmt.Errorf("header value cannot be empty")
- }
-
- // Grab a write lock
- a.Lock()
- defer a.Unlock()
-
- // Nothing to delete
- if len(a.Headers) == 0 {
- return nil
- }
-
- delete(a.Headers, strings.ToLower(header))
- entry, err := logical.StorageEntryJSON(auditedHeadersEntry, a.Headers)
- if err != nil {
- return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err)
- }
-
- if err := a.view.Put(ctx, entry); err != nil {
- return errwrap.Wrapf("failed to persist audited headers config: {{err}}", err)
- }
-
- return nil
-}
-
-// ApplyConfig returns a map of approved headers and their values, either
-// hmac'ed or plaintext
-func (a *AuditedHeadersConfig) ApplyConfig(ctx context.Context, headers map[string][]string, hashFunc func(context.Context, string) (string, error)) (result map[string][]string, retErr error) {
- // Grab a read lock
- a.RLock()
- defer a.RUnlock()
-
- // Make a copy of the incoming headers with all keys lowercased so we can
- // compare case-insensitively
- lowerHeaders := make(map[string][]string, len(headers))
- for k, v := range headers {
- lowerHeaders[strings.ToLower(k)] = v
- }
-
- result = make(map[string][]string, len(a.Headers))
- for key, settings := range a.Headers {
- if val, ok := lowerHeaders[key]; ok {
- // copy the header values so we don't overwrite them
- hVals := make([]string, len(val))
- copy(hVals, val)
-
- // Optionally hmac the values
- if settings.HMAC {
- for i, el := range hVals {
- hVal, err := hashFunc(ctx, el)
- if err != nil {
- return nil, err
- }
- hVals[i] = hVal
- }
- }
-
- result[key] = hVals
- }
- }
-
- return result, nil
-}
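-
-// exampleApplyConfig is a hypothetical sketch, not part of the original
-// source, of ApplyConfig's filtering: headers absent from the config are
-// dropped, configured headers are kept, and HMAC-flagged values are run
-// through the supplied hash function. A stand-in hash is used here in
-// place of a backend's salted HMAC.
-func exampleApplyConfig(ctx context.Context, cfg *AuditedHeadersConfig) (map[string][]string, error) {
- headers := map[string][]string{
- "Authorization": {"Bearer secret"}, // hashed if configured with hmac=true
- "X-Forwarded-For": {"10.0.0.1"}, // passed through if configured with hmac=false
- "User-Agent": {"curl/7.64"}, // absent from the config: dropped
- }
- fakeHash := func(_ context.Context, s string) (string, error) {
- return "hmac-sha256:" + s, nil
- }
- return cfg.ApplyConfig(ctx, headers, fakeHash)
-}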
-
-// Initialize the headers config by loading from the barrier view
-func (c *Core) setupAuditedHeadersConfig(ctx context.Context) error {
- // Create a sub-view
- view := c.systemBarrierView.SubView(auditedHeadersSubPath)
-
- // Create the config
- out, err := view.Get(ctx, auditedHeadersEntry)
- if err != nil {
- return errwrap.Wrapf("failed to read config: {{err}}", err)
- }
-
- headers := make(map[string]*auditedHeaderSettings)
- if out != nil {
- err = out.DecodeJSON(&headers)
- if err != nil {
- return err
- }
- }
-
- // Ensure that we are able to case-insensitively access the headers;
- // necessary for the upgrade case
- lowerHeaders := make(map[string]*auditedHeaderSettings, len(headers))
- for k, v := range headers {
- lowerHeaders[strings.ToLower(k)] = v
- }
-
- c.auditedHeaders = &AuditedHeadersConfig{
- Headers: lowerHeaders,
- view: view,
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/auth.go b/vendor/github.com/hashicorp/vault/vault/auth.go
deleted file mode 100644
index 3a3f8014..00000000
--- a/vendor/github.com/hashicorp/vault/vault/auth.go
+++ /dev/null
@@ -1,776 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/builtin/plugin"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // coreAuthConfigPath is used to store the auth configuration.
- // Auth configuration is protected within the Vault itself, which means it
- // can only be viewed or modified after an unseal.
- coreAuthConfigPath = "core/auth"
-
- // coreLocalAuthConfigPath is used to store credential configuration for
- // local (non-replicated) mounts
- coreLocalAuthConfigPath = "core/local-auth"
-
- // credentialBarrierPrefix is the prefix to the UUID used in the
- // barrier view for the credential backends.
- credentialBarrierPrefix = "auth/"
-
- // credentialRoutePrefix is the mount prefix used for the router
- credentialRoutePrefix = "auth/"
-
- // credentialTableType is the value we expect to find for the credential
- // table and corresponding entries
- credentialTableType = "auth"
-)
-
-var (
- // errLoadAuthFailed if loadCredentials encounters an error
- errLoadAuthFailed = errors.New("failed to setup auth table")
-
- // credentialAliases maps old backend names to new backend names, allowing us
- // to move/rename backends but maintain backwards compatibility
- credentialAliases = map[string]string{"aws-ec2": "aws"}
-)
-
-// enableCredential is used to enable a new credential backend
-func (c *Core) enableCredential(ctx context.Context, entry *MountEntry) error {
- return c.enableCredentialInternal(ctx, entry, MountTableUpdateStorage)
-}
-
-// enableCredentialInternal is used to enable a new credential backend,
-// optionally persisting the updated auth table
-func (c *Core) enableCredentialInternal(ctx context.Context, entry *MountEntry, updateStorage bool) error {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(entry.Path, "/") {
- entry.Path += "/"
- }
-
- // Ensure there is a name
- if entry.Path == "/" {
- return fmt.Errorf("backend path must be specified")
- }
-
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- entry.NamespaceID = ns.ID
- entry.namespace = ns
-
- // Populate cache
- NamespaceByID(ctx, ns.ID, c)
-
- // Look for matching name
- for _, ent := range c.auth.Entries {
- if ns.ID == ent.NamespaceID {
- switch {
- // Existing is oauth/github/ and new is oauth/, or
- // existing is oauth/ and new is oauth/github/
- case strings.HasPrefix(ent.Path, entry.Path):
- fallthrough
- case strings.HasPrefix(entry.Path, ent.Path):
- return logical.CodedError(409, "path is already in use")
- }
- }
- }
-
- // Ensure the token backend is a singleton
- if entry.Type == "token" {
- return fmt.Errorf("token credential backend cannot be instantiated")
- }
-
- if conflict := c.router.MountConflict(ctx, credentialRoutePrefix+entry.Path); conflict != "" {
- return logical.CodedError(409, fmt.Sprintf("existing mount at %s", conflict))
- }
-
- // Generate a new UUID and view
- if entry.UUID == "" {
- entryUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.UUID = entryUUID
- }
- if entry.BackendAwareUUID == "" {
- bUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.BackendAwareUUID = bUUID
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor("auth_" + entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- }
- // Sync values to the cache
- entry.SyncCache()
-
- viewPath := entry.ViewPath()
- view := NewBarrierView(c.barrier, viewPath)
-
- nilMount, err := preprocessMount(c, entry, view)
- if err != nil {
- return err
- }
- origViewReadOnlyErr := view.getReadOnlyErr()
-
- // Mark the view as read-only until the mounting is complete and
- // ensure that it is reset after. This ensures that there will be no
- // writes during the construction of the backend.
- view.setReadOnlyErr(logical.ErrSetupReadOnly)
- defer view.setReadOnlyErr(origViewReadOnlyErr)
-
- var backend logical.Backend
- // Create the new backend
- sysView := c.mountEntrySysView(entry)
- backend, err = c.newCredentialBackend(ctx, entry, sysView, view)
- if err != nil {
- return err
- }
- if backend == nil {
- return fmt.Errorf("nil backend returned from %q factory", entry.Type)
- }
-
- // Check for the correct backend type
- backendType := backend.Type()
- if backendType != logical.TypeCredential {
- return fmt.Errorf("cannot mount %q of type %q as an auth backend", entry.Type, backendType)
- }
-
- addPathCheckers(c, entry, backend, viewPath)
-
- // If the mount is filtered or we are on a DR secondary we don't want to
- // keep the actual backend running, so we clean it up and set it to nil
- // so the router does not have a pointer to the object.
- if nilMount {
- backend.Cleanup(ctx)
- backend = nil
- }
-
- // Update the auth table
- newTable := c.auth.shallowClone()
- newTable.Entries = append(newTable.Entries, entry)
- if updateStorage {
- if err := c.persistAuth(ctx, newTable, &entry.Local); err != nil {
- if err == logical.ErrReadOnly && c.perfStandby {
- return err
- }
- return errors.New("failed to update auth table")
- }
- }
-
- c.auth = newTable
-
- if err := c.router.Mount(backend, credentialRoutePrefix+entry.Path, entry, view); err != nil {
- return err
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("enabled credential backend", "path", entry.Path, "type", entry.Type)
- }
- return nil
-}
-
-// disableCredential is used to disable an existing credential backend
-func (c *Core) disableCredential(ctx context.Context, path string) error {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
-
- // Ensure the token backend is not affected
- if path == "token/" {
- return fmt.Errorf("token credential backend cannot be disabled")
- }
-
- return c.disableCredentialInternal(ctx, path, MountTableUpdateStorage)
-}
-
-func (c *Core) disableCredentialInternal(ctx context.Context, path string, updateStorage bool) error {
- path = credentialRoutePrefix + path
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
-
- // Verify exact match of the route
- match := c.router.MatchingMount(ctx, path)
- if match == "" || ns.Path+path != match {
- return fmt.Errorf("no matching mount")
- }
-
- // Store the view for this backend
- view := c.router.MatchingStorageByAPIPath(ctx, path)
- if view == nil {
- return fmt.Errorf("no matching backend %q", path)
- }
-
- // Get the backend/mount entry for this path, used to remove ignored
- // replication prefixes
- backend := c.router.MatchingBackend(ctx, path)
- entry := c.router.MatchingMountEntry(ctx, path)
-
- // Mark the entry as tainted
- if err := c.taintCredEntry(ctx, path, updateStorage); err != nil {
- return err
- }
-
- // Taint the router path to prevent routing
- if err := c.router.Taint(ctx, path); err != nil {
- return err
- }
-
- if c.expiration != nil && backend != nil {
- // Revoke credentials from this path
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- revokeCtx := namespace.ContextWithNamespace(c.activeContext, ns)
- if err := c.expiration.RevokePrefix(revokeCtx, path, true); err != nil {
- return err
- }
- }
-
- if backend != nil {
- // Call cleanup function if it exists
- backend.Cleanup(ctx)
- }
-
- // Unmount the backend
- if err := c.router.Unmount(ctx, path); err != nil {
- return err
- }
-
- viewPath := entry.ViewPath()
- switch {
- case !updateStorage:
- case c.IsDRSecondary(), entry.Local, !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary):
- // Have writable storage, remove the whole thing
- if err := logical.ClearView(ctx, view); err != nil {
- c.logger.Error("failed to clear view for path being unmounted", "error", err, "path", path)
- return err
- }
-
- case !entry.Local && c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary):
- if err := clearIgnoredPaths(ctx, c, backend, viewPath); err != nil {
- return err
- }
- }
-
- // Remove the mount table entry
- if err := c.removeCredEntry(ctx, strings.TrimPrefix(path, credentialRoutePrefix), updateStorage); err != nil {
- return err
- }
-
- removePathCheckers(c, entry, viewPath)
-
- if c.logger.IsInfo() {
- c.logger.Info("disabled credential backend", "path", path)
- }
-
- return nil
-}
-
-// removeCredEntry is used to remove an entry in the auth table
-func (c *Core) removeCredEntry(ctx context.Context, path string, updateStorage bool) error {
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- // Taint the entry from the auth table
- newTable := c.auth.shallowClone()
- entry, err := newTable.remove(ctx, path)
- if err != nil {
- return err
- }
- if entry == nil {
- c.logger.Error("nil entry found removing entry in auth table", "path", path)
- return logical.CodedError(500, "failed to remove entry in auth table")
- }
-
- if updateStorage {
- // Update the auth table
- if err := c.persistAuth(ctx, newTable, &entry.Local); err != nil {
- if err == logical.ErrReadOnly && c.perfStandby {
- return err
- }
-
- return errors.New("failed to update auth table")
- }
- }
-
- c.auth = newTable
-
- return nil
-}
-
-// remountCredEntryForce takes a copy of the mount entry for the path and fully
-// unmounts and remounts the backend to pick up any changes, such as filtered
-// paths
-func (c *Core) remountCredEntryForce(ctx context.Context, path string) error {
- fullPath := credentialRoutePrefix + path
- me := c.router.MatchingMountEntry(ctx, fullPath)
- if me == nil {
- return fmt.Errorf("cannot find mount for path %q", path)
- }
-
- me, err := me.Clone()
- if err != nil {
- return err
- }
-
- if err := c.disableCredential(ctx, path); err != nil {
- return err
- }
- return c.enableCredential(ctx, me)
-}
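-
-// Hypothetical usage sketch, not part of the original source:
-//
-//   // Pick up changed filtered paths for an already-mounted backend.
-//   if err := c.remountCredEntryForce(ctx, "approle/"); err != nil {
-//       return err
-//   }
-//
-// Note that the disable/enable pair is sequential rather than atomic, so a
-// request routed in between would briefly see no matching mount.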
-
-// taintCredEntry is used to mark an entry in the auth table as tainted
-func (c *Core) taintCredEntry(ctx context.Context, path string, updateStorage bool) error {
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- // Taint the entry from the auth table
- // We do this on the original since setting the taint operates
- // on the entries which a shallow clone shares anyways
- entry, err := c.auth.setTaint(ctx, strings.TrimPrefix(path, credentialRoutePrefix), true)
- if err != nil {
- return err
- }
-
- // Ensure there was a match
- if entry == nil {
- return fmt.Errorf("no matching backend")
- }
-
- if updateStorage {
- // Update the auth table
- if err := c.persistAuth(ctx, c.auth, &entry.Local); err != nil {
- if err == logical.ErrReadOnly && c.perfStandby {
- return err
- }
- return errors.New("failed to update auth table")
- }
- }
-
- return nil
-}
-
-// loadCredentials is invoked as part of postUnseal to load the auth table
-func (c *Core) loadCredentials(ctx context.Context) error {
- // Load the existing mount table
- raw, err := c.barrier.Get(ctx, coreAuthConfigPath)
- if err != nil {
- c.logger.Error("failed to read auth table", "error", err)
- return errLoadAuthFailed
- }
- rawLocal, err := c.barrier.Get(ctx, coreLocalAuthConfigPath)
- if err != nil {
- c.logger.Error("failed to read local auth table", "error", err)
- return errLoadAuthFailed
- }
-
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- if raw != nil {
- authTable, err := c.decodeMountTable(ctx, raw.Value)
- if err != nil {
- c.logger.Error("failed to decompress and/or decode the auth table", "error", err)
- return err
- }
- c.auth = authTable
- }
-
- var needPersist bool
- if c.auth == nil {
- c.auth = c.defaultAuthTable()
- needPersist = true
- }
-
- if rawLocal != nil {
- localAuthTable, err := c.decodeMountTable(ctx, rawLocal.Value)
- if err != nil {
- c.logger.Error("failed to decompress and/or decode the local mount table", "error", err)
- return err
- }
- if localAuthTable != nil && len(localAuthTable.Entries) > 0 {
- c.auth.Entries = append(c.auth.Entries, localAuthTable.Entries...)
- }
- }
-
- // Upgrade to typed auth table
- if c.auth.Type == "" {
- c.auth.Type = credentialTableType
- needPersist = true
- }
-
- // Upgrade to table-scoped entries
- for _, entry := range c.auth.Entries {
- if entry.Table == "" {
- entry.Table = c.auth.Type
- needPersist = true
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor("auth_" + entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- needPersist = true
- }
- if entry.BackendAwareUUID == "" {
- bUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.BackendAwareUUID = bUUID
- needPersist = true
- }
-
- if entry.NamespaceID == "" {
- entry.NamespaceID = namespace.RootNamespaceID
- needPersist = true
- }
- ns, err := NamespaceByID(ctx, entry.NamespaceID, c)
- if err != nil {
- return err
- }
- if ns == nil {
- return namespace.ErrNoNamespace
- }
- entry.namespace = ns
-
- // Sync values to the cache
- entry.SyncCache()
- }
-
- if !needPersist {
- return nil
- }
-
- if err := c.persistAuth(ctx, c.auth, nil); err != nil {
- c.logger.Error("failed to persist auth table", "error", err)
- return errLoadAuthFailed
- }
-
- return nil
-}
-
-// persistAuth is used to persist the auth table after modification
-func (c *Core) persistAuth(ctx context.Context, table *MountTable, local *bool) error {
- if table.Type != credentialTableType {
- c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", credentialTableType)
- return fmt.Errorf("invalid table type given, not persisting")
- }
-
- for _, entry := range table.Entries {
- if entry.Table != table.Type {
- c.logger.Error("given entry to persist in auth table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
- return fmt.Errorf("invalid auth entry found, not persisting")
- }
- }
-
- nonLocalAuth := &MountTable{
- Type: credentialTableType,
- }
-
- localAuth := &MountTable{
- Type: credentialTableType,
- }
-
- for _, entry := range table.Entries {
- if entry.Local {
- localAuth.Entries = append(localAuth.Entries, entry)
- } else {
- nonLocalAuth.Entries = append(nonLocalAuth.Entries, entry)
- }
- }
-
- writeTable := func(mt *MountTable, path string) error {
- // Encode the mount table into JSON and compress it (lzw).
- compressedBytes, err := jsonutil.EncodeJSONAndCompress(mt, nil)
- if err != nil {
- c.logger.Error("failed to encode or compress auth mount table", "error", err)
- return err
- }
-
- // Create an entry
- entry := &Entry{
- Key: path,
- Value: compressedBytes,
- }
-
- // Write to the physical backend
- if err := c.barrier.Put(ctx, entry); err != nil {
- c.logger.Error("failed to persist auth mount table", "error", err)
- return err
- }
- return nil
- }
-
- var err error
- switch {
- case local == nil:
- // Write non-local mounts
- err := writeTable(nonLocalAuth, coreAuthConfigPath)
- if err != nil {
- return err
- }
-
- // Write local mounts
- err = writeTable(localAuth, coreLocalAuthConfigPath)
- if err != nil {
- return err
- }
- case *local:
- err = writeTable(localAuth, coreLocalAuthConfigPath)
- default:
- err = writeTable(nonLocalAuth, coreAuthConfigPath)
- }
-
- return err
-}
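-
-// Illustrative sketch, not part of the original source: the three persist
-// modes selected by the *bool parameter above.
-//
-//   c.persistAuth(ctx, table, nil)    // write core/auth and core/local-auth
-//   local := true
-//   c.persistAuth(ctx, table, &local) // write only core/local-auth
-//   local = false
-//   c.persistAuth(ctx, table, &local) // write only core/auth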
-
-// setupCredentials is invoked after we've loaded the auth table to
-// initialize the credential backends and setup the router
-func (c *Core) setupCredentials(ctx context.Context) error {
- var persistNeeded bool
-
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- for _, entry := range c.auth.sortEntriesByPathDepth().Entries {
- var backend logical.Backend
-
- // Create a barrier view using the UUID
- viewPath := entry.ViewPath()
-
- // Singleton mounts cannot be filtered on a per-secondary basis
- // from replication
- if strutil.StrListContains(singletonMounts, entry.Type) {
- addFilterablePath(c, viewPath)
- }
-
- view := NewBarrierView(c.barrier, viewPath)
-
- // Determine the replicated state of the mount
- nilMount, err := preprocessMount(c, entry, view)
- if err != nil {
- return err
- }
- origViewReadOnlyErr := view.getReadOnlyErr()
-
- // Mark the view as read-only until the mounting is complete and
- // ensure that it is reset after. This ensures that there will be no
- // writes during the construction of the backend.
- view.setReadOnlyErr(logical.ErrSetupReadOnly)
- if strutil.StrListContains(singletonMounts, entry.Type) {
- defer view.setReadOnlyErr(origViewReadOnlyErr)
- } else {
- c.postUnsealFuncs = append(c.postUnsealFuncs, func() {
- view.setReadOnlyErr(origViewReadOnlyErr)
- })
- }
-
- // Initialize the backend
- sysView := c.mountEntrySysView(entry)
-
- backend, err = c.newCredentialBackend(ctx, entry, sysView, view)
- if err != nil {
- c.logger.Error("failed to create credential entry", "path", entry.Path, "error", err)
- if !c.builtinRegistry.Contains(entry.Type, consts.PluginTypeCredential) {
- // If we encounter an error instantiating a plugin-based backend,
- // skip backend initialization but register the entry in the mount table
- // to preserve its storage and path.
- c.logger.Warn("skipping plugin-based credential entry", "path", entry.Path)
- goto ROUTER_MOUNT
- }
- return errLoadAuthFailed
- }
- if backend == nil {
- return fmt.Errorf("nil backend returned from %q factory", entry.Type)
- }
-
- {
- // Check for the correct backend type
- backendType := backend.Type()
- if backendType != logical.TypeCredential {
- return fmt.Errorf("cannot mount %q of type %q as an auth backend", entry.Type, backendType)
- }
-
- addPathCheckers(c, entry, backend, viewPath)
- }
-
- // If the mount is filtered or we are on a DR secondary we don't want to
- // keep the actual backend running, so we clean it up and set it to nil
- // so the router does not have a pointer to the object.
- if nilMount {
- backend.Cleanup(ctx)
- backend = nil
- }
-
- ROUTER_MOUNT:
- // Mount the backend
- path := credentialRoutePrefix + entry.Path
- err = c.router.Mount(backend, path, entry, view)
- if err != nil {
- c.logger.Error("failed to mount auth entry", "path", entry.Path, "error", err)
- return errLoadAuthFailed
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("successfully enabled credential backend", "type", entry.Type, "path", entry.Path)
- }
-
- // Ensure the path is tainted if set in the mount table
- if entry.Tainted {
- c.router.Taint(ctx, path)
- }
-
- // Check if this is the token store
- if entry.Type == "token" {
- c.tokenStore = backend.(*TokenStore)
-
- // At some point when this isn't beta we may persist this but for
- // now always set it on mount
- entry.Config.TokenType = logical.TokenTypeDefaultService
-
- // this is loaded *after* the normal mounts, including cubbyhole
- c.router.tokenStoreSaltFunc = c.tokenStore.Salt
- if !c.IsDRSecondary() {
- c.tokenStore.cubbyholeBackend = c.router.MatchingBackend(ctx, cubbyholeMountPath).(*CubbyholeBackend)
- }
- }
-
- // Populate cache
- NamespaceByID(ctx, entry.NamespaceID, c)
- }
-
- if persistNeeded {
- // persist non-local auth
- return c.persistAuth(ctx, c.auth, nil)
- }
-
- return nil
-}
-
-// teardownCredentials is used before we seal the vault to reset the credential
-// backends to their unloaded state. This is reversed by loadCredentials.
-func (c *Core) teardownCredentials(ctx context.Context) error {
- c.authLock.Lock()
- defer c.authLock.Unlock()
-
- if c.auth != nil {
- authTable := c.auth.shallowClone()
- for _, e := range authTable.Entries {
- backend := c.router.MatchingBackend(namespace.ContextWithNamespace(ctx, e.namespace), credentialRoutePrefix+e.Path)
- if backend != nil {
- backend.Cleanup(ctx)
- }
-
- viewPath := e.ViewPath()
- removePathCheckers(c, e, viewPath)
- }
- }
-
- c.auth = nil
- c.tokenStore = nil
- return nil
-}
-
-// newCredentialBackend is used to create and configure a new credential backend by name
-func (c *Core) newCredentialBackend(ctx context.Context, entry *MountEntry, sysView logical.SystemView, view logical.Storage) (logical.Backend, error) {
- t := entry.Type
- if alias, ok := credentialAliases[t]; ok {
- t = alias
- }
-
- f, ok := c.credentialBackends[t]
- if !ok {
- f = plugin.Factory
- }
-
- // Set up conf to pass in plugin_name
- conf := make(map[string]string, len(entry.Options)+1)
- for k, v := range entry.Options {
- conf[k] = v
- }
-
- switch {
- case entry.Type == "plugin":
- conf["plugin_name"] = entry.Config.PluginName
- default:
- conf["plugin_name"] = t
- }
-
- conf["plugin_type"] = consts.PluginTypeCredential.String()
-
- authLogger := c.baseLogger.Named(fmt.Sprintf("auth.%s.%s", t, entry.Accessor))
- c.AddLogger(authLogger)
- config := &logical.BackendConfig{
- StorageView: view,
- Logger: authLogger,
- Config: conf,
- System: sysView,
- BackendUUID: entry.BackendAwareUUID,
- }
-
- b, err := f(ctx, config)
- if err != nil {
- return nil, err
- }
-
- return b, nil
-}
-
-// defaultAuthTable creates a default auth table
-func (c *Core) defaultAuthTable() *MountTable {
- table := &MountTable{
- Type: credentialTableType,
- }
- tokenUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not generate UUID for default auth table token entry: %v", err))
- }
- tokenAccessor, err := c.generateMountAccessor("auth_token")
- if err != nil {
- panic(fmt.Sprintf("could not generate accessor for default auth table token entry: %v", err))
- }
- tokenBackendUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create identity backend UUID: %v", err))
- }
- tokenAuth := &MountEntry{
- Table: credentialTableType,
- Path: "token/",
- Type: "token",
- Description: "token based credentials",
- UUID: tokenUUID,
- Accessor: tokenAccessor,
- BackendAwareUUID: tokenBackendUUID,
- }
- table.Entries = append(table.Entries, tokenAuth)
- return table
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier.go b/vendor/github.com/hashicorp/vault/vault/barrier.go
deleted file mode 100644
index 7f8a3138..00000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "time"
-
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- // ErrBarrierSealed is returned if an operation is performed on
- // a sealed barrier. No operation is expected to succeed before unsealing.
- ErrBarrierSealed = errors.New("Vault is sealed")
-
- // ErrBarrierAlreadyInit is returned if the barrier is already
- // initialized. This prevents a re-initialization.
- ErrBarrierAlreadyInit = errors.New("Vault is already initialized")
-
- // ErrBarrierNotInit is returned if an attempt is made to unseal
- // an uninitialized barrier.
- ErrBarrierNotInit = errors.New("Vault is not initialized")
-
- // ErrBarrierInvalidKey is returned if the Unseal key is invalid
- ErrBarrierInvalidKey = errors.New("Unseal failed, invalid key")
-)
-
-const (
- // barrierInitPath is the path used to store our init sentinel file
- barrierInitPath = "barrier/init"
-
- // keyringPath is the location of the keyring data. This is encrypted
- // by the master key.
- keyringPath = "core/keyring"
- keyringPrefix = "core/"
-
- // keyringUpgradePrefix is the path used to store keyring update entries.
- // When running in HA mode, the active instance will install the new key
- // and re-write the keyring. For standby instances, they need an upgrade
- // path from key N to N+1. They cannot just use the master key because
- // in the event of a rekey, that master key can no longer decrypt the keyring.
- // When key N+1 is installed, we create an entry at "prefix/N" which uses
- // encryption key N to provide the N+1 key. The standby instances scan
- // for this periodically and refresh their keyring. The upgrade keys
- // are deleted after a few minutes, but this provides enough time for the
- // standby instances to upgrade without causing any disruption.
- keyringUpgradePrefix = "core/upgrade/"
-
- // masterKeyPath is the location of the master key. This is encrypted
- // by the latest key in the keyring. This is only used by standby instances
- // to handle the case of a rekey. If the active instance does a rekey,
- // the standby instances can no longer reload the keyring since they
- // have the old master key. This key can be decrypted if you have the
- // keyring to discover the new master key. The new master key is then
- // used to reload the keyring itself.
- masterKeyPath = "core/master"
-)
-
-// SecurityBarrier is a critical component of Vault. It is used to wrap
-// an untrusted physical backend and provide a single point of encryption,
-// decryption and checksum verification. The goal is to ensure that any
-// data written to the barrier is confidential and that integrity is preserved.
-// As a real-world analogy, this is the steel and concrete wrapper around
-// a Vault. The barrier should only be unlockable given its key.
-type SecurityBarrier interface {
- // Initialized checks if the barrier has been initialized
- // and has a master key set.
- Initialized(ctx context.Context) (bool, error)
-
- // Initialize works only if the barrier has not been initialized
- // and makes use of the given master key.
- Initialize(context.Context, []byte) error
-
- // GenerateKey is used to generate a new key
- GenerateKey() ([]byte, error)
-
- // KeyLength returns the minimum and maximum supported key sizes,
- // used to sanity check a key
- KeyLength() (int, int)
-
- // Sealed checks if the barrier has been unlocked yet. The Barrier
- // is not expected to be able to perform any CRUD until it is unsealed.
- Sealed() (bool, error)
-
- // Unseal is used to provide the master key which permits the barrier
- // to be unsealed. If the key is not correct, the barrier remains sealed.
- Unseal(ctx context.Context, key []byte) error
-
- // VerifyMaster is used to check if the given key matches the master key
- VerifyMaster(key []byte) error
-
- // SetMasterKey is used to directly set a new master key. This is used in
- // replicated scenarios due to the chicken and egg problem of reloading the
- // keyring from disk before we have the master key to decrypt it.
- SetMasterKey(key []byte) error
-
- // ReloadKeyring is used to re-read the underlying keyring.
- // This is used for HA deployments to ensure the latest keyring
- // is present in the leader.
- ReloadKeyring(ctx context.Context) error
-
- // ReloadMasterKey is used to re-read the underlying masterkey.
- // This is used for HA deployments to ensure the latest master key
- // is available for keyring reloading.
- ReloadMasterKey(ctx context.Context) error
-
- // Seal is used to re-seal the barrier. This requires the barrier to
- // be unsealed again to perform any further operations.
- Seal() error
-
- // Rotate is used to create a new encryption key. All future writes
- // should use the new key, while old values should still be decryptable.
- Rotate(ctx context.Context) (uint32, error)
-
- // CreateUpgrade creates an upgrade path key to the given term from the previous term
- CreateUpgrade(ctx context.Context, term uint32) error
-
- // DestroyUpgrade destroys the upgrade path key to the given term
- DestroyUpgrade(ctx context.Context, term uint32) error
-
- // CheckUpgrade looks for an upgrade to the current term and installs it
- CheckUpgrade(ctx context.Context) (bool, uint32, error)
-
- // ActiveKeyInfo is used to inform details about the active key
- ActiveKeyInfo() (*KeyInfo, error)
-
- // Rekey is used to change the master key used to protect the keyring
- Rekey(context.Context, []byte) error
-
- // For replication we must send over the keyring, so this must be available
- Keyring() (*Keyring, error)
-
- // SecurityBarrier must provide the storage APIs
- BarrierStorage
-
- // SecurityBarrier must provide the encryption APIs
- BarrierEncryptor
-}
-
-// BarrierStorage is the storage only interface required for a Barrier.
-type BarrierStorage interface {
- // Put is used to insert or update an entry
- Put(ctx context.Context, entry *Entry) error
-
- // Get is used to fetch an entry
- Get(ctx context.Context, key string) (*Entry, error)
-
- // Delete is used to permanently delete an entry
- Delete(ctx context.Context, key string) error
-
- // List is used to list all the keys under a given
- // prefix, up to the next prefix.
- List(ctx context.Context, prefix string) ([]string, error)
-}
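-
-// inmemBarrierStorage is a hypothetical sketch, not part of the original
-// source: a minimal map-backed BarrierStorage that illustrates the
-// contract. It assumes "strings" and "sync" imports and skips the
-// "up to the next prefix" folding a real List implementation performs.
-type inmemBarrierStorage struct {
- mu sync.RWMutex
- data map[string]*Entry
-}
-
-func (s *inmemBarrierStorage) Put(_ context.Context, e *Entry) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.data[e.Key] = e
- return nil
-}
-
-func (s *inmemBarrierStorage) Get(_ context.Context, key string) (*Entry, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- return s.data[key], nil // nil, nil when absent, matching barrier semantics
-}
-
-func (s *inmemBarrierStorage) Delete(_ context.Context, key string) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- delete(s.data, key)
- return nil
-}
-
-func (s *inmemBarrierStorage) List(_ context.Context, prefix string) ([]string, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- var keys []string
- for k := range s.data {
- if strings.HasPrefix(k, prefix) {
- keys = append(keys, strings.TrimPrefix(k, prefix))
- }
- }
- return keys, nil
-}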
-
-// BarrierEncryptor is the in-memory-only interface that does not actually
-// persist data through the underlying barrier. It is used by lower-level
-// modules like the Write-Ahead-Log and Merkle index to allow them to use
-// the barrier's encryption.
-type BarrierEncryptor interface {
- Encrypt(ctx context.Context, key string, plaintext []byte) ([]byte, error)
- Decrypt(ctx context.Context, key string, ciphertext []byte) ([]byte, error)
-}
-
-// Entry is used to represent data stored by the security barrier
-type Entry struct {
- Key string
- Value []byte
- SealWrap bool
-}
-
-// Logical turns the Entry into a logical storage entry.
-func (e *Entry) Logical() *logical.StorageEntry {
- return &logical.StorageEntry{
- Key: e.Key,
- Value: e.Value,
- SealWrap: e.SealWrap,
- }
-}
-
-// KeyInfo is used to convey information about the encryption key
-type KeyInfo struct {
- Term int
- InstallTime time.Time
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_access.go b/vendor/github.com/hashicorp/vault/vault/barrier_access.go
deleted file mode 100644
index 84e6e747..00000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_access.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package vault
-
-import "context"
-
-// BarrierEncryptorAccess is a wrapper around BarrierEncryptor that allows Core
-// to expose its barrier encrypt/decrypt operations through BarrierEncryptorAccess()
-// while restricting the ability to modify Core.barrier itself.
-type BarrierEncryptorAccess struct {
- barrierEncryptor BarrierEncryptor
-}
-
-var _ BarrierEncryptor = (*BarrierEncryptorAccess)(nil)
-
-func NewBarrierEncryptorAccess(barrierEncryptor BarrierEncryptor) *BarrierEncryptorAccess {
- return &BarrierEncryptorAccess{barrierEncryptor: barrierEncryptor}
-}
-
-func (b *BarrierEncryptorAccess) Encrypt(ctx context.Context, key string, plaintext []byte) ([]byte, error) {
- return b.barrierEncryptor.Encrypt(ctx, key, plaintext)
-}
-
-func (b *BarrierEncryptorAccess) Decrypt(ctx context.Context, key string, ciphertext []byte) ([]byte, error) {
- return b.barrierEncryptor.Decrypt(ctx, key, ciphertext)
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go b/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go
deleted file mode 100644
index 8ddae289..00000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_aes_gcm.go
+++ /dev/null
@@ -1,949 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/aes"
- "crypto/cipher"
- "crypto/rand"
- "crypto/subtle"
- "encoding/binary"
- "errors"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/physical"
-)
-
-const (
- // initialKeyTerm is the hard coded initial key term. This is
- // used only for values that are not encrypted with the keyring.
- initialKeyTerm = 1
-
- // termSize the number of bytes used for the key term.
- termSize = 4
-)
-
-// Versions of the AESGCM storage methodology
-const (
- AESGCMVersion1 = 0x1
- AESGCMVersion2 = 0x2
-)
-
-// barrierInit is the JSON encoded value stored
-type barrierInit struct {
- Version int // Version is the current format version
- Key []byte // Key is the primary encryption key
-}
-
-// Validate AESGCMBarrier satisfies SecurityBarrier interface
-var _ SecurityBarrier = &AESGCMBarrier{}
-
-// AESGCMBarrier is a SecurityBarrier implementation that uses the AES
-// cipher core and the Galois/Counter Mode (GCM) block mode. It uses Go's
-// default GCM nonce size of 12 bytes and a 256-bit key. AES-GCM is high
-// performance, and provides both confidentiality and integrity.
-type AESGCMBarrier struct {
- backend physical.Backend
-
- l sync.RWMutex
- sealed bool
-
- // keyring is used to maintain all of the encryption keys, including
- // the active key used for encryption, but also prior keys to allow
- // decryption of keys encrypted under previous terms.
- keyring *Keyring
-
- // cache is used to reduce the number of AEAD constructions we do
- cache map[uint32]cipher.AEAD
- cacheLock sync.RWMutex
-
- // currentAESGCMVersionByte is prefixed to a message to allow for
- // future versioning of barrier implementations. It's var instead
- // of const to allow for testing
- currentAESGCMVersionByte byte
-}
-
-// NewAESGCMBarrier is used to construct a new barrier that uses
-// the provided physical backend for storage.
-func NewAESGCMBarrier(physical physical.Backend) (*AESGCMBarrier, error) {
- b := &AESGCMBarrier{
- backend: physical,
- sealed: true,
- cache: make(map[uint32]cipher.AEAD),
- currentAESGCMVersionByte: byte(AESGCMVersion2),
- }
- return b, nil
-}
-
-// Initialized checks if the barrier has been initialized
-// and has a master key set.
-func (b *AESGCMBarrier) Initialized(ctx context.Context) (bool, error) {
- // Read the keyring file
- keys, err := b.backend.List(ctx, keyringPrefix)
- if err != nil {
- return false, errwrap.Wrapf("failed to check for initialization: {{err}}", err)
- }
- if strutil.StrListContains(keys, "keyring") {
- return true, nil
- }
-
- // Fallback, check for the old sentinel file
- out, err := b.backend.Get(ctx, barrierInitPath)
- if err != nil {
- return false, errwrap.Wrapf("failed to check for initialization: {{err}}", err)
- }
- return out != nil, nil
-}
-
-// Initialize works only if the barrier has not been initialized
-// and makes use of the given master key.
-func (b *AESGCMBarrier) Initialize(ctx context.Context, key []byte) error {
- // Verify the key size
- min, max := b.KeyLength()
- if len(key) < min || len(key) > max {
- return fmt.Errorf("key size must be %d or %d", min, max)
- }
-
- // Check if already initialized
- if alreadyInit, err := b.Initialized(ctx); err != nil {
- return err
- } else if alreadyInit {
- return ErrBarrierAlreadyInit
- }
-
- // Generate encryption key
- encrypt, err := b.GenerateKey()
- if err != nil {
- return errwrap.Wrapf("failed to generate encryption key: {{err}}", err)
- }
-
- // Create a new keyring, install the keys
- keyring := NewKeyring()
- keyring = keyring.SetMasterKey(key)
- keyring, err = keyring.AddKey(&Key{
- Term: 1,
- Version: 1,
- Value: encrypt,
- })
- if err != nil {
- return errwrap.Wrapf("failed to create keyring: {{err}}", err)
- }
- return b.persistKeyring(ctx, keyring)
-}
-
-// persistKeyring is used to write out the keyring using the
-// master key to encrypt it.
-func (b *AESGCMBarrier) persistKeyring(ctx context.Context, keyring *Keyring) error {
- // Create the keyring entry
- keyringBuf, err := keyring.Serialize()
- defer memzero(keyringBuf)
- if err != nil {
- return errwrap.Wrapf("failed to serialize keyring: {{err}}", err)
- }
-
- // Create the AES-GCM
- gcm, err := b.aeadFromKey(keyring.MasterKey())
- if err != nil {
- return err
- }
-
- // Encrypt the keyring with the master key
- value, err := b.encrypt(keyringPath, initialKeyTerm, gcm, keyringBuf)
- if err != nil {
- return err
- }
-
- // Create the keyring physical entry
- pe := &physical.Entry{
- Key: keyringPath,
- Value: value,
- }
- if err := b.backend.Put(ctx, pe); err != nil {
- return errwrap.Wrapf("failed to persist keyring: {{err}}", err)
- }
-
- // Serialize the master key value
- key := &Key{
- Term: 1,
- Version: 1,
- Value: keyring.MasterKey(),
- }
- keyBuf, err := key.Serialize()
- defer memzero(keyBuf)
- if err != nil {
- return errwrap.Wrapf("failed to serialize master key: {{err}}", err)
- }
-
- // Encrypt the master key
- activeKey := keyring.ActiveKey()
- aead, err := b.aeadFromKey(activeKey.Value)
- if err != nil {
- return err
- }
- value, err = b.encrypt(masterKeyPath, activeKey.Term, aead, keyBuf)
- if err != nil {
- return err
- }
-
- // Update the masterKeyPath for standby instances
- pe = &physical.Entry{
- Key: masterKeyPath,
- Value: value,
- }
- if err := b.backend.Put(ctx, pe); err != nil {
- return errwrap.Wrapf("failed to persist master key: {{err}}", err)
- }
- return nil
-}
-
-// GenerateKey is used to generate a new key
-func (b *AESGCMBarrier) GenerateKey() ([]byte, error) {
- // Generate a 256-bit key
- buf := make([]byte, 2*aes.BlockSize)
- _, err := rand.Read(buf)
- return buf, err
-}
-
-// KeyLength is used to sanity check a key
-func (b *AESGCMBarrier) KeyLength() (int, int) {
- return aes.BlockSize, 2 * aes.BlockSize
-}
-
-// Sealed checks if the barrier has been unlocked yet. The Barrier
-// is not expected to be able to perform any CRUD until it is unsealed.
-func (b *AESGCMBarrier) Sealed() (bool, error) {
- b.l.RLock()
- sealed := b.sealed
- b.l.RUnlock()
- return sealed, nil
-}
-
-// VerifyMaster is used to check if the given key matches the master key
-func (b *AESGCMBarrier) VerifyMaster(key []byte) error {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return ErrBarrierSealed
- }
- if subtle.ConstantTimeCompare(key, b.keyring.MasterKey()) != 1 {
- return ErrBarrierInvalidKey
- }
- return nil
-}
-
-// ReloadKeyring is used to re-read the underlying keyring.
-// This is used for HA deployments to ensure the latest keyring
-// is present in the leader.
-func (b *AESGCMBarrier) ReloadKeyring(ctx context.Context) error {
- b.l.Lock()
- defer b.l.Unlock()
-
- // Create the AES-GCM
- gcm, err := b.aeadFromKey(b.keyring.MasterKey())
- if err != nil {
- return err
- }
-
- // Read in the keyring
- out, err := b.backend.Get(ctx, keyringPath)
- if err != nil {
- return errwrap.Wrapf("failed to check for keyring: {{err}}", err)
- }
-
- // Ensure that the keyring exists. This should never happen,
- // and indicates something really bad has happened.
- if out == nil {
- return errors.New("keyring unexpectedly missing")
- }
-
- // Verify the keyring entry was written under the initial key term
- term := binary.BigEndian.Uint32(out.Value[:4])
- if term != initialKeyTerm {
- return errors.New("term mismatch")
- }
-
- // Decrypt the keyring
- plain, err := b.decrypt(keyringPath, gcm, out.Value)
- defer memzero(plain)
- if err != nil {
- if strings.Contains(err.Error(), "message authentication failed") {
- return ErrBarrierInvalidKey
- }
- return err
- }
-
- // Recover the keyring
- keyring, err := DeserializeKeyring(plain)
- if err != nil {
- return errwrap.Wrapf("keyring deserialization failed: {{err}}", err)
- }
-
- // Setup the keyring and finish
- b.keyring = keyring
- return nil
-}
-
-// ReloadMasterKey is used to re-read the underlying master key.
-// This is used for HA deployments to ensure the latest master key
-// is available for keyring reloading.
-func (b *AESGCMBarrier) ReloadMasterKey(ctx context.Context) error {
- // Read the master key entry written at masterKeyPath for standbys
- out, err := b.Get(ctx, masterKeyPath)
- if err != nil {
- return errwrap.Wrapf("failed to read master key path: {{err}}", err)
- }
-
- // The masterKeyPath entry could be missing (backwards compatibility);
- // we can ignore this and attempt to make progress with the current
- // master key.
- if out == nil {
- return nil
- }
-
- defer memzero(out.Value)
-
- // Deserialize the master key
- key, err := DeserializeKey(out.Value)
- if err != nil {
- return errwrap.Wrapf("failed to deserialize key: {{err}}", err)
- }
-
- b.l.Lock()
- defer b.l.Unlock()
-
- // Check if the master key is the same
- if subtle.ConstantTimeCompare(b.keyring.MasterKey(), key.Value) == 1 {
- return nil
- }
-
- // Update the master key
- oldKeyring := b.keyring
- b.keyring = b.keyring.SetMasterKey(key.Value)
- oldKeyring.Zeroize(false)
- return nil
-}
-
-// Unseal is used to provide the master key which permits the barrier
-// to be unsealed. If the key is not correct, the barrier remains sealed.
-func (b *AESGCMBarrier) Unseal(ctx context.Context, key []byte) error {
- b.l.Lock()
- defer b.l.Unlock()
-
- // Do nothing if already unsealed
- if !b.sealed {
- return nil
- }
-
- // Create the AES-GCM
- gcm, err := b.aeadFromKey(key)
- if err != nil {
- return err
- }
-
- // Read in the keyring
- out, err := b.backend.Get(ctx, keyringPath)
- if err != nil {
- return errwrap.Wrapf("failed to check for keyring: {{err}}", err)
- }
- if out != nil {
- // Verify the keyring entry was written under the initial key term
- term := binary.BigEndian.Uint32(out.Value[:4])
- if term != initialKeyTerm {
- return errors.New("term mismatch")
- }
-
- // Decrypt the keyring
- plain, err := b.decrypt(keyringPath, gcm, out.Value)
- defer memzero(plain)
- if err != nil {
- if strings.Contains(err.Error(), "message authentication failed") {
- return ErrBarrierInvalidKey
- }
- return err
- }
-
- // Recover the keyring
- keyring, err := DeserializeKeyring(plain)
- if err != nil {
- return errwrap.Wrapf("keyring deserialization failed: {{err}}", err)
- }
-
- // Setup the keyring and finish
- b.keyring = keyring
- b.sealed = false
- return nil
- }
-
- // Read the barrier initialization key
- out, err = b.backend.Get(ctx, barrierInitPath)
- if err != nil {
- return errwrap.Wrapf("failed to check for initialization: {{err}}", err)
- }
- if out == nil {
- return ErrBarrierNotInit
- }
-
- // Verify the barrier init entry was written under the initial key term
- term := binary.BigEndian.Uint32(out.Value[:4])
- if term != initialKeyTerm {
- return errors.New("term mismatch")
- }
-
- // Decrypt the barrier init key
- plain, err := b.decrypt(barrierInitPath, gcm, out.Value)
- if err != nil {
- if strings.Contains(err.Error(), "message authentication failed") {
- return ErrBarrierInvalidKey
- }
- return err
- }
- defer memzero(plain)
-
- // Unmarshal the barrier init
- var init barrierInit
- if err := jsonutil.DecodeJSON(plain, &init); err != nil {
- return fmt.Errorf("failed to unmarshal barrier init file")
- }
-
- // Set up a new keyring; this is for backwards compatibility
- keyringNew := NewKeyring()
- keyring := keyringNew.SetMasterKey(key)
-
- // AddKey reuses the master key, so we only zeroize after this call
- defer keyringNew.Zeroize(false)
-
- keyring, err = keyring.AddKey(&Key{
- Term: 1,
- Version: 1,
- Value: init.Key,
- })
- if err != nil {
- return errwrap.Wrapf("failed to create keyring: {{err}}", err)
- }
- if err := b.persistKeyring(ctx, keyring); err != nil {
- return err
- }
-
- // Delete the old barrier entry
- if err := b.backend.Delete(ctx, barrierInitPath); err != nil {
- return errwrap.Wrapf("failed to delete barrier init file: {{err}}", err)
- }
-
- // Set the vault as unsealed
- b.keyring = keyring
- b.sealed = false
- return nil
-}
-
-// Seal is used to re-seal the barrier. This requires the barrier to
-// be unsealed again to perform any further operations.
-func (b *AESGCMBarrier) Seal() error {
- b.l.Lock()
- defer b.l.Unlock()
-
- // Remove the primary key, and seal the vault
- b.cache = make(map[uint32]cipher.AEAD)
- b.keyring.Zeroize(true)
- b.keyring = nil
- b.sealed = true
- return nil
-}
-
-// Rotate is used to create a new encryption key. All future writes
-// should use the new key, while old values should still be decryptable.
-func (b *AESGCMBarrier) Rotate(ctx context.Context) (uint32, error) {
- b.l.Lock()
- defer b.l.Unlock()
- if b.sealed {
- return 0, ErrBarrierSealed
- }
-
- // Generate a new key
- encrypt, err := b.GenerateKey()
- if err != nil {
- return 0, errwrap.Wrapf("failed to generate encryption key: {{err}}", err)
- }
-
- // Get the next term
- term := b.keyring.ActiveTerm()
- newTerm := term + 1
-
- // Add a new encryption key
- newKeyring, err := b.keyring.AddKey(&Key{
- Term: newTerm,
- Version: 1,
- Value: encrypt,
- })
- if err != nil {
- return 0, errwrap.Wrapf("failed to add new encryption key: {{err}}", err)
- }
-
- // Persist the new keyring
- if err := b.persistKeyring(ctx, newKeyring); err != nil {
- return 0, err
- }
-
- // Swap the keyrings
- b.keyring = newKeyring
- return newTerm, nil
-}
-
-// CreateUpgrade creates an upgrade path key to the given term from the previous term
-func (b *AESGCMBarrier) CreateUpgrade(ctx context.Context, term uint32) error {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return ErrBarrierSealed
- }
-
- // Get the key for this term
- termKey := b.keyring.TermKey(term)
- buf, err := termKey.Serialize()
- defer memzero(buf)
- if err != nil {
- return err
- }
-
- // Get the AEAD for the previous term
- prevTerm := term - 1
- primary, err := b.aeadForTerm(prevTerm)
- if err != nil {
- return err
- }
-
- key := fmt.Sprintf("%s%d", keyringUpgradePrefix, prevTerm)
- value, err := b.encrypt(key, prevTerm, primary, buf)
- if err != nil {
- return err
- }
- // Create upgrade key
- pe := &physical.Entry{
- Key: key,
- Value: value,
- }
- return b.backend.Put(ctx, pe)
-}
-
-// DestroyUpgrade destroys the upgrade path key to the given term
-func (b *AESGCMBarrier) DestroyUpgrade(ctx context.Context, term uint32) error {
- path := fmt.Sprintf("%s%d", keyringUpgradePrefix, term-1)
- return b.Delete(ctx, path)
-}
-
-// CheckUpgrade looks for an upgrade to the current term and installs it
-func (b *AESGCMBarrier) CheckUpgrade(ctx context.Context) (bool, uint32, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return false, 0, ErrBarrierSealed
- }
-
- // Get the current term
- activeTerm := b.keyring.ActiveTerm()
-
- // Check for an upgrade key
- upgrade := fmt.Sprintf("%s%d", keyringUpgradePrefix, activeTerm)
- entry, err := b.Get(ctx, upgrade)
- if err != nil {
- return false, 0, err
- }
-
- // Nothing to do if no upgrade
- if entry == nil {
- return false, 0, nil
- }
-
- defer memzero(entry.Value)
-
- // Deserialize the key
- key, err := DeserializeKey(entry.Value)
- if err != nil {
- return false, 0, err
- }
-
- // Upgrade from read lock to write lock
- b.l.RUnlock()
- defer b.l.RLock()
- b.l.Lock()
- defer b.l.Unlock()
-
- // Update the keyring
- newKeyring, err := b.keyring.AddKey(key)
- if err != nil {
- return false, 0, errwrap.Wrapf("failed to add new encryption key: {{err}}", err)
- }
- b.keyring = newKeyring
-
- // Done!
- return true, key.Term, nil
-}
-
-// ActiveKeyInfo returns details about the active encryption key
-func (b *AESGCMBarrier) ActiveKeyInfo() (*KeyInfo, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- // Determine the key install time
- term := b.keyring.ActiveTerm()
- key := b.keyring.TermKey(term)
-
- // Return the key info
- info := &KeyInfo{
- Term: int(term),
- InstallTime: key.InstallTime,
- }
- return info, nil
-}
-
-// Rekey is used to change the master key used to protect the keyring
-func (b *AESGCMBarrier) Rekey(ctx context.Context, key []byte) error {
- b.l.Lock()
- defer b.l.Unlock()
-
- newKeyring, err := b.updateMasterKeyCommon(key)
- if err != nil {
- return err
- }
-
- // Persist the new keyring
- if err := b.persistKeyring(ctx, newKeyring); err != nil {
- return err
- }
-
- // Swap the keyrings
- oldKeyring := b.keyring
- b.keyring = newKeyring
- oldKeyring.Zeroize(false)
- return nil
-}
-
-// SetMasterKey updates the keyring's in-memory master key but does not persist
-// anything to storage
-func (b *AESGCMBarrier) SetMasterKey(key []byte) error {
- b.l.Lock()
- defer b.l.Unlock()
-
- newKeyring, err := b.updateMasterKeyCommon(key)
- if err != nil {
- return err
- }
-
- // Swap the keyrings
- oldKeyring := b.keyring
- b.keyring = newKeyring
- oldKeyring.Zeroize(false)
- return nil
-}
-
-// updateMasterKeyCommon performs common tasks related to updating the
-// master key; note that the lock must be held before calling this function
-func (b *AESGCMBarrier) updateMasterKeyCommon(key []byte) (*Keyring, error) {
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- // Verify the key size
- min, max := b.KeyLength()
- if len(key) < min || len(key) > max {
- return nil, fmt.Errorf("key size must be %d or %d", min, max)
- }
-
- return b.keyring.SetMasterKey(key), nil
-}
-
-// Put is used to insert or update an entry
-func (b *AESGCMBarrier) Put(ctx context.Context, entry *Entry) error {
- defer metrics.MeasureSince([]string{"barrier", "put"}, time.Now())
- b.l.RLock()
- if b.sealed {
- b.l.RUnlock()
- return ErrBarrierSealed
- }
-
- term := b.keyring.ActiveTerm()
- primary, err := b.aeadForTerm(term)
- b.l.RUnlock()
- if err != nil {
- return err
- }
-
- value, err := b.encrypt(entry.Key, term, primary, entry.Value)
- if err != nil {
- return err
- }
- pe := &physical.Entry{
- Key: entry.Key,
- Value: value,
- SealWrap: entry.SealWrap,
- }
- return b.backend.Put(ctx, pe)
-}
-
-// Get is used to fetch an entry
-func (b *AESGCMBarrier) Get(ctx context.Context, key string) (*Entry, error) {
- defer metrics.MeasureSince([]string{"barrier", "get"}, time.Now())
- b.l.RLock()
- if b.sealed {
- b.l.RUnlock()
- return nil, ErrBarrierSealed
- }
-
- // Read the key from the backend
- pe, err := b.backend.Get(ctx, key)
- if err != nil {
- b.l.RUnlock()
- return nil, err
- } else if pe == nil {
- b.l.RUnlock()
- return nil, nil
- }
-
- if len(pe.Value) < 4 {
- b.l.RUnlock()
- return nil, errors.New("invalid value")
- }
-
- // Verify the term
- term := binary.BigEndian.Uint32(pe.Value[:4])
-
- // Get the GCM by term. Looking this up first has a cost, but it is
- // abnormal for the term not to match a known key
- gcm, err := b.aeadForTerm(term)
- b.l.RUnlock()
- if err != nil {
- return nil, err
- }
- if gcm == nil {
- return nil, fmt.Errorf("no decryption key available for term %d", term)
- }
-
- // Decrypt the ciphertext
- plain, err := b.decrypt(key, gcm, pe.Value)
- if err != nil {
- return nil, errwrap.Wrapf("decryption failed: {{err}}", err)
- }
-
- // Wrap in a logical entry
- entry := &Entry{
- Key: key,
- Value: plain,
- SealWrap: pe.SealWrap,
- }
- return entry, nil
-}
-
-// Delete is used to permanently delete an entry
-func (b *AESGCMBarrier) Delete(ctx context.Context, key string) error {
- defer metrics.MeasureSince([]string{"barrier", "delete"}, time.Now())
- b.l.RLock()
- sealed := b.sealed
- b.l.RUnlock()
- if sealed {
- return ErrBarrierSealed
- }
-
- return b.backend.Delete(ctx, key)
-}
-
-// List is used to list all the keys under a given
-// prefix, up to the next prefix.
-func (b *AESGCMBarrier) List(ctx context.Context, prefix string) ([]string, error) {
- defer metrics.MeasureSince([]string{"barrier", "list"}, time.Now())
- b.l.RLock()
- sealed := b.sealed
- b.l.RUnlock()
- if sealed {
- return nil, ErrBarrierSealed
- }
-
- return b.backend.List(ctx, prefix)
-}
-
-// aeadForTerm returns the AES-GCM AEAD for the given term
-func (b *AESGCMBarrier) aeadForTerm(term uint32) (cipher.AEAD, error) {
- // Check for the keyring
- keyring := b.keyring
- if keyring == nil {
- return nil, nil
- }
-
- // Check the cache for the aead
- b.cacheLock.RLock()
- aead, ok := b.cache[term]
- b.cacheLock.RUnlock()
- if ok {
- return aead, nil
- }
-
- // Read the underlying key
- key := keyring.TermKey(term)
- if key == nil {
- return nil, nil
- }
-
- // Create a new aead
- aead, err := b.aeadFromKey(key.Value)
- if err != nil {
- return nil, err
- }
-
- // Update the cache
- b.cacheLock.Lock()
- b.cache[term] = aead
- b.cacheLock.Unlock()
- return aead, nil
-}
-
-// aeadFromKey returns an AES-GCM AEAD using the given key.
-func (b *AESGCMBarrier) aeadFromKey(key []byte) (cipher.AEAD, error) {
- // Create the AES cipher
- aesCipher, err := aes.NewCipher(key)
- if err != nil {
- return nil, errwrap.Wrapf("failed to create cipher: {{err}}", err)
- }
-
- // Create the GCM mode AEAD
- gcm, err := cipher.NewGCM(aesCipher)
- if err != nil {
- return nil, fmt.Errorf("failed to initialize GCM mode")
- }
- return gcm, nil
-}
-
-// encrypt is used to encrypt a value
-func (b *AESGCMBarrier) encrypt(path string, term uint32, gcm cipher.AEAD, plain []byte) ([]byte, error) {
- // Allocate the output buffer with room for the term, version byte,
- // nonce, GCM tag and the plaintext
- capacity := termSize + 1 + gcm.NonceSize() + gcm.Overhead() + len(plain)
- size := termSize + 1 + gcm.NonceSize()
- out := make([]byte, size, capacity)
-
- // Set the key term
- binary.BigEndian.PutUint32(out[:4], term)
-
- // Set the version byte
- out[4] = b.currentAESGCMVersionByte
-
- // Generate a random nonce
- nonce := out[5 : 5+gcm.NonceSize()]
- n, err := rand.Read(nonce)
- if err != nil {
- return nil, err
- }
- if n != len(nonce) {
- return nil, errors.New("unable to read enough random bytes to fill gcm nonce")
- }
-
- // Seal the output
- switch b.currentAESGCMVersionByte {
- case AESGCMVersion1:
- out = gcm.Seal(out, nonce, plain, nil)
- case AESGCMVersion2:
- aad := []byte(nil)
- if path != "" {
- aad = []byte(path)
- }
- out = gcm.Seal(out, nonce, plain, aad)
- default:
- panic("Unknown AESGCM version")
- }
-
- return out, nil
-}
-
-// decrypt is used to decrypt a value using the keyring
-func (b *AESGCMBarrier) decrypt(path string, gcm cipher.AEAD, ciphertext []byte) ([]byte, error) {
- // Capture the parts; the parameter avoids shadowing the cipher package
- nonce := ciphertext[5 : 5+gcm.NonceSize()]
- raw := ciphertext[5+gcm.NonceSize():]
- out := make([]byte, 0, len(raw)-gcm.NonceSize())
-
- // Attempt to open
- switch ciphertext[4] {
- case AESGCMVersion1:
- return gcm.Open(out, nonce, raw, nil)
- case AESGCMVersion2:
- aad := []byte(nil)
- if path != "" {
- aad = []byte(path)
- }
- return gcm.Open(out, nonce, raw, aad)
- default:
- return nil, fmt.Errorf("version bytes mis-match")
- }
-}
-
-// Encrypt is used to encrypt in-memory for the BarrierEncryptor interface
-func (b *AESGCMBarrier) Encrypt(ctx context.Context, key string, plaintext []byte) ([]byte, error) {
- b.l.RLock()
- if b.sealed {
- b.l.RUnlock()
- return nil, ErrBarrierSealed
- }
-
- term := b.keyring.ActiveTerm()
- primary, err := b.aeadForTerm(term)
- b.l.RUnlock()
- if err != nil {
- return nil, err
- }
-
- ciphertext, err := b.encrypt(key, term, primary, plaintext)
- if err != nil {
- return nil, err
- }
- return ciphertext, nil
-}
-
-// Decrypt is used to decrypt in-memory for the BarrierEncryptor interface
-func (b *AESGCMBarrier) Decrypt(ctx context.Context, key string, ciphertext []byte) ([]byte, error) {
- b.l.RLock()
- if b.sealed {
- b.l.RUnlock()
- return nil, ErrBarrierSealed
- }
-
- // Verify the term
- term := binary.BigEndian.Uint32(ciphertext[:4])
-
- // Get the GCM by term. Looking this up first has a cost, but it is
- // abnormal for the term not to match a known key
- gcm, err := b.aeadForTerm(term)
- b.l.RUnlock()
- if err != nil {
- return nil, err
- }
- if gcm == nil {
- return nil, fmt.Errorf("no decryption key available for term %d", term)
- }
-
- // Decrypt the ciphertext
- plain, err := b.decrypt(key, gcm, ciphertext)
- if err != nil {
- return nil, errwrap.Wrapf("decryption failed: {{err}}", err)
- }
-
- return plain, nil
-}
-
-func (b *AESGCMBarrier) Keyring() (*Keyring, error) {
- b.l.RLock()
- defer b.l.RUnlock()
- if b.sealed {
- return nil, ErrBarrierSealed
- }
-
- return b.keyring.Clone(), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view.go b/vendor/github.com/hashicorp/vault/vault/barrier_view.go
deleted file mode 100644
index 94fbac9a..00000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_view.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "strings"
- "sync"
-
- "github.com/hashicorp/vault/logical"
-)
-
-// BarrierView wraps a SecurityBarrier and ensures all access is automatically
-// prefixed. This prevents anyone with access to the view from accessing any
-// data in the durable storage outside of its prefix. Conceptually this
-// is like a "chroot" into the barrier.
-//
-// BarrierView implements logical.Storage so it can be passed in as the
-// durable storage mechanism for logical views.
-type BarrierView struct {
- barrier BarrierStorage
- prefix string
- readOnlyErr error
- readOnlyErrLock sync.RWMutex
- iCheck interface{}
-}
-
-var (
- ErrRelativePath = errors.New("relative paths not supported")
-)
-
-// NewBarrierView takes an underlying security barrier and returns
-// a view of it that can only operate with the given prefix.
-func NewBarrierView(barrier BarrierStorage, prefix string) *BarrierView {
- return &BarrierView{
- barrier: barrier,
- prefix: prefix,
- }
-}
-
-func (v *BarrierView) setICheck(iCheck interface{}) {
- v.iCheck = iCheck
-}
-
-func (v *BarrierView) setReadOnlyErr(readOnlyErr error) {
- v.readOnlyErrLock.Lock()
- defer v.readOnlyErrLock.Unlock()
- v.readOnlyErr = readOnlyErr
-}
-
-func (v *BarrierView) getReadOnlyErr() error {
- v.readOnlyErrLock.RLock()
- defer v.readOnlyErrLock.RUnlock()
- return v.readOnlyErr
-}
-
-// sanityCheck rejects keys that contain relative path elements ("..")
-func (v *BarrierView) sanityCheck(key string) error {
- if strings.Contains(key, "..") {
- return ErrRelativePath
- }
- return nil
-}
-
-// logical.Storage impl.
-func (v *BarrierView) List(ctx context.Context, prefix string) ([]string, error) {
- if err := v.sanityCheck(prefix); err != nil {
- return nil, err
- }
- return v.barrier.List(ctx, v.expandKey(prefix))
-}
-
-// logical.Storage impl.
-func (v *BarrierView) Get(ctx context.Context, key string) (*logical.StorageEntry, error) {
- if err := v.sanityCheck(key); err != nil {
- return nil, err
- }
- entry, err := v.barrier.Get(ctx, v.expandKey(key))
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
- entry.Key = v.truncateKey(entry.Key)
-
- return &logical.StorageEntry{
- Key: entry.Key,
- Value: entry.Value,
- SealWrap: entry.SealWrap,
- }, nil
-}
-
-// logical.Storage impl.
-func (v *BarrierView) Put(ctx context.Context, entry *logical.StorageEntry) error {
- if entry == nil {
- return errors.New("cannot write nil entry")
- }
-
- if err := v.sanityCheck(entry.Key); err != nil {
- return err
- }
-
- expandedKey := v.expandKey(entry.Key)
-
- roErr := v.getReadOnlyErr()
- if roErr != nil {
- if runICheck(v, expandedKey, roErr) {
- return roErr
- }
- }
-
- nested := &Entry{
- Key: expandedKey,
- Value: entry.Value,
- SealWrap: entry.SealWrap,
- }
- return v.barrier.Put(ctx, nested)
-}
-
-// logical.Storage impl.
-func (v *BarrierView) Delete(ctx context.Context, key string) error {
- if err := v.sanityCheck(key); err != nil {
- return err
- }
-
- expandedKey := v.expandKey(key)
-
- roErr := v.getReadOnlyErr()
- if roErr != nil {
- if runICheck(v, expandedKey, roErr) {
- return roErr
- }
- }
-
- return v.barrier.Delete(ctx, expandedKey)
-}
-
-// SubView constructs a nested sub-view using the given prefix
-func (v *BarrierView) SubView(prefix string) *BarrierView {
- sub := v.expandKey(prefix)
- return &BarrierView{barrier: v.barrier, prefix: sub, readOnlyErr: v.getReadOnlyErr(), iCheck: v.iCheck}
-}
-
-// expandKey is used to expand to the full key path with the prefix
-func (v *BarrierView) expandKey(suffix string) string {
- return v.prefix + suffix
-}
-
-// truncateKey is used to remove the prefix of the key
-func (v *BarrierView) truncateKey(full string) string {
- return strings.TrimPrefix(full, v.prefix)
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/barrier_view_util.go b/vendor/github.com/hashicorp/vault/vault/barrier_view_util.go
deleted file mode 100644
index f7c63405..00000000
--- a/vendor/github.com/hashicorp/vault/vault/barrier_view_util.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// +build !enterprise
-
-package vault
-
-func runICheck(v *BarrierView, expandedKey string, roErr error) bool { return true }
diff --git a/vendor/github.com/hashicorp/vault/vault/capabilities.go b/vendor/github.com/hashicorp/vault/vault/capabilities.go
deleted file mode 100644
index 36e17bde..00000000
--- a/vendor/github.com/hashicorp/vault/vault/capabilities.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package vault
-
-import (
- "context"
- "sort"
-
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
-)
-
-// Capabilities is used to fetch the capabilities of the given token on the
-// given path
-func (c *Core) Capabilities(ctx context.Context, token, path string) ([]string, error) {
- if path == "" {
- return nil, &logical.StatusBadRequest{Err: "missing path"}
- }
-
- if token == "" {
- return nil, &logical.StatusBadRequest{Err: "missing token"}
- }
-
- te, err := c.tokenStore.Lookup(ctx, token)
- if err != nil {
- return nil, err
- }
- if te == nil {
- return nil, &logical.StatusBadRequest{Err: "invalid token"}
- }
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, c)
- if err != nil {
- return nil, err
- }
- if tokenNS == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- var policyCount int
- policyNames := make(map[string][]string)
- policyNames[tokenNS.ID] = te.Policies
- policyCount += len(te.Policies)
-
- entity, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, tokenNS, te.EntityID)
- if err != nil {
- return nil, err
- }
- if entity != nil && entity.Disabled {
- c.logger.Warn("permission denied as the entity on the token is disabled")
- return nil, logical.ErrPermissionDenied
- }
- if te.EntityID != "" && entity == nil {
- c.logger.Warn("permission denied as the entity on the token is invalid")
- return nil, logical.ErrPermissionDenied
- }
-
- for nsID, nsPolicies := range identityPolicies {
- policyNames[nsID] = append(policyNames[nsID], nsPolicies...)
- policyCount += len(nsPolicies)
- }
-
- if policyCount == 0 {
- return []string{DenyCapability}, nil
- }
-
- // Construct the corresponding ACL object. ACL construction should be
- // performed on the token's namespace.
- tokenCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- acl, err := c.policyStore.ACL(tokenCtx, entity, policyNames)
- if err != nil {
- return nil, err
- }
-
- capabilities := acl.Capabilities(ctx, path)
- sort.Strings(capabilities)
- return capabilities, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster.go b/vendor/github.com/hashicorp/vault/vault/cluster.go
deleted file mode 100644
index 356722a0..00000000
--- a/vendor/github.com/hashicorp/vault/vault/cluster.go
+++ /dev/null
@@ -1,382 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/json"
- "errors"
- "fmt"
- "math/big"
- mathrand "math/rand"
- "net"
- "net/http"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-const (
- // Storage path where the local cluster name and identifier are stored
- coreLocalClusterInfoPath = "core/cluster/local/info"
-
- corePrivateKeyTypeP521 = "p521"
- corePrivateKeyTypeED25519 = "ed25519"
-
- // Internal so as not to log a trace message
- IntNoForwardingHeaderName = "X-Vault-Internal-No-Request-Forwarding"
-)
-
-var (
- ErrCannotForward = errors.New("cannot forward request; no connection or address not known")
-)
-
-type ReplicatedClusters struct {
- DR *ReplicatedCluster
- Performance *ReplicatedCluster
-}
-
-// clusterKeyParams can describe one of a few key types, so individual params may or may not be filled
-type clusterKeyParams struct {
- Type string `json:"type" structs:"type" mapstructure:"type"`
- X *big.Int `json:"x" structs:"x" mapstructure:"x"`
- Y *big.Int `json:"y" structs:"y" mapstructure:"y"`
- D *big.Int `json:"d" structs:"d" mapstructure:"d"`
-}
-
-// Structure representing the storage entry that holds cluster information
-type Cluster struct {
- // Name of the cluster
- Name string `json:"name" structs:"name" mapstructure:"name"`
-
- // Identifier of the cluster
- ID string `json:"id" structs:"id" mapstructure:"id"`
-}
-
-// Cluster fetches the details of the local cluster. This method errors out
-// when Vault is sealed.
-func (c *Core) Cluster(ctx context.Context) (*Cluster, error) {
- var cluster Cluster
-
- // Fetch the storage entry. This call fails when Vault is sealed.
- entry, err := c.barrier.Get(ctx, coreLocalClusterInfoPath)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return &cluster, nil
- }
-
- // Decode the cluster information
- if err = jsonutil.DecodeJSON(entry.Value, &cluster); err != nil {
- return nil, errwrap.Wrapf("failed to decode cluster details: {{err}}", err)
- }
-
- // Prefer the cluster name set in the config file, if any
- if c.clusterName != "" {
- cluster.Name = c.clusterName
- }
-
- return &cluster, nil
-}
-
-// This sets our local cluster cert and private key based on the advertisement.
-// It also ensures the cert is in our local cluster cert pool.
-func (c *Core) loadLocalClusterTLS(adv activeAdvertisement) (retErr error) {
- defer func() {
- if retErr != nil {
- c.localClusterCert.Store(([]byte)(nil))
- c.localClusterParsedCert.Store((*x509.Certificate)(nil))
- c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil))
-
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
- }
- }()
-
- switch {
- case adv.ClusterAddr == "":
- // Clustering disabled on the server, don't try to look for params
- return nil
-
- case adv.ClusterKeyParams == nil:
- c.logger.Error("no key params found loading local cluster TLS information")
- return fmt.Errorf("no local cluster key params found")
-
- case adv.ClusterKeyParams.X == nil, adv.ClusterKeyParams.Y == nil, adv.ClusterKeyParams.D == nil:
- c.logger.Error("failed to parse local cluster key due to missing params")
- return fmt.Errorf("failed to parse local cluster key")
-
- case adv.ClusterKeyParams.Type != corePrivateKeyTypeP521:
- c.logger.Error("unknown local cluster key type", "key_type", adv.ClusterKeyParams.Type)
- return fmt.Errorf("failed to find valid local cluster key type")
-
- case len(adv.ClusterCert) == 0:
- c.logger.Error("no local cluster cert found")
- return fmt.Errorf("no local cluster cert found")
-
- }
-
- c.localClusterPrivateKey.Store(&ecdsa.PrivateKey{
- PublicKey: ecdsa.PublicKey{
- Curve: elliptic.P521(),
- X: adv.ClusterKeyParams.X,
- Y: adv.ClusterKeyParams.Y,
- },
- D: adv.ClusterKeyParams.D,
- })
-
- locCert := make([]byte, len(adv.ClusterCert))
- copy(locCert, adv.ClusterCert)
- c.localClusterCert.Store(locCert)
-
- cert, err := x509.ParseCertificate(adv.ClusterCert)
- if err != nil {
- c.logger.Error("failed parsing local cluster certificate", "error", err)
- return errwrap.Wrapf("error parsing local cluster certificate: {{err}}", err)
- }
-
- c.localClusterParsedCert.Store(cert)
-
- return nil
-}
-
-// setupCluster creates storage entries for holding Vault cluster information.
-// Entries will be created only if they are not already present. If clusterName
-// is not supplied, this method will auto-generate it.
-func (c *Core) setupCluster(ctx context.Context) error {
- // Prevent data races with the TLS parameters
- c.clusterParamsLock.Lock()
- defer c.clusterParamsLock.Unlock()
-
- // Check if cluster info is already present in storage
- cluster, err := c.Cluster(ctx)
- if err != nil {
- c.logger.Error("failed to get cluster details", "error", err)
- return err
- }
-
- var modified bool
-
- if cluster == nil {
- cluster = &Cluster{}
- }
-
- if cluster.Name == "" {
- // If cluster name is not supplied, generate one
- if c.clusterName == "" {
- c.logger.Debug("cluster name not found/set, generating new")
- clusterNameBytes, err := uuid.GenerateRandomBytes(4)
- if err != nil {
- c.logger.Error("failed to generate cluster name", "error", err)
- return err
- }
-
- c.clusterName = fmt.Sprintf("vault-cluster-%08x", clusterNameBytes)
- }
-
- cluster.Name = c.clusterName
- if c.logger.IsDebug() {
- c.logger.Debug("cluster name set", "name", cluster.Name)
- }
- modified = true
- }
-
- if cluster.ID == "" {
- c.logger.Debug("cluster ID not found, generating new")
- // Generate a clusterID
- cluster.ID, err = uuid.GenerateUUID()
- if err != nil {
- c.logger.Error("failed to generate cluster identifier", "error", err)
- return err
- }
- if c.logger.IsDebug() {
- c.logger.Debug("cluster ID set", "id", cluster.ID)
- }
- modified = true
- }
-
- // If we're using HA, generate server-to-server parameters
- if c.ha != nil {
- // Create a private key
- if c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey) == nil {
- c.logger.Debug("generating cluster private key")
- key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
- if err != nil {
- c.logger.Error("failed to generate local cluster key", "error", err)
- return err
- }
-
- c.localClusterPrivateKey.Store(key)
- }
-
- // Create a certificate
- if c.localClusterCert.Load().([]byte) == nil {
- c.logger.Debug("generating local cluster certificate")
-
- host, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- host = fmt.Sprintf("fw-%s", host)
- template := &x509.Certificate{
- Subject: pkix.Name{
- CommonName: host,
- },
- DNSNames: []string{host},
- ExtKeyUsage: []x509.ExtKeyUsage{
- x509.ExtKeyUsageServerAuth,
- x509.ExtKeyUsageClientAuth,
- },
- KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
- SerialNumber: big.NewInt(mathrand.Int63()),
- NotBefore: time.Now().Add(-30 * time.Second),
- // 30 years of single-active uptime ought to be enough for anybody
- NotAfter: time.Now().Add(262980 * time.Hour),
- BasicConstraintsValid: true,
- IsCA: true,
- }
-
- certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey).Public(), c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey))
- if err != nil {
- c.logger.Error("error generating self-signed cert", "error", err)
- return errwrap.Wrapf("unable to generate local cluster certificate: {{err}}", err)
- }
-
- parsedCert, err := x509.ParseCertificate(certBytes)
- if err != nil {
- c.logger.Error("error parsing self-signed cert", "error", err)
- return errwrap.Wrapf("error parsing generated certificate: {{err}}", err)
- }
-
- c.localClusterCert.Store(certBytes)
- c.localClusterParsedCert.Store(parsedCert)
- }
- }
-
- if modified {
- // Encode the cluster information as a JSON string
- rawCluster, err := json.Marshal(cluster)
- if err != nil {
- c.logger.Error("failed to encode cluster details", "error", err)
- return err
- }
-
- // Store it
- err = c.barrier.Put(ctx, &Entry{
- Key: coreLocalClusterInfoPath,
- Value: rawCluster,
- })
- if err != nil {
- c.logger.Error("failed to store cluster details", "error", err)
- return err
- }
- }
-
- return nil
-}
-
-// startClusterListener starts cluster request listeners during postunseal. It
-// is assumed that the state lock is held while this is run. Right now this
-// only starts forwarding listeners; it's TBD whether other request types will
-// be built in the same mechanism or started independently.
-func (c *Core) startClusterListener(ctx context.Context) error {
- if c.clusterAddr == "" {
- c.logger.Info("clustering disabled, not starting listeners")
- return nil
- }
-
- if len(c.clusterListenerAddrs) == 0 {
- c.logger.Warn("clustering not disabled but no addresses to listen on")
- return fmt.Errorf("cluster addresses not found")
- }
-
- c.logger.Debug("starting cluster listeners")
-
- err := c.startForwarding(ctx)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// stopClusterListener stops any existing listeners during preseal. It is
-// assumed that the state lock is held while this is run.
-func (c *Core) stopClusterListener() {
- if c.clusterAddr == "" {
- c.logger.Debug("clustering disabled, not stopping listeners")
- return
- }
-
- if !c.clusterListenersRunning {
- c.logger.Info("cluster listeners not running")
- return
- }
- c.logger.Info("stopping cluster listeners")
-
- // Tell the goroutine managing the listeners to perform the shutdown
- // process
- c.clusterListenerShutdownCh <- struct{}{}
-
- // The reason for this handshake is that we may be unsealing again
- // quickly, and if the listeners are not yet closed, we will get socket
- // bind errors. Waiting on the success channel ensures proper ordering.
-
- c.logger.Debug("waiting for success notification while stopping cluster listeners")
- <-c.clusterListenerShutdownSuccessCh
- c.clusterListenersRunning = false
-
- c.logger.Info("cluster listeners successfully shut down")
-}
-
-// ClusterTLSConfig generates a TLS configuration based on the local/replicated
-// cluster key and cert.
-func (c *Core) ClusterTLSConfig(ctx context.Context, repClusters *ReplicatedClusters, perfStandbyCluster *ReplicatedCluster) (*tls.Config, error) {
- // Using lookup functions allows just-in-time lookup of the current state
- // of clustering as connections come and go
-
- tlsConfig := &tls.Config{
- ClientAuth: tls.RequireAndVerifyClientCert,
- GetCertificate: clusterTLSServerLookup(ctx, c, repClusters, perfStandbyCluster),
- GetClientCertificate: clusterTLSClientLookup(ctx, c, repClusters, perfStandbyCluster),
- GetConfigForClient: clusterTLSServerConfigLookup(ctx, c, repClusters, perfStandbyCluster),
- MinVersion: tls.VersionTLS12,
- CipherSuites: c.clusterCipherSuites,
- }
-
- parsedCert := c.localClusterParsedCert.Load().(*x509.Certificate)
- currCert := c.localClusterCert.Load().([]byte)
- localCert := make([]byte, len(currCert))
- copy(localCert, currCert)
-
- if parsedCert != nil {
- tlsConfig.ServerName = parsedCert.Subject.CommonName
-
- pool := x509.NewCertPool()
- pool.AddCert(parsedCert)
- tlsConfig.RootCAs = pool
- tlsConfig.ClientCAs = pool
- }
-
- return tlsConfig, nil
-}
-
-func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
- c.clusterListenerAddrs = addrs
- if c.clusterAddr == "" && len(addrs) == 1 {
- c.clusterAddr = fmt.Sprintf("https://%s", addrs[0].String())
- }
-}
-
-func (c *Core) SetClusterHandler(handler http.Handler) {
- c.clusterHandler = handler
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/cluster_tls.go b/vendor/github.com/hashicorp/vault/vault/cluster_tls.go
deleted file mode 100644
index 4a63ecfa..00000000
--- a/vendor/github.com/hashicorp/vault/vault/cluster_tls.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/tls"
- "crypto/x509"
- "fmt"
-)
-
-var (
- clusterTLSServerLookup = func(ctx context.Context, c *Core, repClusters *ReplicatedClusters, _ *ReplicatedCluster) func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
- return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
- c.logger.Debug("performing server cert lookup")
-
- switch {
- default:
- currCert := c.localClusterCert.Load().([]byte)
- if len(currCert) == 0 {
- return nil, fmt.Errorf("got forwarding connection but no local cert")
- }
-
- localCert := make([]byte, len(currCert))
- copy(localCert, currCert)
-
- return &tls.Certificate{
- Certificate: [][]byte{localCert},
- PrivateKey: c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey),
- Leaf: c.localClusterParsedCert.Load().(*x509.Certificate),
- }, nil
- }
- }
- }
-
- clusterTLSClientLookup = func(ctx context.Context, c *Core, repClusters *ReplicatedClusters, _ *ReplicatedCluster) func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
- return func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- if len(requestInfo.AcceptableCAs) != 1 {
- return nil, fmt.Errorf("expected only a single acceptable CA")
- }
-
- currCert := c.localClusterCert.Load().([]byte)
- if len(currCert) == 0 {
- return nil, fmt.Errorf("forwarding connection client but no local cert")
- }
-
- localCert := make([]byte, len(currCert))
- copy(localCert, currCert)
-
- return &tls.Certificate{
- Certificate: [][]byte{localCert},
- PrivateKey: c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey),
- Leaf: c.localClusterParsedCert.Load().(*x509.Certificate),
- }, nil
- }
- }
-
- clusterTLSServerConfigLookup = func(ctx context.Context, c *Core, repClusters *ReplicatedClusters, repCluster *ReplicatedCluster) func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) {
- return func(clientHello *tls.ClientHelloInfo) (*tls.Config, error) {
- //c.logger.Trace("performing server config lookup")
-
- caPool := x509.NewCertPool()
-
- ret := &tls.Config{
- ClientAuth: tls.RequireAndVerifyClientCert,
- GetCertificate: clusterTLSServerLookup(ctx, c, repClusters, repCluster),
- GetClientCertificate: clusterTLSClientLookup(ctx, c, repClusters, repCluster),
- MinVersion: tls.VersionTLS12,
- RootCAs: caPool,
- ClientCAs: caPool,
- NextProtos: clientHello.SupportedProtos,
- CipherSuites: c.clusterCipherSuites,
- }
-
- parsedCert := c.localClusterParsedCert.Load().(*x509.Certificate)
-
- if parsedCert == nil {
- return nil, fmt.Errorf("forwarding connection client but no local cert")
- }
-
- caPool.AddCert(parsedCert)
-
- return ret, nil
- }
- }
-)
diff --git a/vendor/github.com/hashicorp/vault/vault/core.go b/vendor/github.com/hashicorp/vault/vault/core.go
deleted file mode 100644
index 90cc8d56..00000000
--- a/vendor/github.com/hashicorp/vault/vault/core.go
+++ /dev/null
@@ -1,1713 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/subtle"
- "crypto/x509"
- "errors"
- "fmt"
- "net"
- "net/http"
- "net/url"
- "path/filepath"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/armon/go-metrics"
- log "github.com/hashicorp/go-hclog"
- "github.com/patrickmn/go-cache"
-
- "google.golang.org/grpc"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/logging"
- "github.com/hashicorp/vault/helper/mlock"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/reload"
- "github.com/hashicorp/vault/helper/tlsutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/shamir"
-)
-
-const (
- // CoreLockPath is the path used to acquire a coordinating lock
- // for a highly-available deploy.
- CoreLockPath = "core/lock"
-
- // The poison pill is used as a check during certain scenarios to indicate
- // to standby nodes that they should seal
- poisonPillPath = "core/poison-pill"
-
- // coreLeaderPrefix is the prefix used for the UUID that contains
- // the currently elected leader.
- coreLeaderPrefix = "core/leader/"
-
- // knownPrimaryAddrsPrefix is used to store last-known cluster address
- // information for primaries
- knownPrimaryAddrsPrefix = "core/primary-addrs/"
-
- // coreKeyringCanaryPath is used as a canary to indicate to replicated
- // clusters that they need to perform a rekey operation synchronously; this
- // isn't keyring-canary to avoid ignoring it when ignoring core/keyring
- coreKeyringCanaryPath = "core/canary-keyring"
-)
-
-var (
- // ErrAlreadyInit is returned if the core is already
- // initialized. This prevents a re-initialization.
- ErrAlreadyInit = errors.New("Vault is already initialized")
-
- // ErrNotInit is returned if a non-initialized barrier
- // is attempted to be unsealed.
- ErrNotInit = errors.New("Vault is not initialized")
-
- // ErrInternalError is returned when we don't want to leak
- // any information about an internal error
- ErrInternalError = errors.New("internal error")
-
- // ErrHANotEnabled is returned if the operation only makes sense
- // in an HA setting
- ErrHANotEnabled = errors.New("Vault is not configured for highly-available mode")
-
- // manualStepDownSleepPeriod is how long to sleep after a user-initiated
- // step down of the active node, to prevent instantly regrabbing the lock.
- // It's var not const so that tests can manipulate it.
- manualStepDownSleepPeriod = 10 * time.Second
-
- // Functions only in the Enterprise version
- enterprisePostUnseal = enterprisePostUnsealImpl
- enterprisePreSeal = enterprisePreSealImpl
- startReplication = startReplicationImpl
- stopReplication = stopReplicationImpl
- LastWAL = lastWALImpl
- LastRemoteWAL = lastRemoteWALImpl
- WaitUntilWALShipped = waitUntilWALShippedImpl
-)
-
-// NonFatalError is an error that can be returned during NewCore that should be
-// displayed but not cause a program exit
-type NonFatalError struct {
- Err error
-}
-
-func (e *NonFatalError) WrappedErrors() []error {
- return []error{e.Err}
-}
-
-func (e *NonFatalError) Error() string {
- return e.Err.Error()
-}
-
-// ErrInvalidKey is returned if there is a user-based error with a provided
-// unseal key. This will be shown to the user, so should not contain
-// information that is sensitive.
-type ErrInvalidKey struct {
- Reason string
-}
-
-func (e *ErrInvalidKey) Error() string {
- return fmt.Sprintf("invalid key: %v", e.Reason)
-}
-
-type RegisterAuthFunc func(context.Context, time.Duration, string, *logical.Auth) error
-
-type activeAdvertisement struct {
- RedirectAddr string `json:"redirect_addr"`
- ClusterAddr string `json:"cluster_addr,omitempty"`
- ClusterCert []byte `json:"cluster_cert,omitempty"`
- ClusterKeyParams *clusterKeyParams `json:"cluster_key_params,omitempty"`
-}
-
-type unlockInformation struct {
- Parts [][]byte
- Nonce string
-}
-
-// Core is used as the central manager of Vault activity. It is the primary point of
-// interface for API handlers and is responsible for managing the logical and physical
-// backends, router, security barrier, and audit trails.
-type Core struct {
- entCore
-
- // The registry of builtin plugins is passed in here as an interface because
- // if it's used directly, it results in import cycles.
- builtinRegistry BuiltinRegistry
-
- // N.B.: This is used to populate a dev token down replication, as
- // otherwise, after replication is started, a dev would have to go through
- // the generate-root process simply to talk to the new follower cluster.
- devToken string
-
- // HABackend may be available depending on the physical backend
- ha physical.HABackend
-
- // redirectAddr is the address we advertise as leader if held
- redirectAddr string
-
- // clusterAddr is the address we use for clustering
- clusterAddr string
-
- // physical backend is the un-trusted backend with durable data
- physical physical.Backend
-
- // seal is our seal, for seal configuration information
- seal Seal
-
- // migrationSeal is the seal to use during a migration operation. It is the
- // seal we're migrating *from*.
- migrationSeal Seal
-
- // barrier is the security barrier wrapping the physical backend
- barrier SecurityBarrier
-
- // router is responsible for managing the mount points for logical backends.
- router *Router
-
- // logicalBackends is the mapping of backends to use for this core
- logicalBackends map[string]logical.Factory
-
- // credentialBackends is the mapping of backends to use for this core
- credentialBackends map[string]logical.Factory
-
- // auditBackends is the mapping of backends to use for this core
- auditBackends map[string]audit.Factory
-
- // stateLock protects mutable state
- stateLock sync.RWMutex
- sealed *uint32
-
- standby bool
- perfStandby bool
- standbyDoneCh chan struct{}
- standbyStopCh chan struct{}
- manualStepDownCh chan struct{}
- keepHALockOnStepDown *uint32
- heldHALock physical.Lock
-
- // unlockInfo has the keys provided to Unseal until the threshold number of parts is available, as well as the operation nonce
- unlockInfo *unlockInformation
-
- // generateRootProgress holds the shares until we reach enough
- // to verify the master key
- generateRootConfig *GenerateRootConfig
- generateRootProgress [][]byte
- generateRootLock sync.Mutex
-
- // These variables holds the config and shares we have until we reach
- // enough to verify the appropriate master key. Note that the same lock is
- // used; this isn't time-critical so this shouldn't be a problem.
- barrierRekeyConfig *SealConfig
- recoveryRekeyConfig *SealConfig
- rekeyLock sync.RWMutex
-
- // mounts is loaded after unseal since it is a protected
- // configuration
- mounts *MountTable
-
- // mountsLock is used to ensure that the mounts table does not
- // change underneath a calling function
- mountsLock sync.RWMutex
-
- // auth is loaded after unseal since it is a protected
- // configuration
- auth *MountTable
-
- // authLock is used to ensure that the auth table does not
- // change underneath a calling function
- authLock sync.RWMutex
-
- // audit is loaded after unseal since it is a protected
- // configuration
- audit *MountTable
-
- // auditLock is used to ensure that the audit table does not
- // change underneath a calling function
- auditLock sync.RWMutex
-
- // auditBroker is used to ingest the audit events and fan
- // out into the configured audit backends
- auditBroker *AuditBroker
-
- // auditedHeaders is used to configure which http headers
- // can be output in the audit logs
- auditedHeaders *AuditedHeadersConfig
-
- // systemBackend is the backend which is used to manage internal operations
- systemBackend *SystemBackend
-
- // cubbyholeBackend is the backend which manages the per-token storage
- cubbyholeBackend *CubbyholeBackend
-
- // systemBarrierView is the barrier view for the system backend
- systemBarrierView *BarrierView
-
- // expiration manager is used for managing LeaseIDs,
- // renewal, expiration and revocation
- expiration *ExpirationManager
-
- // rollback manager is used to run rollbacks periodically
- rollback *RollbackManager
-
- // policy store is used to manage named ACL policies
- policyStore *PolicyStore
-
- // token store is used to manage authentication tokens
- tokenStore *TokenStore
-
- // identityStore is used to manage client entities
- identityStore *IdentityStore
-
- // metricsCh is used to stop the metrics streaming
- metricsCh chan struct{}
-
- // metricsMutex is used to prevent a race condition between
- // metrics emission and sealing leading to a nil pointer
- metricsMutex sync.Mutex
-
- defaultLeaseTTL time.Duration
- maxLeaseTTL time.Duration
-
- // baseLogger is used to avoid ResetNamed as it strips useful prefixes in
- // e.g. testing
- baseLogger log.Logger
- logger log.Logger
-
- // cachingDisabled indicates whether caches are disabled
- cachingDisabled bool
- // Cache stores the actual cache; we always have this but may bypass it if
- // disabled
- physicalCache physical.ToggleablePurgemonster
-
- // reloadFuncs is a map containing reload functions
- reloadFuncs map[string][]reload.ReloadFunc
-
- // reloadFuncsLock controls access to the funcs
- reloadFuncsLock sync.RWMutex
-
- // wrappingJWTKey is the key used for generating JWTs containing response
- // wrapping information
- wrappingJWTKey *ecdsa.PrivateKey
-
- //
- // Cluster information
- //
- // Name
- clusterName string
- // Specific cipher suites to use for clustering, if any
- clusterCipherSuites []uint16
- // Used to modify cluster parameters
- clusterParamsLock sync.RWMutex
- // The private key stored in the barrier used for establishing
- // mutually-authenticated connections between Vault cluster members
- localClusterPrivateKey *atomic.Value
- // The local cluster cert
- localClusterCert *atomic.Value
- // The parsed form of the local cluster cert
- localClusterParsedCert *atomic.Value
- // The TCP addresses we should use for clustering
- clusterListenerAddrs []*net.TCPAddr
- // The handler to use for request forwarding
- clusterHandler http.Handler
- // Tracks whether cluster listeners are running, e.g. it's safe to send a
- // shutdown down the channel
- clusterListenersRunning bool
- // Shutdown channel for the cluster listeners
- clusterListenerShutdownCh chan struct{}
- // Shutdown success channel. We need this to be done serially to ensure
- // that binds are removed before they might be reinstated.
- clusterListenerShutdownSuccessCh chan struct{}
- // Write lock used to ensure that we don't have multiple connections adjust
- // this value at the same time
- requestForwardingConnectionLock sync.RWMutex
- // Most recent leader UUID. Used to avoid repeatedly JSON parsing the same
- // values.
- clusterLeaderUUID string
- // Most recent leader redirect addr
- clusterLeaderRedirectAddr string
- // Most recent leader cluster addr
- clusterLeaderClusterAddr string
- // Lock for the cluster leader values
- clusterLeaderParamsLock sync.RWMutex
- // Info on cluster members
- clusterPeerClusterAddrsCache *cache.Cache
- // Stores whether we currently have a server running
- rpcServerActive *uint32
- // The context for the client
- rpcClientConnContext context.Context
- // The function for canceling the client connection
- rpcClientConnCancelFunc context.CancelFunc
- // The grpc ClientConn for RPC calls
- rpcClientConn *grpc.ClientConn
- // The grpc forwarding client
- rpcForwardingClient *forwardingClient
-
- // CORS Information
- corsConfig *CORSConfig
-
- // The active set of upstream cluster addresses; stored via the Echo
- // mechanism, loaded by the balancer
- atomicPrimaryClusterAddrs *atomic.Value
-
- atomicPrimaryFailoverAddrs *atomic.Value
-
- // replicationState keeps the current replication state cached for quick
- // lookup; activeNodeReplicationState stores the active value on standbys
- replicationState *uint32
- activeNodeReplicationState *uint32
-
- // uiConfig contains UI configuration
- uiConfig *UIConfig
-
- // rawEnabled indicates whether the Raw endpoint is enabled
- rawEnabled bool
-
- // pluginDirectory is the location vault will look for plugin binaries
- pluginDirectory string
-
- // pluginCatalog is used to manage plugin configurations
- pluginCatalog *PluginCatalog
-
- enableMlock bool
-
- // This can be used to trigger operations to stop running when Vault is
- // going to be shut down, stepped down, or sealed
- activeContext context.Context
- activeContextCancelFunc *atomic.Value
-
- // Stores the sealunwrapper for downgrade needs
- sealUnwrapper physical.Backend
-
- // Stores any funcs that should be run on successful postUnseal
- postUnsealFuncs []func()
-
- // replicationFailure is used to mark when replication has entered an
- // unrecoverable failure.
- replicationFailure *uint32
-
- // disablePerfStandby is used to tell a standby not to attempt to
- // become a perf standby
- disablePerfStandby bool
-
- licensingStopCh chan struct{}
-
- // Stores loggers so we can reset the level
- allLoggers []log.Logger
- allLoggersLock sync.RWMutex
-}
-
-// CoreConfig is used to parameterize a core
-type CoreConfig struct {
- DevToken string `json:"dev_token" structs:"dev_token" mapstructure:"dev_token"`
-
- BuiltinRegistry BuiltinRegistry `json:"builtin_registry" structs:"builtin_registry" mapstructure:"builtin_registry"`
-
- LogicalBackends map[string]logical.Factory `json:"logical_backends" structs:"logical_backends" mapstructure:"logical_backends"`
-
- CredentialBackends map[string]logical.Factory `json:"credential_backends" structs:"credential_backends" mapstructure:"credential_backends"`
-
- AuditBackends map[string]audit.Factory `json:"audit_backends" structs:"audit_backends" mapstructure:"audit_backends"`
-
- Physical physical.Backend `json:"physical" structs:"physical" mapstructure:"physical"`
-
- // May be nil, which disables HA operations
- HAPhysical physical.HABackend `json:"ha_physical" structs:"ha_physical" mapstructure:"ha_physical"`
-
- Seal Seal `json:"seal" structs:"seal" mapstructure:"seal"`
-
- Logger log.Logger `json:"logger" structs:"logger" mapstructure:"logger"`
-
- // Disables the LRU cache on the physical backend
- DisableCache bool `json:"disable_cache" structs:"disable_cache" mapstructure:"disable_cache"`
-
- // Disables mlock syscall
- DisableMlock bool `json:"disable_mlock" structs:"disable_mlock" mapstructure:"disable_mlock"`
-
- // Custom cache size for the LRU cache on the physical backend, or zero for default
- CacheSize int `json:"cache_size" structs:"cache_size" mapstructure:"cache_size"`
-
- // Set as the leader address for HA
- RedirectAddr string `json:"redirect_addr" structs:"redirect_addr" mapstructure:"redirect_addr"`
-
- // Set as the cluster address for HA
- ClusterAddr string `json:"cluster_addr" structs:"cluster_addr" mapstructure:"cluster_addr"`
-
- DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
-
- MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
-
- ClusterName string `json:"cluster_name" structs:"cluster_name" mapstructure:"cluster_name"`
-
- ClusterCipherSuites string `json:"cluster_cipher_suites" structs:"cluster_cipher_suites" mapstructure:"cluster_cipher_suites"`
-
- EnableUI bool `json:"ui" structs:"ui" mapstructure:"ui"`
-
- // Enable the raw endpoint
- EnableRaw bool `json:"enable_raw" structs:"enable_raw" mapstructure:"enable_raw"`
-
- PluginDirectory string `json:"plugin_directory" structs:"plugin_directory" mapstructure:"plugin_directory"`
-
- DisableSealWrap bool `json:"disable_sealwrap" structs:"disable_sealwrap" mapstructure:"disable_sealwrap"`
-
- ReloadFuncs *map[string][]reload.ReloadFunc
- ReloadFuncsLock *sync.RWMutex
-
- // Licensing
- LicensingConfig *LicensingConfig
- // Don't set this unless in dev mode, ideally only when using inmem
- DevLicenseDuration time.Duration
-
- DisablePerformanceStandby bool
- DisableIndexing bool
- DisableKeyEncodingChecks bool
-
- AllLoggers []log.Logger
-}
-
-func (c *CoreConfig) Clone() *CoreConfig {
- return &CoreConfig{
- DevToken: c.DevToken,
- BuiltinRegistry: c.BuiltinRegistry,
- LogicalBackends: c.LogicalBackends,
- CredentialBackends: c.CredentialBackends,
- AuditBackends: c.AuditBackends,
- Physical: c.Physical,
- HAPhysical: c.HAPhysical,
- Seal: c.Seal,
- Logger: c.Logger,
- DisableCache: c.DisableCache,
- DisableMlock: c.DisableMlock,
- CacheSize: c.CacheSize,
- RedirectAddr: c.RedirectAddr,
- ClusterAddr: c.ClusterAddr,
- DefaultLeaseTTL: c.DefaultLeaseTTL,
- MaxLeaseTTL: c.MaxLeaseTTL,
- ClusterName: c.ClusterName,
- ClusterCipherSuites: c.ClusterCipherSuites,
- EnableUI: c.EnableUI,
- EnableRaw: c.EnableRaw,
- PluginDirectory: c.PluginDirectory,
- DisableSealWrap: c.DisableSealWrap,
- ReloadFuncs: c.ReloadFuncs,
- ReloadFuncsLock: c.ReloadFuncsLock,
- LicensingConfig: c.LicensingConfig,
- DevLicenseDuration: c.DevLicenseDuration,
- DisablePerformanceStandby: c.DisablePerformanceStandby,
- DisableIndexing: c.DisableIndexing,
- DisableKeyEncodingChecks: c.DisableKeyEncodingChecks,
- AllLoggers: c.AllLoggers,
- }
-}
-
-// NewCore is used to construct a new core
-func NewCore(conf *CoreConfig) (*Core, error) {
- if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
- if conf.RedirectAddr == "" {
- return nil, fmt.Errorf("missing API address, please set in configuration or via environment")
- }
- }
-
- if conf.DefaultLeaseTTL == 0 {
- conf.DefaultLeaseTTL = defaultLeaseTTL
- }
- if conf.MaxLeaseTTL == 0 {
- conf.MaxLeaseTTL = maxLeaseTTL
- }
- if conf.DefaultLeaseTTL > conf.MaxLeaseTTL {
- return nil, fmt.Errorf("cannot have DefaultLeaseTTL larger than MaxLeaseTTL")
- }
-
- // Validate the redirect address if it's given to us
- if conf.RedirectAddr != "" {
- u, err := url.Parse(conf.RedirectAddr)
- if err != nil {
- return nil, errwrap.Wrapf("redirect address is not valid url: {{err}}", err)
- }
-
- if u.Scheme == "" {
- return nil, fmt.Errorf("redirect address must include scheme (ex. 'http')")
- }
- }
-
- // Make a default logger if not provided
- if conf.Logger == nil {
- conf.Logger = logging.NewVaultLogger(log.Trace)
- }
-
- // Setup the core
- c := &Core{
- entCore: entCore{},
- devToken: conf.DevToken,
- physical: conf.Physical,
- redirectAddr: conf.RedirectAddr,
- clusterAddr: conf.ClusterAddr,
- seal: conf.Seal,
- router: NewRouter(),
- sealed: new(uint32),
- standby: true,
- baseLogger: conf.Logger,
- logger: conf.Logger.Named("core"),
- defaultLeaseTTL: conf.DefaultLeaseTTL,
- maxLeaseTTL: conf.MaxLeaseTTL,
- cachingDisabled: conf.DisableCache,
- clusterName: conf.ClusterName,
- clusterListenerShutdownCh: make(chan struct{}),
- clusterListenerShutdownSuccessCh: make(chan struct{}),
- clusterPeerClusterAddrsCache: cache.New(3*HeartbeatInterval, time.Second),
- enableMlock: !conf.DisableMlock,
- rawEnabled: conf.EnableRaw,
- replicationState: new(uint32),
- rpcServerActive: new(uint32),
- atomicPrimaryClusterAddrs: new(atomic.Value),
- atomicPrimaryFailoverAddrs: new(atomic.Value),
- localClusterPrivateKey: new(atomic.Value),
- localClusterCert: new(atomic.Value),
- localClusterParsedCert: new(atomic.Value),
- activeNodeReplicationState: new(uint32),
- keepHALockOnStepDown: new(uint32),
- replicationFailure: new(uint32),
- disablePerfStandby: true,
- activeContextCancelFunc: new(atomic.Value),
- allLoggers: conf.AllLoggers,
- builtinRegistry: conf.BuiltinRegistry,
- }
-
- atomic.StoreUint32(c.sealed, 1)
- c.allLoggers = append(c.allLoggers, c.logger)
-
- atomic.StoreUint32(c.replicationState, uint32(consts.ReplicationDRDisabled|consts.ReplicationPerformanceDisabled))
- c.localClusterCert.Store(([]byte)(nil))
- c.localClusterParsedCert.Store((*x509.Certificate)(nil))
- c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil))
-
- c.activeContextCancelFunc.Store((context.CancelFunc)(nil))
-
- if conf.ClusterCipherSuites != "" {
- suites, err := tlsutil.ParseCiphers(conf.ClusterCipherSuites)
- if err != nil {
- return nil, errwrap.Wrapf("error parsing cluster cipher suites: {{err}}", err)
- }
- c.clusterCipherSuites = suites
- }
-
- // Construct a default CORS config and provide a value for the core field;
- // the stored configuration is loaded later during post-unseal.
- c.corsConfig = &CORSConfig{
- core: c,
- Enabled: new(uint32),
- }
-
- if c.seal == nil {
- c.seal = NewDefaultSeal()
- }
- c.seal.SetCore(c)
-
- if err := coreInit(c, conf); err != nil {
- return nil, err
- }
-
- if !conf.DisableMlock {
- // Ensure our memory usage is locked into physical RAM
- if err := mlock.LockMemory(); err != nil {
- return nil, fmt.Errorf(
- "Failed to lock memory: %v\n\n"+
- "This usually means that the mlock syscall is not available.\n"+
- "Vault uses mlock to prevent memory from being swapped to\n"+
- "disk. This requires root privileges as well as a machine\n"+
- "that supports mlock. Please enable mlock on your system or\n"+
- "disable Vault from using it. To disable Vault from using it,\n"+
- "set the `disable_mlock` configuration option in your configuration\n"+
- "file.",
- err)
- }
- }
-
- var err error
-
- if conf.PluginDirectory != "" {
- c.pluginDirectory, err = filepath.Abs(conf.PluginDirectory)
- if err != nil {
- return nil, errwrap.Wrapf("core setup failed, could not verify plugin directory: {{err}}", err)
- }
- }
-
- // Construct a new AES-GCM barrier
- c.barrier, err = NewAESGCMBarrier(c.physical)
- if err != nil {
- return nil, errwrap.Wrapf("barrier setup failed: {{err}}", err)
- }
-
- createSecondaries(c, conf)
-
- if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
- c.ha = conf.HAPhysical
- }
-
- // We create the funcs here, then populate the given config with it so that
- // the caller can share state
- conf.ReloadFuncsLock = &c.reloadFuncsLock
- c.reloadFuncsLock.Lock()
- c.reloadFuncs = make(map[string][]reload.ReloadFunc)
- c.reloadFuncsLock.Unlock()
- conf.ReloadFuncs = &c.reloadFuncs
-
- logicalBackends := make(map[string]logical.Factory)
- for k, f := range conf.LogicalBackends {
- logicalBackends[k] = f
- }
- _, ok := logicalBackends["kv"]
- if !ok {
- logicalBackends["kv"] = PassthroughBackendFactory
- }
-
- logicalBackends["cubbyhole"] = CubbyholeBackendFactory
- logicalBackends[systemMountType] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
- sysBackendLogger := conf.Logger.Named("system")
- c.AddLogger(sysBackendLogger)
- b := NewSystemBackend(c, sysBackendLogger)
- if err := b.Setup(ctx, config); err != nil {
- return nil, err
- }
- return b, nil
- }
- logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
- identityLogger := conf.Logger.Named("identity")
- c.AddLogger(identityLogger)
- return NewIdentityStore(ctx, c, config, identityLogger)
- }
- addExtraLogicalBackends(c, logicalBackends)
- c.logicalBackends = logicalBackends
-
- credentialBackends := make(map[string]logical.Factory)
- for k, f := range conf.CredentialBackends {
- credentialBackends[k] = f
- }
- credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
- tsLogger := conf.Logger.Named("token")
- c.AddLogger(tsLogger)
- return NewTokenStore(ctx, tsLogger, c, config)
- }
- addExtraCredentialBackends(c, credentialBackends)
- c.credentialBackends = credentialBackends
-
- auditBackends := make(map[string]audit.Factory)
- for k, f := range conf.AuditBackends {
- auditBackends[k] = f
- }
- c.auditBackends = auditBackends
-
- uiStoragePrefix := systemBarrierPrefix + "ui"
- c.uiConfig = NewUIConfig(conf.EnableUI, physical.NewView(c.physical, uiStoragePrefix), NewBarrierView(c.barrier, uiStoragePrefix))
-
- return c, nil
-}
-
-// Shutdown is invoked when the Vault instance is about to be terminated. It
-// should not be accessible as part of an API call as it will cause an availability
-// problem. It is only used to gracefully quit in the case of HA so that failover
-// happens as quickly as possible.
-func (c *Core) Shutdown() error {
- c.logger.Debug("shutdown called")
- return c.sealInternal()
-}
-
-// CORSConfig returns the current CORS configuration
-func (c *Core) CORSConfig() *CORSConfig {
- return c.corsConfig
-}
-
-func (c *Core) GetContext() (context.Context, context.CancelFunc) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
-
- return context.WithCancel(namespace.RootContext(c.activeContext))
-}
-
-// Sealed checks if the Vault is currently sealed
-func (c *Core) Sealed() bool {
- return atomic.LoadUint32(c.sealed) == 1
-}
-
-// SecretProgress returns the number of keys provided so far
-func (c *Core) SecretProgress() (int, string) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- switch c.unlockInfo {
- case nil:
- return 0, ""
- default:
- return len(c.unlockInfo.Parts), c.unlockInfo.Nonce
- }
-}
-
-// ResetUnsealProcess removes the current unlock parts from memory, to reset
-// the unsealing process
-func (c *Core) ResetUnsealProcess() {
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
- c.unlockInfo = nil
-}
-
-// Unseal is used to provide one of the key parts to unseal the Vault.
-//
-// The key given as a parameter will automatically be zeroed after
-// this method is done with it. If you want to keep the key around, a copy
-// should be made.
-func (c *Core) Unseal(key []byte) (bool, error) {
- return c.unseal(key, false)
-}
-
-func (c *Core) UnsealWithRecoveryKeys(key []byte) (bool, error) {
- return c.unseal(key, true)
-}
-
-func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
- defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())
-
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
-
- ctx := context.Background()
-
- // Explicitly check for init status. This also checks if the seal
- // configuration is valid (i.e. non-nil).
- init, err := c.Initialized(ctx)
- if err != nil {
- return false, err
- }
- if !init {
- return false, ErrNotInit
- }
-
- // Verify the key length
- min, max := c.barrier.KeyLength()
- max += shamir.ShareOverhead
- if len(key) < min {
- return false, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
- }
- if len(key) > max {
- return false, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
- }
-
- // Check if already unsealed
- if !c.Sealed() {
- return true, nil
- }
-
- sealToUse := c.seal
- if c.migrationSeal != nil {
- sealToUse = c.migrationSeal
- }
-
- masterKey, err := c.unsealPart(ctx, sealToUse, key, useRecoveryKeys)
- if err != nil {
- return false, err
- }
- if masterKey != nil {
- return c.unsealInternal(ctx, masterKey)
- }
-
- return false, nil
-}
-
-// unsealPart takes in a key share, and returns the master key if the threshold
-// is met. If recovery keys are supported, recovery key shares may be provided.
-func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecoveryKeys bool) ([]byte, error) {
- // Check if we already have this piece
- if c.unlockInfo != nil {
- for _, existing := range c.unlockInfo.Parts {
- if subtle.ConstantTimeCompare(existing, key) == 1 {
- return nil, nil
- }
- }
- } else {
- uuid, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- c.unlockInfo = &unlockInformation{
- Nonce: uuid,
- }
- }
-
- // Store this key
- c.unlockInfo.Parts = append(c.unlockInfo.Parts, key)
-
- var config *SealConfig
- var err error
- if seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationSeal != nil) {
- config, err = seal.RecoveryConfig(ctx)
- } else {
- config, err = seal.BarrierConfig(ctx)
- }
- if err != nil {
- return nil, err
- }
-
- // Check whether we have enough keys to unlock; proceed through the rest of
- // the call only if the threshold has been met
- if len(c.unlockInfo.Parts) < config.SecretThreshold {
- if c.logger.IsDebug() {
- c.logger.Debug("cannot unseal, not enough keys", "keys", len(c.unlockInfo.Parts), "threshold", config.SecretThreshold, "nonce", c.unlockInfo.Nonce)
- }
- return nil, nil
- }
-
- // Best-effort memzero of unlock parts once we're done with them
- defer func() {
- for i := range c.unlockInfo.Parts {
- memzero(c.unlockInfo.Parts[i])
- }
- c.unlockInfo = nil
- }()
-
- // Recover the split key. recoveredKey is the shamir combined
- // key, or the single provided key if the threshold is 1.
- var recoveredKey []byte
- var masterKey []byte
- var recoveryKey []byte
- if config.SecretThreshold == 1 {
- recoveredKey = make([]byte, len(c.unlockInfo.Parts[0]))
- copy(recoveredKey, c.unlockInfo.Parts[0])
- } else {
- recoveredKey, err = shamir.Combine(c.unlockInfo.Parts)
- if err != nil {
- return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err)
- }
- }
-
- if seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationSeal != nil) {
- // Verify recovery key
- if err := seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil {
- return nil, err
- }
- recoveryKey = recoveredKey
-
- // Get stored keys and shamir combine them into a single master key. Unsealing
- // with recovery keys currently does not support 1) a mixed stored and
- // non-stored key setup, or 2) seals that support recovery keys but not stored keys.
- // If insufficient shares are provided, shamir.Combine will error, and if
- // no stored keys are found it will return masterKey as nil.
- if seal.StoredKeysSupported() {
- masterKeyShares, err := seal.GetStoredKeys(ctx)
- if err != nil {
- return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err)
- }
-
- if len(masterKeyShares) == 1 {
- masterKey = masterKeyShares[0]
- } else {
- masterKey, err = shamir.Combine(masterKeyShares)
- if err != nil {
- return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err)
- }
- }
- }
- } else {
- masterKey = recoveredKey
- }
-
- // If we have a migration seal, now's the time!
- if c.migrationSeal != nil {
- // Unseal the barrier so we can rekey
- if err := c.barrier.Unseal(ctx, masterKey); err != nil {
- return nil, errwrap.Wrapf("error unsealing barrier with constructed master key: {{err}}", err)
- }
- defer c.barrier.Seal()
-
- // The seal used in this function will have been the migration seal,
- // and c.seal will be the opposite type, so there are two
- // possibilities: Shamir to auto, and auto to Shamir.
- if !seal.RecoveryKeySupported() {
- // The new seal will have recovery keys; we set it to the existing
- // master key, so barrier key shares -> recovery key shares
- if err := c.seal.SetRecoveryKey(ctx, masterKey); err != nil {
- return nil, errwrap.Wrapf("error setting new recovery key information: {{err}}", err)
- }
-
- // Generate a new master key
- newMasterKey, err := c.barrier.GenerateKey()
- if err != nil {
- return nil, errwrap.Wrapf("error generating new master key: {{err}}", err)
- }
-
- // Rekey the barrier
- if err := c.barrier.Rekey(ctx, newMasterKey); err != nil {
- return nil, errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err)
- }
-
- // Store the new master key
- if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil {
- return nil, errwrap.Wrapf("error storing new master key: {[err}}", err)
- }
-
- // Return the new key so it can be used to unlock the barrier
- masterKey = newMasterKey
- } else {
- // In this case we have to ensure that the recovery information was
- // set properly.
- if recoveryKey == nil {
- return nil, errors.New("did not get expected recovery information to set new seal during migration")
- }
-
- // Auto to Shamir. We have recovery keys; we're going to use them
- // as the new barrier key
- if err := c.barrier.Rekey(ctx, recoveryKey); err != nil {
- return nil, errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err)
- }
-
- if err := c.barrier.Delete(ctx, StoredBarrierKeysPath); err != nil {
- // Don't actually exit here as successful deletion isn't critical
- c.logger.Error("error deleting stored barrier keys after migration; continuing anyways", "error", err)
- }
-
- masterKey = recoveryKey
- }
-
- // At this point we've swapped things around and need to ensure we
- // don't migrate again
- c.migrationSeal = nil
-
- // Ensure we populate the new values
- bc, err := c.seal.BarrierConfig(ctx)
- if err != nil {
- return nil, errwrap.Wrapf("error fetching barrier config after migration: {{err}}", err)
- }
- if err := c.seal.SetBarrierConfig(ctx, bc); err != nil {
- return nil, errwrap.Wrapf("error storing barrier config after migration: {{err}}", err)
- }
-
- if c.seal.RecoveryKeySupported() {
- rc, err := c.seal.RecoveryConfig(ctx)
- if err != nil {
- return nil, errwrap.Wrapf("error fetching recovery config after migration: {{err}}", err)
- }
- if err := c.seal.SetRecoveryConfig(ctx, rc); err != nil {
- return nil, errwrap.Wrapf("error storing recovery config after migration: {{err}}", err)
- }
- }
- }
-
- return masterKey, nil
-}
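For context on the threshold logic above: the key shares are combined with Shamir's Secret Sharing. A minimal standalone sketch of the split/combine round trip, using the same shamir package (the secret bytes and share counts here are invented):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/hashicorp/vault/shamir"
    )

    func main() {
        // Invented key material, standing in for a barrier master key.
        secret := []byte("example-master-key-material")

        // Split into 5 shares with a threshold of 3.
        shares, err := shamir.Split(secret, 5, 3)
        if err != nil {
            panic(err)
        }

        // Any 3 shares reconstruct the original secret, as in unsealPart.
        recovered, err := shamir.Combine(shares[:3])
        if err != nil {
            panic(err)
        }
        fmt.Println("recovered:", bytes.Equal(recovered, secret))
    }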
-
-// unsealInternal takes in the master key and attempts to unseal the barrier.
-// N.B.: This must be called with the state write lock held.
-func (c *Core) unsealInternal(ctx context.Context, masterKey []byte) (bool, error) {
- defer memzero(masterKey)
-
- // Attempt to unlock
- if err := c.barrier.Unseal(ctx, masterKey); err != nil {
- return false, err
- }
- if c.logger.IsInfo() {
- c.logger.Info("vault is unsealed")
- }
-
- if err := preUnsealInternal(ctx, c); err != nil {
- return false, err
- }
-
- // Do post-unseal setup if HA is not enabled
- if c.ha == nil {
- // We still need to set up cluster info even if it's not part of a
- // cluster right now. This also populates the cached cluster object.
- if err := c.setupCluster(ctx); err != nil {
- c.logger.Error("cluster setup failed", "error", err)
- c.barrier.Seal()
- c.logger.Warn("vault is sealed")
- return false, err
- }
-
- ctx, ctxCancel := context.WithCancel(namespace.RootContext(nil))
- if err := c.postUnseal(ctx, ctxCancel, standardUnsealStrategy{}); err != nil {
- c.logger.Error("post-unseal setup failed", "error", err)
- c.barrier.Seal()
- c.logger.Warn("vault is sealed")
- return false, err
- }
-
- c.standby = false
- } else {
- // Go to standby mode, wait until we are active to unseal
- c.standbyDoneCh = make(chan struct{})
- c.manualStepDownCh = make(chan struct{})
- c.standbyStopCh = make(chan struct{})
- go c.runStandby(c.standbyDoneCh, c.manualStepDownCh, c.standbyStopCh)
- }
-
- // Force a cache bust here, which will also run migration code
- if c.seal.RecoveryKeySupported() {
- c.seal.SetRecoveryConfig(ctx, nil)
- }
-
- // Success!
- atomic.StoreUint32(c.sealed, 0)
-
- if c.ha != nil {
- sd, ok := c.ha.(physical.ServiceDiscovery)
- if ok {
- if err := sd.NotifySealedStateChange(); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("failed to notify unsealed status", "error", err)
- }
- }
- }
- }
- return true, nil
-}
-
-// SealWithRequest takes in a logical.Request, acquires the lock, and passes
-// through to sealInternal
-func (c *Core) SealWithRequest(httpCtx context.Context, req *logical.Request) error {
- defer metrics.MeasureSince([]string{"core", "seal-with-request"}, time.Now())
-
- if c.Sealed() {
- return nil
- }
-
- c.stateLock.RLock()
-
- // We use background context since we may not be active
- ctx, cancel := context.WithCancel(namespace.RootContext(nil))
- defer cancel()
-
- go func() {
- select {
- case <-ctx.Done():
- case <-httpCtx.Done():
- cancel()
- }
- }()
-
- // This will unlock the read lock
- return c.sealInitCommon(ctx, req)
-}
-
-// Seal takes in a token and creates a logical.Request, acquires the lock, and
-// passes through to sealInternal
-func (c *Core) Seal(token string) error {
- defer metrics.MeasureSince([]string{"core", "seal"}, time.Now())
-
- if c.Sealed() {
- return nil
- }
-
- c.stateLock.RLock()
-
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "sys/seal",
- ClientToken: token,
- }
-
- // This will unlock the read lock
- // We use background context since we may not be active
- return c.sealInitCommon(namespace.RootContext(nil), req)
-}
-
-// sealInitCommon is common logic for Seal and SealWithRequest and is used to
-// re-seal the Vault. This requires the Vault to be unsealed again to perform
-// any further operations. Note: this function will read-unlock the state lock.
-func (c *Core) sealInitCommon(ctx context.Context, req *logical.Request) (retErr error) {
- defer metrics.MeasureSince([]string{"core", "seal-internal"}, time.Now())
-
- if req == nil {
- retErr = multierror.Append(retErr, errors.New("nil request to seal"))
- c.stateLock.RUnlock()
- return retErr
- }
-
- // Since there is no token store on standby nodes, sealing cannot be done.
- // Ideally, the request would be forwarded to the leader node for validation
- // and the operation performed there. For now, we just return an error and
- // recommend a Vault restart, which essentially does the same thing.
- if c.standby {
- c.logger.Error("vault cannot seal when in standby mode; please restart instead")
- retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead"))
- c.stateLock.RUnlock()
- return retErr
- }
-
- acl, te, entity, identityPolicies, err := c.fetchACLTokenEntryAndEntity(ctx, req)
- if err != nil {
- if errwrap.ContainsType(err, new(TemplateError)) {
- c.logger.Warn("permission denied due to a templated policy being invalid or containing directives not satisfied by the requestor", "error", err)
- err = logical.ErrPermissionDenied
- }
- retErr = multierror.Append(retErr, err)
- c.stateLock.RUnlock()
- return retErr
- }
-
- req.SetTokenEntry(te)
-
- // Audit-log the request before going any further
- auth := &logical.Auth{
- ClientToken: req.ClientToken,
- Accessor: req.ClientTokenAccessor,
- }
- if te != nil {
- auth.IdentityPolicies = identityPolicies[te.NamespaceID]
- delete(identityPolicies, te.NamespaceID)
- auth.ExternalNamespacePolicies = identityPolicies
- auth.TokenPolicies = te.Policies
- auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...)
- auth.Metadata = te.Meta
- auth.DisplayName = te.DisplayName
- auth.EntityID = te.EntityID
- auth.TokenType = te.Type
- }
-
- logInput := &audit.LogInput{
- Auth: auth,
- Request: req,
- }
- if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
- c.logger.Error("failed to audit request", "request_path", req.Path, "error", err)
- retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
- c.stateLock.RUnlock()
- return retErr
- }
-
- if entity != nil && entity.Disabled {
- c.logger.Warn("permission denied as the entity on the token is disabled")
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- c.stateLock.RUnlock()
- return retErr
- }
- if te != nil && te.EntityID != "" && entity == nil {
- c.logger.Warn("permission denied as the entity on the token is invalid")
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- c.stateLock.RUnlock()
- return retErr
- }
-
- // Attempt to use the token (decrement num_uses)
- // On error bail out; if the token has been revoked, bail out too
- if te != nil {
- te, err = c.tokenStore.UseToken(ctx, te)
- if err != nil {
- c.logger.Error("failed to use token", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- c.stateLock.RUnlock()
- return retErr
- }
- if te == nil {
- // Token is no longer valid
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- c.stateLock.RUnlock()
- return retErr
- }
- }
-
- // Verify that this operation is allowed
- authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
- RootPrivsRequired: true,
- })
- if !authResults.Allowed {
- c.stateLock.RUnlock()
- retErr = multierror.Append(retErr, authResults.Error)
- if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError {
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- }
- return retErr
- }
-
- if te != nil && te.NumUses == tokenRevocationPending {
- // Token needs to be revoked. We do this immediately here because
- // we won't have a token store after sealing.
- leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(c.activeContext, te)
- if err == nil {
- err = c.expiration.Revoke(c.activeContext, leaseID)
- }
- if err != nil {
- c.logger.Error("token needed revocation before seal but failed to revoke", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- }
- }
-
- // Unlock; sealing will grab the lock when needed
- c.stateLock.RUnlock()
-
- sealErr := c.sealInternal()
-
- if sealErr != nil {
- retErr = multierror.Append(retErr, sealErr)
- }
-
- return
-}
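sealInitCommon accumulates failures with go-multierror rather than bailing on the first error. A minimal sketch of that accumulation pattern (the failing steps are invented for illustration):

    package main

    import (
        "errors"
        "fmt"

        multierror "github.com/hashicorp/go-multierror"
    )

    func main() {
        var result error

        // Append tolerates a nil starting error, so each failed step can be
        // recorded without aborting the remaining work.
        for _, step := range []error{
            errors.New("step one failed"),
            errors.New("step two failed"),
        } {
            if step != nil {
                result = multierror.Append(result, step)
            }
        }

        fmt.Println(result) // reports both errors
    }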
-
-// UIEnabled returns whether the UI is enabled
-func (c *Core) UIEnabled() bool {
- return c.uiConfig.Enabled()
-}
-
-// UIHeaders returns configured UI headers
-func (c *Core) UIHeaders() (http.Header, error) {
- return c.uiConfig.Headers(context.Background())
-}
-
-// sealInternal is an internal method used to seal the vault. It does not do
-// any authorization checking.
-func (c *Core) sealInternal() error {
- return c.sealInternalWithOptions(true, false)
-}
-
-func (c *Core) sealInternalWithOptions(grabStateLock, keepHALock bool) error {
- // Mark sealed, and if already marked return
- if swapped := atomic.CompareAndSwapUint32(c.sealed, 0, 1); !swapped {
- return nil
- }
-
- c.logger.Info("marked as sealed")
-
- // Clear forwarding clients
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
-
- activeCtxCancel := c.activeContextCancelFunc.Load().(context.CancelFunc)
- cancelCtxAndLock := func() {
- doneCh := make(chan struct{})
- go func() {
- select {
- case <-doneCh:
- // The state lock was acquired in time; inflight requests drained on
- // their own
- case <-time.After(DefaultMaxRequestDuration):
- // Inflight requests did not drain within the grace period; cancel
- // them so the state lock can be acquired
- if activeCtxCancel != nil {
- activeCtxCancel()
- }
- }
- }()
-
- c.stateLock.Lock()
- close(doneCh)
- // Stop requests from processing
- if activeCtxCancel != nil {
- activeCtxCancel()
- }
- }
-
- // Do pre-seal teardown if HA is not enabled
- if c.ha == nil {
- if grabStateLock {
- cancelCtxAndLock()
- defer c.stateLock.Unlock()
- }
- // Even in a non-HA context we key off of this for some things
- c.standby = true
-
- // Stop requests from processing
- if activeCtxCancel != nil {
- activeCtxCancel()
- }
-
- if err := c.preSeal(); err != nil {
- c.logger.Error("pre-seal teardown failed", "error", err)
- return fmt.Errorf("internal error")
- }
- } else {
- // If we are keeping the lock we already have the state write lock
- // held. Otherwise grab it here so that when stopCh is triggered we are
- // locked.
- if keepHALock {
- atomic.StoreUint32(c.keepHALockOnStepDown, 1)
- }
- if grabStateLock {
- cancelCtxAndLock()
- defer c.stateLock.Unlock()
- }
-
- // If we are trying to acquire the lock, force it to return with nil so
- // runStandby will exit
- // If we are active, signal the standby goroutine to shut down and wait
- // for completion. We have the state lock here so nothing else should
- // be toggling standby status.
- close(c.standbyStopCh)
- c.logger.Debug("finished triggering standbyStopCh for runStandby")
-
- // Wait for runStandby to stop
- <-c.standbyDoneCh
- atomic.StoreUint32(c.keepHALockOnStepDown, 0)
- c.logger.Debug("runStandby done")
- }
-
- c.logger.Debug("sealing barrier")
- if err := c.barrier.Seal(); err != nil {
- c.logger.Error("error sealing barrier", "error", err)
- return err
- }
-
- if c.ha != nil {
- sd, ok := c.ha.(physical.ServiceDiscovery)
- if ok {
- if err := sd.NotifySealedStateChange(); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("failed to notify sealed status", "error", err)
- }
- }
- }
- }
-
- postSealInternal(c)
-
- c.logger.Info("vault is sealed")
-
- return nil
-}
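The cancelCtxAndLock helper above is a "drain with deadline" pattern: give inflight requests a grace period to release their read locks, then cancel them so the exclusive lock can be taken. A self-contained sketch of the same idea (names and durations are illustrative):

    package main

    import (
        "context"
        "fmt"
        "sync"
        "time"
    )

    func cancelAndLock(mu *sync.RWMutex, cancel context.CancelFunc, grace time.Duration) {
        done := make(chan struct{})
        go func() {
            select {
            case <-done:
                // Lock acquired in time; nothing to force.
            case <-time.After(grace):
                // Readers didn't drain in time; cancel their context.
                cancel()
            }
        }()

        mu.Lock() // blocks until all readers release
        close(done)
        cancel() // stop any remaining work from proceeding
    }

    func main() {
        var mu sync.RWMutex
        ctx, cancel := context.WithCancel(context.Background())

        mu.RLock()
        go func() { // a slow "request" holding a read lock
            <-ctx.Done()
            mu.RUnlock()
        }()

        cancelAndLock(&mu, cancel, 100*time.Millisecond)
        defer mu.Unlock()
        fmt.Println("exclusive lock held")
    }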
-
-type UnsealStrategy interface {
- unseal(context.Context, log.Logger, *Core) error
-}
-
-type standardUnsealStrategy struct{}
-
-func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c *Core) error {
- // Clear forwarding clients; we're active
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
-
- if err := postUnsealPhysical(c); err != nil {
- return err
- }
-
- if err := enterprisePostUnseal(c); err != nil {
- return err
- }
-
- if !c.IsDRSecondary() {
- if err := c.ensureWrappingKey(ctx); err != nil {
- return err
- }
- }
- if err := c.setupPluginCatalog(ctx); err != nil {
- return err
- }
- if err := c.loadMounts(ctx); err != nil {
- return err
- }
- if err := c.setupMounts(ctx); err != nil {
- return err
- }
- if err := c.setupPolicyStore(ctx); err != nil {
- return err
- }
- if err := c.loadCORSConfig(ctx); err != nil {
- return err
- }
- if err := c.loadCredentials(ctx); err != nil {
- return err
- }
- if err := c.setupCredentials(ctx); err != nil {
- return err
- }
- if !c.IsDRSecondary() {
- if err := c.startRollback(); err != nil {
- return err
- }
- if err := c.setupExpiration(expireLeaseStrategyRevoke); err != nil {
- return err
- }
- if err := c.loadAudits(ctx); err != nil {
- return err
- }
- if err := c.setupAudits(ctx); err != nil {
- return err
- }
- if err := c.loadIdentityStoreArtifacts(ctx); err != nil {
- return err
- }
- if err := loadMFAConfigs(ctx, c); err != nil {
- return err
- }
- if err := c.setupAuditedHeadersConfig(ctx); err != nil {
- return err
- }
- } else {
- c.auditBroker = NewAuditBroker(c.logger)
- }
-
- if c.ha != nil || shouldStartClusterListener(c) {
- if err := c.startClusterListener(ctx); err != nil {
- return err
- }
- }
-
- c.clusterParamsLock.Lock()
- defer c.clusterParamsLock.Unlock()
- if err := startReplication(c); err != nil {
- return err
- }
-
- return nil
-}
-
-// postUnseal is invoked after the barrier is unsealed, but before
-// allowing any user operations. This allows us to setup any state that
-// requires the Vault to be unsealed such as mount tables, logical backends,
-// credential stores, etc.
-func (c *Core) postUnseal(ctx context.Context, ctxCancelFunc context.CancelFunc, unsealer UnsealStrategy) (retErr error) {
- defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now())
-
- // Clear out any leftover post-unseal funcs from a previous unseal
- c.postUnsealFuncs = nil
-
- // Create a new request context
- c.activeContext = ctx
- c.activeContextCancelFunc.Store(ctxCancelFunc)
-
- defer func() {
- if retErr != nil {
- ctxCancelFunc()
- c.preSeal()
- }
- }()
- c.logger.Info("post-unseal setup starting")
-
- // Enable the cache
- c.physicalCache.Purge(ctx)
- if !c.cachingDisabled {
- c.physicalCache.SetEnabled(true)
- }
-
- // Purge these for safety in case of a rekey
- c.seal.SetBarrierConfig(ctx, nil)
- if c.seal.RecoveryKeySupported() {
- c.seal.SetRecoveryConfig(ctx, nil)
- }
-
- if err := unsealer.unseal(ctx, c.logger, c); err != nil {
- return err
- }
-
- c.metricsCh = make(chan struct{})
- go c.emitMetrics(c.metricsCh)
-
- // This is intentionally the last block in this function. We want to allow
- // writes just before allowing client requests, to ensure everything has
- // been set up properly before any writes can have happened.
- for _, v := range c.postUnsealFuncs {
- v()
- }
-
- c.logger.Info("post-unseal setup complete")
- return nil
-}
-
-// preSeal is invoked before the barrier is sealed, allowing
-// for any state teardown required.
-func (c *Core) preSeal() error {
- defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now())
- c.logger.Info("pre-seal teardown starting")
-
- // Clear any pending funcs
- c.postUnsealFuncs = nil
-
- // Clear any rekey progress
- c.barrierRekeyConfig = nil
- c.recoveryRekeyConfig = nil
-
- if c.metricsCh != nil {
- close(c.metricsCh)
- c.metricsCh = nil
- }
- var result error
-
- c.clusterParamsLock.Lock()
- if err := stopReplication(c); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error stopping replication: {{err}}", err))
- }
- c.clusterParamsLock.Unlock()
-
- c.stopClusterListener()
-
- if err := c.teardownAudits(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
- }
- if err := c.stopExpiration(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err))
- }
- if err := c.teardownCredentials(context.Background()); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err))
- }
- if err := c.teardownPolicyStore(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error tearing down policy store: {{err}}", err))
- }
- if err := c.stopRollback(); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error stopping rollback: {{err}}", err))
- }
- if err := c.unloadMounts(context.Background()); err != nil {
- result = multierror.Append(result, errwrap.Wrapf("error unloading mounts: {{err}}", err))
- }
- if err := enterprisePreSeal(c); err != nil {
- result = multierror.Append(result, err)
- }
-
- preSealPhysical(c)
-
- c.logger.Info("pre-seal teardown complete")
- return result
-}
-
-func enterprisePostUnsealImpl(c *Core) error {
- return nil
-}
-
-func enterprisePreSealImpl(c *Core) error {
- return nil
-}
-
-func startReplicationImpl(c *Core) error {
- return nil
-}
-
-func stopReplicationImpl(c *Core) error {
- return nil
-}
-
-// emitMetrics is used to periodically expose metrics while running
-func (c *Core) emitMetrics(stopCh chan struct{}) {
- for {
- select {
- case <-time.After(time.Second):
- c.metricsMutex.Lock()
- if c.expiration != nil {
- c.expiration.emitMetrics()
- }
- c.metricsMutex.Unlock()
- case <-stopCh:
- return
- }
- }
-}
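emitMetrics re-arms a fresh timer each second via time.After. For a long-lived loop, a time.Ticker is the more idiomatic shape since it reuses a single timer; a behavior-equivalent standalone sketch:

    package main

    import (
        "fmt"
        "time"
    )

    // emitEvery calls fn once per interval until stopCh closes, using one
    // reusable Ticker instead of re-arming time.After on every iteration.
    func emitEvery(interval time.Duration, stopCh <-chan struct{}, fn func()) {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                fn()
            case <-stopCh:
                return
            }
        }
    }

    func main() {
        stop := make(chan struct{})
        go emitEvery(100*time.Millisecond, stop, func() { fmt.Println("tick") })
        time.Sleep(350 * time.Millisecond)
        close(stop)
    }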
-
-func (c *Core) ReplicationState() consts.ReplicationState {
- return consts.ReplicationState(atomic.LoadUint32(c.replicationState))
-}
-
-func (c *Core) ActiveNodeReplicationState() consts.ReplicationState {
- return consts.ReplicationState(atomic.LoadUint32(c.activeNodeReplicationState))
-}
-
-func (c *Core) SealAccess() *SealAccess {
- return NewSealAccess(c.seal)
-}
-
-func (c *Core) Logger() log.Logger {
- return c.logger
-}
-
-func (c *Core) BarrierKeyLength() (min, max int) {
- min, max = c.barrier.KeyLength()
- max += shamir.ShareOverhead
- return
-}
-
-func (c *Core) AuditedHeadersConfig() *AuditedHeadersConfig {
- return c.auditedHeaders
-}
-
-func waitUntilWALShippedImpl(ctx context.Context, c *Core, index uint64) bool {
- return true
-}
-
-func lastWALImpl(c *Core) uint64 {
- return 0
-}
-
-func lastRemoteWALImpl(c *Core) uint64 {
- return 0
-}
-
-func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfig, error) {
- pe, err := c.physical.Get(ctx, barrierSealConfigPath)
- if err != nil {
- return nil, nil, errwrap.Wrapf("failed to fetch barrier seal configuration at migration check time: {{err}}", err)
- }
- if pe == nil {
- return nil, nil, nil
- }
-
- barrierConf := new(SealConfig)
-
- if err := jsonutil.DecodeJSON(pe.Value, barrierConf); err != nil {
- return nil, nil, errwrap.Wrapf("failed to decode barrier seal configuration at migration check time: {{err}}", err)
- }
-
- var recoveryConf *SealConfig
- pe, err = c.physical.Get(ctx, recoverySealConfigPlaintextPath)
- if err != nil {
- return nil, nil, errwrap.Wrapf("failed to fetch seal configuration at migration check time: {{err}}", err)
- }
- if pe != nil {
- recoveryConf = &SealConfig{}
- if err := jsonutil.DecodeJSON(pe.Value, recoveryConf); err != nil {
- return nil, nil, errwrap.Wrapf("failed to decode seal configuration at migration check time: {{err}}", err)
- }
- }
-
- return barrierConf, recoveryConf, nil
-}
-
-func (c *Core) SetSealsForMigration(migrationSeal, newSeal Seal) {
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
- c.migrationSeal = migrationSeal
- c.seal = newSeal
- c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal")
-}
-
-func (c *Core) IsInSealMigration() bool {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- return c.migrationSeal != nil
-}
-
-func (c *Core) BarrierEncryptorAccess() *BarrierEncryptorAccess {
- return NewBarrierEncryptorAccess(c.barrier)
-}
-
-func (c *Core) PhysicalAccess() *physical.PhysicalAccess {
- return physical.NewPhysicalAccess(c.physical)
-}
-
-func (c *Core) RouterAccess() *RouterAccess {
- return NewRouterAccess(c)
-}
-
-// IsDRSecondary returns whether the current cluster state is a DR secondary.
-func (c *Core) IsDRSecondary() bool {
- return c.ReplicationState().HasState(consts.ReplicationDRSecondary)
-}
-
-func (c *Core) AddLogger(logger log.Logger) {
- c.allLoggersLock.Lock()
- defer c.allLoggersLock.Unlock()
- c.allLoggers = append(c.allLoggers, logger)
-}
-
-func (c *Core) SetLogLevel(level log.Level) {
- c.allLoggersLock.RLock()
- defer c.allLoggersLock.RUnlock()
- for _, logger := range c.allLoggers {
- logger.SetLevel(level)
- }
-}
-
-// BuiltinRegistry is an interface that allows the "vault" package to use
-// the registry of builtin plugins without getting an import cycle. It
-// also allows for mocking the registry easily.
-type BuiltinRegistry interface {
- Contains(name string, pluginType consts.PluginType) bool
- Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool)
- Keys(pluginType consts.PluginType) []string
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/core_util.go b/vendor/github.com/hashicorp/vault/vault/core_util.go
deleted file mode 100644
index af3fff1a..00000000
--- a/vendor/github.com/hashicorp/vault/vault/core_util.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
-
- "github.com/hashicorp/vault/helper/license"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
-)
-
-type entCore struct{}
-
-type LicensingConfig struct{}
-
-func coreInit(c *Core, conf *CoreConfig) error {
- phys := conf.Physical
- _, txnOK := phys.(physical.Transactional)
- sealUnwrapperLogger := conf.Logger.Named("storage.sealunwrapper")
- c.allLoggers = append(c.allLoggers, sealUnwrapperLogger)
- c.sealUnwrapper = NewSealUnwrapper(phys, sealUnwrapperLogger)
- // Wrap the physical backend in a cache layer if enabled
- cacheLogger := c.baseLogger.Named("storage.cache")
- c.allLoggers = append(c.allLoggers, cacheLogger)
- if txnOK {
- c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, cacheLogger)
- } else {
- c.physical = physical.NewCache(c.sealUnwrapper, conf.CacheSize, cacheLogger)
- }
- c.physicalCache = c.physical.(physical.ToggleablePurgemonster)
-
- // Wrap in encoding checks
- if !conf.DisableKeyEncodingChecks {
- c.physical = physical.NewStorageEncoding(c.physical)
- }
- return nil
-}
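coreInit assembles the storage stack by wrapping one physical.Backend in another: seal unwrapper, then the LRU cache, then key-encoding checks. A toy sketch of that decorator layering, using an invented one-method Store interface in place of physical.Backend:

    package main

    import "fmt"

    // Store is a stand-in for physical.Backend.
    type Store interface {
        Get(key string) (string, error)
    }

    type base struct{}

    func (base) Get(key string) (string, error) { return "value-of-" + key, nil }

    // cached and checked each wrap an inner Store, mirroring how the seal
    // unwrapper, cache, and encoding layers each wrap the one below.
    type cached struct {
        inner Store
        memo  map[string]string
    }

    func (c *cached) Get(key string) (string, error) {
        if v, ok := c.memo[key]; ok {
            return v, nil
        }
        v, err := c.inner.Get(key)
        if err == nil {
            c.memo[key] = v
        }
        return v, err
    }

    type checked struct{ inner Store }

    func (c checked) Get(key string) (string, error) {
        if key == "" {
            return "", fmt.Errorf("invalid key")
        }
        return c.inner.Get(key)
    }

    func main() {
        var s Store = base{}
        s = &cached{inner: s, memo: map[string]string{}}
        s = checked{inner: s}
        v, _ := s.Get("foo")
        fmt.Println(v)
    }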
-
-func createSecondaries(*Core, *CoreConfig) {}
-
-func addExtraLogicalBackends(*Core, map[string]logical.Factory) {}
-
-func addExtraCredentialBackends(*Core, map[string]logical.Factory) {}
-
-func preUnsealInternal(context.Context, *Core) error { return nil }
-
-func postSealInternal(*Core) {}
-
-func preSealPhysical(c *Core) {
- switch u := c.sealUnwrapper.(type) {
- case *sealUnwrapper:
- u.stopUnwraps()
- case *transactionalSealUnwrapper:
- u.stopUnwraps()
- }
-
- // Purge the cache
- c.physicalCache.SetEnabled(false)
- c.physicalCache.Purge(context.Background())
-}
-
-func postUnsealPhysical(c *Core) error {
- switch u := c.sealUnwrapper.(type) {
- case *sealUnwrapper:
- u.runUnwraps()
- case *transactionalSealUnwrapper:
- u.runUnwraps()
- }
- return nil
-}
-
-func loadMFAConfigs(context.Context, *Core) error { return nil }
-
-func shouldStartClusterListener(*Core) bool { return true }
-
-func hasNamespaces(*Core) bool { return false }
-
-func (c *Core) Features() license.Features {
- return license.FeatureNone
-}
-
-func (c *Core) HasFeature(license.Features) bool {
- return false
-}
-
-func (c *Core) namepaceByPath(string) *namespace.Namespace {
- return namespace.RootNamespace
-}
-
-func (c *Core) setupReplicatedClusterPrimary(*ReplicatedCluster) error { return nil }
-
-func (c *Core) perfStandbyCount() int { return 0 }
-
-func (c *Core) removePrefixFromFilteredPaths(context.Context, string) error {
- return nil
-}
-
-func (c *Core) checkReplicatedFiltering(context.Context, *MountEntry, string) (bool, error) {
- return false, nil
-}
-
-func (c *Core) invalidateSentinelPolicy(PolicyType, string) {}
-
-func (c *Core) removePerfStandbySecondary(context.Context, string) {}
diff --git a/vendor/github.com/hashicorp/vault/vault/cors.go b/vendor/github.com/hashicorp/vault/vault/cors.go
deleted file mode 100644
index 9cbecc77..00000000
--- a/vendor/github.com/hashicorp/vault/vault/cors.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "sync"
- "sync/atomic"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- CORSDisabled uint32 = iota
- CORSEnabled
-)
-
-var StdAllowedHeaders = []string{
- "Content-Type",
- "X-Requested-With",
- "X-Vault-AWS-IAM-Server-ID",
- "X-Vault-MFA",
- "X-Vault-No-Request-Forwarding",
- "X-Vault-Wrap-Format",
- "X-Vault-Wrap-TTL",
- "X-Vault-Policy-Override",
- "Authorization",
- consts.AuthHeaderName,
-}
-
-// CORSConfig stores the state of the CORS configuration.
-type CORSConfig struct {
- sync.RWMutex `json:"-"`
- core *Core
- Enabled *uint32 `json:"enabled"`
- AllowedOrigins []string `json:"allowed_origins,omitempty"`
- AllowedHeaders []string `json:"allowed_headers,omitempty"`
-}
-
-func (c *Core) saveCORSConfig(ctx context.Context) error {
- view := c.systemBarrierView.SubView("config/")
-
- enabled := atomic.LoadUint32(c.corsConfig.Enabled)
- localConfig := &CORSConfig{
- Enabled: &enabled,
- }
- c.corsConfig.RLock()
- localConfig.AllowedOrigins = c.corsConfig.AllowedOrigins
- localConfig.AllowedHeaders = c.corsConfig.AllowedHeaders
- c.corsConfig.RUnlock()
-
- entry, err := logical.StorageEntryJSON("cors", localConfig)
- if err != nil {
- return errwrap.Wrapf("failed to create CORS config entry: {{err}}", err)
- }
-
- if err := view.Put(ctx, entry); err != nil {
- return errwrap.Wrapf("failed to save CORS config: {{err}}", err)
- }
-
- return nil
-}
-
-// This should only be called with the core state lock held for writing
-func (c *Core) loadCORSConfig(ctx context.Context) error {
- view := c.systemBarrierView.SubView("config/")
-
- // Load the config in
- out, err := view.Get(ctx, "cors")
- if err != nil {
- return errwrap.Wrapf("failed to read CORS config: {{err}}", err)
- }
- if out == nil {
- return nil
- }
-
- newConfig := new(CORSConfig)
- err = out.DecodeJSON(newConfig)
- if err != nil {
- return err
- }
-
- if newConfig.Enabled == nil {
- newConfig.Enabled = new(uint32)
- }
-
- newConfig.core = c
-
- c.corsConfig = newConfig
-
- return nil
-}
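saveCORSConfig and loadCORSConfig round-trip the config through a JSON storage entry. A minimal sketch of that round trip using only encoding/json (the logical.StorageEntryJSON helper wraps roughly this), including the nil-Enabled defaulting the loader performs:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type corsConfig struct {
        Enabled        *uint32  `json:"enabled"`
        AllowedOrigins []string `json:"allowed_origins,omitempty"`
    }

    func main() {
        enabled := uint32(1)
        in := corsConfig{Enabled: &enabled, AllowedOrigins: []string{"https://example.com"}}

        // Marshal to the stored representation...
        raw, err := json.Marshal(in)
        if err != nil {
            panic(err)
        }

        // ...and decode it back, defaulting Enabled when absent, as the
        // loader above does.
        var out corsConfig
        if err := json.Unmarshal(raw, &out); err != nil {
            panic(err)
        }
        if out.Enabled == nil {
            out.Enabled = new(uint32)
        }
        fmt.Println(string(raw), *out.Enabled)
    }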
-
-// Enable takes either a single '*' or a list of URLs that are allowed to make
-// cross-origin requests to Vault.
-func (c *CORSConfig) Enable(ctx context.Context, urls []string, headers []string) error {
- if len(urls) == 0 {
- return errors.New("at least one origin or the wildcard must be provided")
- }
-
- if strutil.StrListContains(urls, "*") && len(urls) > 1 {
- return errors.New("to allow all origins the '*' must be the only value for allowed_origins")
- }
-
- c.Lock()
- c.AllowedOrigins = urls
-
- // Start with the standard headers that Vault accepts.
- c.AllowedHeaders = append(c.AllowedHeaders, StdAllowedHeaders...)
-
- // Allow the user to add additional headers to the list of
- // headers allowed on cross-origin requests.
- if len(headers) > 0 {
- c.AllowedHeaders = append(c.AllowedHeaders, headers...)
- }
- c.Unlock()
-
- atomic.StoreUint32(c.Enabled, CORSEnabled)
-
- return c.core.saveCORSConfig(ctx)
-}
-
-// IsEnabled returns whether CORS is currently enabled
-func (c *CORSConfig) IsEnabled() bool {
- return atomic.LoadUint32(c.Enabled) == CORSEnabled
-}
-
-// Disable sets CORS to disabled and clears the allowed origins & headers.
-func (c *CORSConfig) Disable(ctx context.Context) error {
- atomic.StoreUint32(c.Enabled, CORSDisabled)
- c.Lock()
-
- c.AllowedOrigins = nil
- c.AllowedHeaders = nil
-
- c.Unlock()
-
- return c.core.saveCORSConfig(ctx)
-}
-
-// IsValidOrigin determines if the origin of the request is allowed to make
-// cross-origin requests based on the CORSConfig.
-func (c *CORSConfig) IsValidOrigin(origin string) bool {
- // If we aren't enabling CORS then all origins are valid
- if !c.IsEnabled() {
- return true
- }
-
- c.RLock()
- defer c.RUnlock()
-
- if len(c.AllowedOrigins) == 0 {
- return false
- }
-
- if len(c.AllowedOrigins) == 1 && (c.AllowedOrigins)[0] == "*" {
- return true
- }
-
- return strutil.StrListContains(c.AllowedOrigins, origin)
-}
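IsValidOrigin reduces to a small decision table: checks disabled means allow-all, a lone '*' means allow-all, otherwise exact list membership. A standalone sketch of the same logic (function and variable names are invented):

    package main

    import "fmt"

    func originAllowed(enabled bool, allowed []string, origin string) bool {
        if !enabled {
            return true // CORS checks off: every origin passes
        }
        if len(allowed) == 1 && allowed[0] == "*" {
            return true // the wildcard must stand alone
        }
        for _, o := range allowed {
            if o == origin {
                return true
            }
        }
        return false
    }

    func main() {
        allowed := []string{"https://example.com"}
        fmt.Println(originAllowed(true, allowed, "https://example.com")) // true
        fmt.Println(originAllowed(true, allowed, "https://evil.test"))   // false
        fmt.Println(originAllowed(true, []string{"*"}, "anything"))      // true
    }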
diff --git a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go b/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
deleted file mode 100644
index eef5e19c..00000000
--- a/vendor/github.com/hashicorp/vault/vault/dynamic_system_view.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/hashicorp/errwrap"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/license"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/version"
-)
-
-type dynamicSystemView struct {
- core *Core
- mountEntry *MountEntry
-}
-
-func (d dynamicSystemView) DefaultLeaseTTL() time.Duration {
- def, _ := d.fetchTTLs()
- return def
-}
-
-func (d dynamicSystemView) MaxLeaseTTL() time.Duration {
- _, max := d.fetchTTLs()
- return max
-}
-
-func (d dynamicSystemView) SudoPrivilege(ctx context.Context, path string, token string) bool {
- // Resolve the token policy
- te, err := d.core.tokenStore.Lookup(ctx, token)
- if err != nil {
- d.core.logger.Error("failed to lookup token", "error", err)
- return false
- }
-
- // Ensure the token is valid
- if te == nil {
- d.core.logger.Error("entry not found for given token")
- return false
- }
-
- policies := make(map[string][]string)
- // Add token policies
- policies[te.NamespaceID] = append(policies[te.NamespaceID], te.Policies...)
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, d.core)
- if err != nil {
- d.core.logger.Error("failed to lookup token namespace", "error", err)
- return false
- }
- if tokenNS == nil {
- d.core.logger.Error("failed to lookup token namespace", "error", namespace.ErrNoNamespace)
- return false
- }
-
- // Add identity policies from all the namespaces
- entity, identityPolicies, err := d.core.fetchEntityAndDerivedPolicies(ctx, tokenNS, te.EntityID)
- if err != nil {
- d.core.logger.Error("failed to fetch identity policies", "error", err)
- return false
- }
- for nsID, nsPolicies := range identityPolicies {
- policies[nsID] = append(policies[nsID], nsPolicies...)
- }
-
- tokenCtx := namespace.ContextWithNamespace(ctx, tokenNS)
-
- // Construct the corresponding ACL object. Derive and use a new context that
- // uses the req.ClientToken's namespace
- acl, err := d.core.policyStore.ACL(tokenCtx, entity, policies)
- if err != nil {
- d.core.logger.Error("failed to retrieve ACL for token's policies", "token_policies", te.Policies, "error", err)
- return false
- }
-
- // The operation type isn't important here as this is run from a path the
- // user has already been given access to; we only care about whether they
- // have sudo
- req := new(logical.Request)
- req.Operation = logical.ReadOperation
- req.Path = path
- authResults := acl.AllowOperation(ctx, req, true)
- return authResults.RootPrivs
-}
-
-// fetchTTLs returns the default and max TTLs corresponding to a particular
-// mount point, or the system defaults
-func (d dynamicSystemView) fetchTTLs() (def, max time.Duration) {
- def = d.core.defaultLeaseTTL
- max = d.core.maxLeaseTTL
-
- if d.mountEntry != nil {
- if d.mountEntry.Config.DefaultLeaseTTL != 0 {
- def = d.mountEntry.Config.DefaultLeaseTTL
- }
- if d.mountEntry.Config.MaxLeaseTTL != 0 {
- max = d.mountEntry.Config.MaxLeaseTTL
- }
- }
-
- return
-}
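fetchTTLs resolves TTLs by simple precedence: a non-zero mount-level value overrides the system default. A compact standalone sketch of the same resolution (type and function names are invented):

    package main

    import (
        "fmt"
        "time"
    )

    type mountConfig struct {
        DefaultLeaseTTL time.Duration
        MaxLeaseTTL     time.Duration
    }

    func resolveTTLs(sysDef, sysMax time.Duration, mc *mountConfig) (def, max time.Duration) {
        def, max = sysDef, sysMax
        if mc != nil {
            if mc.DefaultLeaseTTL != 0 {
                def = mc.DefaultLeaseTTL
            }
            if mc.MaxLeaseTTL != 0 {
                max = mc.MaxLeaseTTL
            }
        }
        return def, max
    }

    func main() {
        // 768h is the 32-day system default used elsewhere in this package.
        def, max := resolveTTLs(768*time.Hour, 768*time.Hour, &mountConfig{MaxLeaseTTL: 24 * time.Hour})
        fmt.Println(def, max) // system default kept, mount-level max applied
    }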
-
-// Tainted indicates that the mount is in the process of being removed
-func (d dynamicSystemView) Tainted() bool {
- return d.mountEntry.Tainted
-}
-
-// CachingDisabled indicates whether to use caching behavior
-func (d dynamicSystemView) CachingDisabled() bool {
- return d.core.cachingDisabled || (d.mountEntry != nil && d.mountEntry.Config.ForceNoCache)
-}
-
-func (d dynamicSystemView) LocalMount() bool {
- return d.mountEntry != nil && d.mountEntry.Local
-}
-
-// ReplicationState returns the replication state of this Vault instance.
-// Caller should hold the stateLock in read mode.
-func (d dynamicSystemView) ReplicationState() consts.ReplicationState {
- state := d.core.ReplicationState()
- if d.core.perfStandby {
- state |= consts.ReplicationPerformanceStandby
- }
- return state
-}
-
-func (d dynamicSystemView) HasFeature(feature license.Features) bool {
- return d.core.HasFeature(feature)
-}
-
-// ResponseWrapData wraps the given data in a cubbyhole and returns the
-// token used to unwrap.
-func (d dynamicSystemView) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) {
- req := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "sys/wrapping/wrap",
- }
-
- resp := &logical.Response{
- WrapInfo: &wrapping.ResponseWrapInfo{
- TTL: ttl,
- },
- Data: data,
- }
-
- if jwt {
- resp.WrapInfo.Format = "jwt"
- }
-
- _, err := d.core.wrapInCubbyhole(ctx, req, resp, nil)
- if err != nil {
- return nil, err
- }
-
- return resp.WrapInfo, nil
-}
-
-// LookupPlugin looks for a plugin with the given name in the plugin catalog. It
-// returns a PluginRunner or an error if no plugin was found.
-func (d dynamicSystemView) LookupPlugin(ctx context.Context, name string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error) {
- if d.core == nil {
- return nil, fmt.Errorf("system view core is nil")
- }
- if d.core.pluginCatalog == nil {
- return nil, fmt.Errorf("system view core plugin catalog is nil")
- }
- r, err := d.core.pluginCatalog.Get(ctx, name, pluginType)
- if err != nil {
- return nil, err
- }
- if r == nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("{{err}}: %s", name), ErrPluginNotFound)
- }
-
- return r, nil
-}
-
-// MlockEnabled returns the configuration setting for enabling mlock on plugins.
-func (d dynamicSystemView) MlockEnabled() bool {
- return d.core.enableMlock
-}
-
-func (d dynamicSystemView) EntityInfo(entityID string) (*logical.Entity, error) {
- // Requests from tokens created by the token backend will not have entity
- // information. Return a missing entity instead of an error when querying MemDB.
- if entityID == "" {
- return nil, nil
- }
-
- if d.core == nil {
- return nil, fmt.Errorf("system view core is nil")
- }
- if d.core.identityStore == nil {
- return nil, fmt.Errorf("system view identity store is nil")
- }
-
- // Retrieve the entity from MemDB
- entity, err := d.core.identityStore.MemDBEntityByID(entityID, false)
- if err != nil {
- return nil, err
- }
- if entity == nil {
- return nil, nil
- }
-
- // Return a subset of the data
- ret := &logical.Entity{
- ID: entity.ID,
- Name: entity.Name,
- }
-
- if entity.Metadata != nil {
- ret.Metadata = make(map[string]string, len(entity.Metadata))
- for k, v := range entity.Metadata {
- ret.Metadata[k] = v
- }
- }
-
- aliases := make([]*logical.Alias, len(entity.Aliases))
- for i, a := range entity.Aliases {
- alias := &logical.Alias{
- MountAccessor: a.MountAccessor,
- Name: a.Name,
- }
- // MountType is not stored with the entity and must be looked up
- if mount := d.core.router.validateMountByAccessor(a.MountAccessor); mount != nil {
- alias.MountType = mount.MountType
- }
-
- if a.Metadata != nil {
- alias.Metadata = make(map[string]string, len(a.Metadata))
- for k, v := range a.Metadata {
- alias.Metadata[k] = v
- }
- }
-
- aliases[i] = alias
- }
- ret.Aliases = aliases
-
- return ret, nil
-}
-
-func (d dynamicSystemView) PluginEnv(_ context.Context) (*logical.PluginEnvironment, error) {
- return &logical.PluginEnvironment{
- VaultVersion: version.GetVersion().Version,
- }, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration.go b/vendor/github.com/hashicorp/vault/vault/expiration.go
deleted file mode 100644
index 54bc98f2..00000000
--- a/vendor/github.com/hashicorp/vault/vault/expiration.go
+++ /dev/null
@@ -1,1802 +0,0 @@
-package vault
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "os"
- "path"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- log "github.com/hashicorp/go-hclog"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/base62"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- // expirationSubPath is the sub-path used for the expiration manager
- // view. This is nested under the system view.
- expirationSubPath = "expire/"
-
- // leaseViewPrefix is the prefix used for the ID based lookup of leases.
- leaseViewPrefix = "id/"
-
- // tokenViewPrefix is the prefix used for the token based lookup of leases.
- tokenViewPrefix = "token/"
-
- // maxRevokeAttempts limits how many revoke attempts are made
- maxRevokeAttempts = 6
-
- // revokeRetryBase is a baseline retry time
- revokeRetryBase = 10 * time.Second
-
- // maxLeaseTTL is the default maximum lease duration
- maxLeaseTTL = 32 * 24 * time.Hour
-
- // defaultLeaseTTL is the default lease duration used when no lease is specified
- defaultLeaseTTL = maxLeaseTTL
-
- // maxLeaseThreshold is the maximum lease count before generating a log warning
- maxLeaseThreshold = 256000
-)
-
-type pendingInfo struct {
- exportLeaseTimes *leaseEntry
- timer *time.Timer
-}
-
-// ExpirationManager is used by the Core to manage leases. Secrets
-// can provide a lease, meaning that they can be renewed or revoked.
-// If a secret is not renewed in a timely manner, it may expire, and
-// the ExpirationManager will handle the automatic revocation.
-type ExpirationManager struct {
- core *Core
- router *Router
- idView *BarrierView
- tokenView *BarrierView
- tokenStore *TokenStore
- logger log.Logger
-
- pending map[string]pendingInfo
- pendingLock sync.RWMutex
-
- tidyLock *int32
-
- restoreMode *int32
- restoreModeLock sync.RWMutex
- restoreRequestLock sync.RWMutex
- restoreLocks []*locksutil.LockEntry
- restoreLoaded sync.Map
- quitCh chan struct{}
-
- coreStateLock *sync.RWMutex
- quitContext context.Context
- leaseCheckCounter *uint32
-
- logLeaseExpirations bool
- expireFunc ExpireLeaseStrategy
-}
-
-type ExpireLeaseStrategy func(context.Context, *ExpirationManager, *leaseEntry)
-
-// expireLeaseStrategyRevoke is invoked when a given lease expires; it revokes
-// the lease, retrying with exponential backoff on failure
-func expireLeaseStrategyRevoke(ctx context.Context, m *ExpirationManager, le *leaseEntry) {
- for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
- revokeCtx, cancel := context.WithTimeout(ctx, DefaultMaxRequestDuration)
- revokeCtx = namespace.ContextWithNamespace(revokeCtx, le.namespace)
-
- go func() {
- select {
- case <-ctx.Done():
- case <-m.quitCh:
- cancel()
- case <-revokeCtx.Done():
- }
- }()
-
- select {
- case <-m.quitCh:
- m.logger.Error("shutting down, not attempting further revocation of lease", "lease_id", le.LeaseID)
- cancel()
- return
- case <-m.quitContext.Done():
- m.logger.Error("core context canceled, not attempting further revocation of lease", "lease_id", le.LeaseID)
- cancel()
- return
- default:
- }
-
- m.coreStateLock.RLock()
- err := m.Revoke(revokeCtx, le.LeaseID)
- m.coreStateLock.RUnlock()
- cancel()
- if err == nil {
- return
- }
-
- m.logger.Error("failed to revoke lease", "lease_id", le.LeaseID, "error", err)
- time.Sleep((1 << attempt) * revokeRetryBase)
- }
- m.logger.Error("maximum revoke attempts reached", "lease_id", le.LeaseID)
-}
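
Aside: the sleep above implements exponential backoff. With revokeRetryBase = 10s and maxRevokeAttempts = 6, the delays after failed attempts are 10s, 20s, 40s, 80s, 160s, and 320s. A standalone sketch of the schedule (same arithmetic, no Vault dependencies):

package main

import (
	"fmt"
	"time"
)

const (
	maxRevokeAttempts = 6
	revokeRetryBase   = 10 * time.Second
)

func main() {
	// (1 << attempt) doubles the delay after each failed attempt.
	for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
		fmt.Printf("attempt %d: sleep %s\n", attempt, (1<<attempt)*revokeRetryBase)
	}
}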
-
-// NewExpirationManager creates a new ExpirationManager that is backed
-// using a given view, and uses the provided router for revocation.
-func NewExpirationManager(c *Core, view *BarrierView, e ExpireLeaseStrategy, logger log.Logger) *ExpirationManager {
- exp := &ExpirationManager{
- core: c,
- router: c.router,
- idView: view.SubView(leaseViewPrefix),
- tokenView: view.SubView(tokenViewPrefix),
- tokenStore: c.tokenStore,
- logger: logger,
- pending: make(map[string]pendingInfo),
- tidyLock: new(int32),
-
- // new instances of the expiration manager will go immediately into
- // restore mode
- restoreMode: new(int32),
- restoreLocks: locksutil.CreateLocks(),
- quitCh: make(chan struct{}),
-
- coreStateLock: &c.stateLock,
- quitContext: c.activeContext,
- leaseCheckCounter: new(uint32),
-
- logLeaseExpirations: os.Getenv("VAULT_SKIP_LOGGING_LEASE_EXPIRATIONS") == "",
- expireFunc: e,
- }
- *exp.restoreMode = 1
-
- if exp.logger == nil {
- opts := log.LoggerOptions{Name: "expiration_manager"}
- exp.logger = log.New(&opts)
- }
-
- return exp
-}
-
-// setupExpiration is invoked after we've loaded the mount table to
-// initialize the expiration manager
-func (c *Core) setupExpiration(e ExpireLeaseStrategy) error {
- c.metricsMutex.Lock()
- defer c.metricsMutex.Unlock()
- // Create a sub-view
- view := c.systemBarrierView.SubView(expirationSubPath)
-
- // Create the manager
- expLogger := c.baseLogger.Named("expiration")
- c.AddLogger(expLogger)
- mgr := NewExpirationManager(c, view, e, expLogger)
- c.expiration = mgr
-
- // Link the token store to this
- c.tokenStore.SetExpirationManager(mgr)
-
- // Restore the existing state
- c.logger.Info("restoring leases")
- errorFunc := func() {
- c.logger.Error("shutting down")
- if err := c.Shutdown(); err != nil {
- c.logger.Error("error shutting down core", "error", err)
- }
- }
- go c.expiration.Restore(errorFunc)
-
- return nil
-}
-
-// stopExpiration is used to stop the expiration manager before
-// sealing the Vault.
-func (c *Core) stopExpiration() error {
- if c.expiration != nil {
- if err := c.expiration.Stop(); err != nil {
- return err
- }
- c.metricsMutex.Lock()
- defer c.metricsMutex.Unlock()
- c.expiration = nil
- }
- return nil
-}
-
-// lockLease takes out a lock for a given lease ID
-func (m *ExpirationManager) lockLease(leaseID string) {
- locksutil.LockForKey(m.restoreLocks, leaseID).Lock()
-}
-
-// unlockLease unlocks a given lease ID
-func (m *ExpirationManager) unlockLease(leaseID string) {
- locksutil.LockForKey(m.restoreLocks, leaseID).Unlock()
-}
-
-// inRestoreMode returns whether we are currently in restore mode
-func (m *ExpirationManager) inRestoreMode() bool {
- return atomic.LoadInt32(m.restoreMode) == 1
-}
-
-func (m *ExpirationManager) invalidate(key string) {
- switch {
- case strings.HasPrefix(key, leaseViewPrefix):
- // Clear from the pending expiration
- leaseID := strings.TrimPrefix(key, leaseViewPrefix)
- m.pendingLock.Lock()
- if pending, ok := m.pending[leaseID]; ok {
- pending.timer.Stop()
- delete(m.pending, leaseID)
- }
- m.pendingLock.Unlock()
- }
-}
-
-// Tidy cleans up the dangling storage entries for leases. It scans the storage
-// view to find all the available leases, checks whether the token embedded in
-// each one is empty or invalid, and in both cases revokes the lease. It uses a
-// token cache to avoid repeated lookups of the same token ID. The API that
-// invokes this is normally not needed; it exists only to clean up storage
-// corrupted by bugs.
-func (m *ExpirationManager) Tidy(ctx context.Context) error {
- if m.inRestoreMode() {
- return errors.New("cannot run tidy while restoring leases")
- }
-
- var tidyErrors *multierror.Error
-
- logger := m.logger.Named("tidy")
- m.core.AddLogger(logger)
-
- if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) {
- logger.Warn("tidy operation on leases is already in progress")
- return nil
- }
-
- defer atomic.CompareAndSwapInt32(m.tidyLock, 1, 0)
-
- logger.Info("beginning tidy operation on leases")
- defer logger.Info("finished tidy operation on leases")
-
- // Create a cache to keep track of looked up tokens
- tokenCache := make(map[string]bool)
- var countLease, revokedCount, deletedCountInvalidToken, deletedCountEmptyToken int64
-
- tidyFunc := func(leaseID string) {
- countLease++
- if countLease%500 == 0 {
- logger.Info("tidying leases", "progress", countLease)
- }
-
- le, err := m.loadEntry(ctx, leaseID)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to load the lease ID %q: {{err}}", leaseID), err))
- return
- }
-
- if le == nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("nil entry for lease ID %q: {{err}}", leaseID), err))
- return
- }
-
- var isValid, ok bool
- revokeLease := false
- if le.ClientToken == "" {
- logger.Debug("revoking lease which has an empty token", "lease_id", leaseID)
- revokeLease = true
- deletedCountEmptyToken++
- goto REVOKE_CHECK
- }
-
- isValid, ok = tokenCache[le.ClientToken]
- if !ok {
- lock := locksutil.LockForKey(m.tokenStore.tokenLocks, le.ClientToken)
- lock.RLock()
- te, err := m.tokenStore.lookupInternal(ctx, le.ClientToken, false, true)
- lock.RUnlock()
-
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to lookup token: {{err}}", err))
- return
- }
-
- if te == nil {
- logger.Debug("revoking lease which holds an invalid token", "lease_id", leaseID)
- revokeLease = true
- deletedCountInvalidToken++
- tokenCache[le.ClientToken] = false
- } else {
- tokenCache[le.ClientToken] = true
- }
- goto REVOKE_CHECK
- } else {
- if isValid {
- return
- }
-
- logger.Debug("revoking lease which contains an invalid token", "lease_id", leaseID)
- revokeLease = true
- deletedCountInvalidToken++
- goto REVOKE_CHECK
- }
-
- REVOKE_CHECK:
- if revokeLease {
- // Force the revocation and skip going through the token store
- // again
- err = m.revokeCommon(ctx, leaseID, true, true)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf(fmt.Sprintf("failed to revoke an invalid lease with ID %q: {{err}}", leaseID), err))
- return
- }
- revokedCount++
- }
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- leaseView := m.leaseView(ns)
- if err := logical.ScanView(m.quitContext, leaseView, tidyFunc); err != nil {
- return err
- }
-
- logger.Info("number of leases scanned", "count", countLease)
- logger.Info("number of leases which had empty tokens", "count", deletedCountEmptyToken)
- logger.Info("number of leases which had invalid tokens", "count", deletedCountInvalidToken)
- logger.Info("number of leases successfully revoked", "count", revokedCount)
-
- return tidyErrors.ErrorOrNil()
-}
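
The tokenCache above is plain memoization: each distinct ClientToken triggers at most one token-store read per tidy run. A hedged sketch of the same pattern, with lookup standing in for the real store call:

// validityOf memoizes an expensive boolean lookup keyed by token, so many
// leases held by the same token cost only one store read.
func validityOf(token string, cache map[string]bool, lookup func(string) (bool, error)) (bool, error) {
	if valid, ok := cache[token]; ok {
		return valid, nil
	}
	valid, err := lookup(token)
	if err != nil {
		return false, err
	}
	cache[token] = valid
	return valid, nil
}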
-
-// Restore is used to recover the lease states after the Vault is started.
-func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) {
- defer func() {
- // Turn off restore mode. We can do this safely without the lock because
- // if restore mode finished successfully, restore mode was already
- // disabled with the lock. In an error state, this will allow the
- // Stop() function to shut everything down.
- atomic.StoreInt32(m.restoreMode, 0)
-
- switch {
- case retErr == nil:
- case strings.Contains(retErr.Error(), context.Canceled.Error()):
- // Don't run error func because we lost leadership
-			m.logger.Warn("context canceled while restoring leases, stopping lease loading")
- retErr = nil
- case errwrap.Contains(retErr, ErrBarrierSealed.Error()):
- // Don't run error func because we're likely already shutting down
- m.logger.Warn("barrier sealed while restoring leases, stopping lease loading")
- retErr = nil
- default:
- m.logger.Error("error restoring leases", "error", retErr)
- if errorFunc != nil {
- errorFunc()
- }
- }
- }()
-
- // Accumulate existing leases
- m.logger.Debug("collecting leases")
- existing, leaseCount, err := m.collectLeases()
- if err != nil {
- return err
- }
- m.logger.Debug("leases collected", "num_existing", leaseCount)
-
- // Make the channels used for the worker pool
- type lease struct {
- namespace *namespace.Namespace
- id string
- }
- broker := make(chan *lease)
- quit := make(chan bool)
- // Buffer these channels to prevent deadlocks
- errs := make(chan error, len(existing))
- result := make(chan struct{}, len(existing))
-
- // Use a wait group
- wg := &sync.WaitGroup{}
-
-	// Create a pool of ExpirationRestoreWorkerCount workers to distribute work to
- for i := 0; i < consts.ExpirationRestoreWorkerCount; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- for {
- select {
- case lease, ok := <-broker:
- // broker has been closed, we are done
- if !ok {
- return
- }
-
- ctx := namespace.ContextWithNamespace(m.quitContext, lease.namespace)
- err := m.processRestore(ctx, lease.id)
- if err != nil {
- errs <- err
- continue
- }
-
- // Send message that lease is done
- result <- struct{}{}
-
- // quit early
- case <-quit:
- return
-
- case <-m.quitCh:
- return
- }
- }
- }()
- }
-
-	// Distribute the collected keys to the workers in a goroutine
- wg.Add(1)
- go func() {
- defer wg.Done()
- i := 0
- for ns := range existing {
- for _, leaseID := range existing[ns] {
- i++
- if i%500 == 0 {
- m.logger.Debug("leases loading", "progress", i)
- }
-
- select {
- case <-quit:
- return
-
- case <-m.quitCh:
- return
-
- default:
- broker <- &lease{
- namespace: ns,
- id: leaseID,
- }
- }
- }
- }
-
- // Close the broker, causing worker routines to exit
- close(broker)
- }()
-
- // Ensure all keys on the chan are processed
- for i := 0; i < leaseCount; i++ {
- select {
- case err := <-errs:
-			// Close all goroutines
- close(quit)
- return err
-
- case <-m.quitCh:
- close(quit)
- return nil
-
- case <-result:
- }
- }
-
-	// Let all goroutines finish
- wg.Wait()
-
- m.restoreModeLock.Lock()
- atomic.StoreInt32(m.restoreMode, 0)
- m.restoreLoaded.Range(func(k, v interface{}) bool {
- m.restoreLoaded.Delete(k)
- return true
- })
- m.restoreLocks = nil
- m.restoreModeLock.Unlock()
-
- m.logger.Info("lease restore complete")
- return nil
-}
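
Restore is a classic bounded worker pool: a producer goroutine feeds a broker channel, ExpirationRestoreWorkerCount consumers drain it, and buffered error/result channels let the coordinator bail out on the first failure. A minimal standalone sketch of the same shape (workerCount and process are illustrative placeholders):

package main

import (
	"fmt"
	"sync"
)

func process(item string) error { return nil } // placeholder for per-item work

func main() {
	const workerCount = 4
	items := []string{"a", "b", "c", "d", "e"}

	broker := make(chan string)
	results := make(chan error, len(items)) // buffered to prevent deadlock

	var wg sync.WaitGroup
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for item := range broker { // exits when broker is closed
				results <- process(item)
			}
		}()
	}

	go func() {
		for _, it := range items {
			broker <- it
		}
		close(broker) // lets idle workers return
	}()

	for range items {
		if err := <-results; err != nil {
			fmt.Println("item failed:", err)
		}
	}
	wg.Wait()
}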
-
-// processRestore takes a lease and restores it in the expiration manager if it has
-// not already been seen
-func (m *ExpirationManager) processRestore(ctx context.Context, leaseID string) error {
- m.restoreRequestLock.RLock()
- defer m.restoreRequestLock.RUnlock()
-
- // Check if the lease has been seen
- if _, ok := m.restoreLoaded.Load(leaseID); ok {
- return nil
- }
-
- m.lockLease(leaseID)
- defer m.unlockLease(leaseID)
-
- // Check again with the lease locked
- if _, ok := m.restoreLoaded.Load(leaseID); ok {
- return nil
- }
-
- // Load lease and restore expiration timer
- _, err := m.loadEntryInternal(ctx, leaseID, true, false)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Stop is used to prevent further automatic revocations.
-// This must be called before sealing the view.
-func (m *ExpirationManager) Stop() error {
- // Stop all the pending expiration timers
- m.logger.Debug("stop triggered")
- defer m.logger.Debug("finished stopping")
-
- // Do this before stopping pending timers to avoid potential races with
- // expiring timers
- close(m.quitCh)
-
- m.pendingLock.Lock()
- for _, pending := range m.pending {
- pending.timer.Stop()
- }
- m.pending = make(map[string]pendingInfo)
- m.pendingLock.Unlock()
-
-	// Wait for any in-flight restore to finish
-	for m.inRestoreMode() {
-		time.Sleep(10 * time.Millisecond)
-	}
-
- return nil
-}
-
-// Revoke is used to revoke a secret named by the given LeaseID
-func (m *ExpirationManager) Revoke(ctx context.Context, leaseID string) error {
- defer metrics.MeasureSince([]string{"expire", "revoke"}, time.Now())
-
- return m.revokeCommon(ctx, leaseID, false, false)
-}
-
-// LazyRevoke is used to queue revocation for a secret named by the given
-// LeaseID. If the lease is not found it returns nil; if it is found, a 202
-// is ultimately returned to the caller.
-func (m *ExpirationManager) LazyRevoke(ctx context.Context, leaseID string) error {
- defer metrics.MeasureSince([]string{"expire", "lazy-revoke"}, time.Now())
-
- // Load the entry
- le, err := m.loadEntry(ctx, leaseID)
- if err != nil {
- return err
- }
-
- // If there is no entry, nothing to revoke
- if le == nil {
- return nil
- }
-
- le.ExpireTime = time.Now()
- {
- m.pendingLock.Lock()
- if err := m.persistEntry(ctx, le); err != nil {
- m.pendingLock.Unlock()
- return err
- }
-
- m.updatePendingInternal(le, 0)
- m.pendingLock.Unlock()
- }
-
- return nil
-}
-
-// revokeCommon does the heavy lifting. If force is true, we ignore a problem
-// during revocation and still remove entries/index/lease timers
-func (m *ExpirationManager) revokeCommon(ctx context.Context, leaseID string, force, skipToken bool) error {
- defer metrics.MeasureSince([]string{"expire", "revoke-common"}, time.Now())
-
- // Load the entry
- le, err := m.loadEntry(ctx, leaseID)
- if err != nil {
- return err
- }
-
- // If there is no entry, nothing to revoke
- if le == nil {
- return nil
- }
-
- // Revoke the entry
- if !skipToken || le.Auth == nil {
- if err := m.revokeEntry(ctx, le); err != nil {
- if !force {
- return err
- }
-
- if m.logger.IsWarn() {
- m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err)
- }
- }
- }
-
- // Delete the entry
- if err := m.deleteEntry(ctx, le); err != nil {
- return err
- }
-
- // Delete the secondary index, but only if it's a leased secret (not auth)
- if le.Secret != nil {
- if err := m.removeIndexByToken(ctx, le); err != nil {
- return err
- }
- }
-
- // Clear the expiration handler
- m.pendingLock.Lock()
- if pending, ok := m.pending[leaseID]; ok {
- pending.timer.Stop()
- delete(m.pending, leaseID)
- }
- m.pendingLock.Unlock()
-
- if m.logger.IsInfo() && !skipToken && m.logLeaseExpirations {
- m.logger.Info("revoked lease", "lease_id", leaseID)
- }
-
- return nil
-}
-
-// RevokeForce works similarly to RevokePrefix but continues in the case of a
-// revocation error; this is mostly meant for recovery operations
-func (m *ExpirationManager) RevokeForce(ctx context.Context, prefix string) error {
- defer metrics.MeasureSince([]string{"expire", "revoke-force"}, time.Now())
-
- return m.revokePrefixCommon(ctx, prefix, true, true)
-}
-
-// RevokePrefix is used to revoke all secrets with a given prefix.
-// The prefix maps to that of the mount table to make this simpler
-// to reason about.
-func (m *ExpirationManager) RevokePrefix(ctx context.Context, prefix string, sync bool) error {
- defer metrics.MeasureSince([]string{"expire", "revoke-prefix"}, time.Now())
-
- return m.revokePrefixCommon(ctx, prefix, false, sync)
-}
-
-// RevokeByToken is used to revoke all the secrets issued with a given token.
-// This is done by using the secondary index. It also removes the lease entry
-// for the token itself. As a result it should *ONLY* ever be called from the
-// token store's revokeSalted function.
-func (m *ExpirationManager) RevokeByToken(ctx context.Context, te *logical.TokenEntry) error {
- defer metrics.MeasureSince([]string{"expire", "revoke-by-token"}, time.Now())
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core)
- if err != nil {
- return err
- }
- if tokenNS == nil {
- return namespace.ErrNoNamespace
- }
-
- tokenCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- // Lookup the leases
- existing, err := m.lookupLeasesByToken(tokenCtx, te)
- if err != nil {
- return errwrap.Wrapf("failed to scan for leases: {{err}}", err)
- }
-
- // Revoke all the keys
- for _, leaseID := range existing {
- // Load the entry
- le, err := m.loadEntry(ctx, leaseID)
- if err != nil {
- return err
- }
-
- // If there's a lease, set expiration to now, persist, and call
- // updatePending to hand off revocation to the expiration manager's pending
- // timer map
- if le != nil {
- le.ExpireTime = time.Now()
-
- {
- m.pendingLock.Lock()
- if err := m.persistEntry(ctx, le); err != nil {
- m.pendingLock.Unlock()
- return err
- }
-
- m.updatePendingInternal(le, 0)
- m.pendingLock.Unlock()
- }
- }
- }
-
- // te.Path should never be empty, but we check just in case
- if te.Path != "" {
- saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- saltedID, err := m.tokenStore.SaltID(saltCtx, te.ID)
- if err != nil {
- return err
- }
- tokenLeaseID := path.Join(te.Path, saltedID)
-
- if tokenNS.ID != namespace.RootNamespaceID {
- tokenLeaseID = fmt.Sprintf("%s.%s", tokenLeaseID, tokenNS.ID)
- }
-
- // We want to skip the revokeEntry call as that will call back into
- // revocation logic in the token store, which is what is running this
- // function in the first place -- it'd be a deadlock loop. Since the only
- // place that this function is called is revokeSalted in the token store,
- // we're already revoking the token, so we just want to clean up the lease.
- // This avoids spurious revocations later in the log when the timer runs
- // out, and eases up resource usage.
- return m.revokeCommon(ctx, tokenLeaseID, false, true)
- }
-
- return nil
-}
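
The token lease ID built above follows the file's standard recipe: join the token entry's path with the salted token ID, then append ".<namespace ID>" unless the namespace is root. A standalone sketch with hypothetical values (the salted ID and namespace ID are made up for illustration):

package main

import (
	"fmt"
	"path"
)

func main() {
	const rootNamespaceID = "root" // mirrors namespace.RootNamespaceID
	saltedID := "h4shedT0ken"      // hypothetical salted token ID
	nsID := "ns1"                  // hypothetical child-namespace ID

	leaseID := path.Join("auth/token/create", saltedID)
	if nsID != rootNamespaceID {
		leaseID = fmt.Sprintf("%s.%s", leaseID, nsID)
	}
	fmt.Println(leaseID) // auth/token/create/h4shedT0ken.ns1
}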
-
-func (m *ExpirationManager) revokePrefixCommon(ctx context.Context, prefix string, force, sync bool) error {
- if m.inRestoreMode() {
- m.restoreRequestLock.Lock()
- defer m.restoreRequestLock.Unlock()
- }
-
- // Ensure there is a trailing slash; or, if there is no slash, see if there
- // is a matching specific ID
- if !strings.HasSuffix(prefix, "/") {
- le, err := m.loadEntry(ctx, prefix)
- if err == nil && le != nil {
- if sync {
- if err := m.revokeCommon(ctx, prefix, force, false); err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q: {{err}}", prefix), err)
- }
- return nil
- }
- return m.LazyRevoke(ctx, prefix)
- }
- prefix = prefix + "/"
- }
-
- // Accumulate existing leases
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- view := m.leaseView(ns)
- sub := view.SubView(prefix)
- existing, err := logical.CollectKeys(ctx, sub)
- if err != nil {
- return errwrap.Wrapf("failed to scan for leases: {{err}}", err)
- }
-
- // Revoke all the keys
- for idx, suffix := range existing {
- leaseID := prefix + suffix
- switch {
- case sync:
- if err := m.revokeCommon(ctx, leaseID, force, false); err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q (%d / %d): {{err}}", leaseID, idx+1, len(existing)), err)
- }
- default:
- if err := m.LazyRevoke(ctx, leaseID); err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to revoke %q (%d / %d): {{err}}", leaseID, idx+1, len(existing)), err)
- }
- }
- }
-
- return nil
-}
-
-// Renew is used to renew a secret using the given leaseID
-// and a renew interval. The increment may be ignored.
-func (m *ExpirationManager) Renew(ctx context.Context, leaseID string, increment time.Duration) (*logical.Response, error) {
- defer metrics.MeasureSince([]string{"expire", "renew"}, time.Now())
-
- // Load the entry
- le, err := m.loadEntry(ctx, leaseID)
- if err != nil {
- return nil, err
- }
-
- // Check if the lease is renewable
- if _, err := le.renewable(); err != nil {
- return nil, err
- }
-
- if le.Secret == nil {
- if le.Auth != nil {
- return logical.ErrorResponse("tokens cannot be renewed through this endpoint"), logical.ErrPermissionDenied
- }
- return logical.ErrorResponse("lease does not correspond to a secret"), nil
- }
-
- reqNS, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if reqNS.ID != le.namespace.ID {
- return nil, errors.New("cannot renew a lease across namespaces")
- }
-
- sysViewCtx := namespace.ContextWithNamespace(ctx, le.namespace)
- sysView := m.router.MatchingSystemView(sysViewCtx, le.Path)
- if sysView == nil {
- return nil, fmt.Errorf("unable to retrieve system view from router")
- }
-
- // Attempt to renew the entry
- resp, err := m.renewEntry(ctx, le, increment)
- if err != nil {
- return nil, err
- }
- if resp == nil {
- return nil, nil
- }
- if resp.IsError() {
- return &logical.Response{
- Data: resp.Data,
- }, nil
- }
- if resp.Secret == nil {
- return nil, nil
- }
-
- ttl, warnings, err := framework.CalculateTTL(sysView, increment, resp.Secret.TTL, 0, resp.Secret.MaxTTL, 0, le.IssueTime)
- if err != nil {
- return nil, err
- }
- for _, warning := range warnings {
- resp.AddWarning(warning)
- }
- resp.Secret.TTL = ttl
-
- // Attach the LeaseID
- resp.Secret.LeaseID = leaseID
-
- // Update the lease entry
- le.Data = resp.Data
- le.Secret = resp.Secret
- le.ExpireTime = resp.Secret.ExpirationTime()
- le.LastRenewalTime = time.Now()
-
- // If the token it's associated with is a batch token, constrain lease
- // times
- if le.ClientTokenType == logical.TokenTypeBatch {
- te, err := m.tokenStore.Lookup(ctx, le.ClientToken)
- if err != nil {
- return nil, err
- }
- if te == nil {
- return nil, errors.New("cannot renew lease, no valid associated token")
- }
- tokenLeaseTimes, err := m.FetchLeaseTimesByToken(ctx, te)
- if err != nil {
- return nil, err
- }
- if le.ExpireTime.After(tokenLeaseTimes.ExpireTime) {
- resp.Secret.TTL = tokenLeaseTimes.ExpireTime.Sub(le.LastRenewalTime)
- le.ExpireTime = tokenLeaseTimes.ExpireTime
- }
- }
-
- {
- m.pendingLock.Lock()
- if err := m.persistEntry(ctx, le); err != nil {
- m.pendingLock.Unlock()
- return nil, err
- }
-
- // Update the expiration time
- m.updatePendingInternal(le, resp.Secret.LeaseTotal())
- m.pendingLock.Unlock()
- }
-
- // Return the response
- return resp, nil
-}
-
-// RenewToken is used to renew a token which does not need to
-// invoke a logical backend.
-func (m *ExpirationManager) RenewToken(ctx context.Context, req *logical.Request, te *logical.TokenEntry,
- increment time.Duration) (*logical.Response, error) {
- defer metrics.MeasureSince([]string{"expire", "renew-token"}, time.Now())
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core)
- if err != nil {
- return nil, err
- }
- if tokenNS == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns.ID != tokenNS.ID {
- return nil, errors.New("cannot renew a token across namespaces")
- }
-
- // Compute the Lease ID
- saltedID, err := m.tokenStore.SaltID(ctx, te.ID)
- if err != nil {
- return nil, err
- }
-
- leaseID := path.Join(te.Path, saltedID)
-
- if ns.ID != namespace.RootNamespaceID {
- leaseID = fmt.Sprintf("%s.%s", leaseID, ns.ID)
- }
-
- // Load the entry
- le, err := m.loadEntry(ctx, leaseID)
- if err != nil {
- return nil, err
- }
- if le == nil {
- return logical.ErrorResponse("invalid lease ID"), logical.ErrInvalidRequest
- }
-
- // Check if the lease is renewable. Note that this also checks for a nil
- // lease and errors in that case as well.
- if _, err := le.renewable(); err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-
- // Attempt to renew the auth entry
- resp, err := m.renewAuthEntry(ctx, req, le, increment)
- if err != nil {
- return nil, err
- }
- if resp == nil {
- return nil, nil
- }
- if resp.IsError() {
- return &logical.Response{
- Data: resp.Data,
- }, nil
- }
- if resp.Auth == nil {
- return nil, nil
- }
-
- sysViewCtx := namespace.ContextWithNamespace(ctx, le.namespace)
- sysView := m.router.MatchingSystemView(sysViewCtx, le.Path)
- if sysView == nil {
- return nil, fmt.Errorf("unable to retrieve system view from router")
- }
-
- ttl, warnings, err := framework.CalculateTTL(sysView, increment, resp.Auth.TTL, resp.Auth.Period, resp.Auth.MaxTTL, resp.Auth.ExplicitMaxTTL, le.IssueTime)
- if err != nil {
- return nil, err
- }
- retResp := &logical.Response{}
- for _, warning := range warnings {
- retResp.AddWarning(warning)
- }
- resp.Auth.TTL = ttl
-
- // Attach the ClientToken
- resp.Auth.ClientToken = te.ID
-
- // Refresh groups
- if resp.Auth.EntityID != "" &&
- resp.Auth.GroupAliases != nil &&
- m.core.identityStore != nil {
- validAliases, err := m.core.identityStore.refreshExternalGroupMembershipsByEntityID(resp.Auth.EntityID, resp.Auth.GroupAliases)
- if err != nil {
- return nil, err
- }
- resp.Auth.GroupAliases = validAliases
- }
-
- // Update the lease entry
- le.Auth = resp.Auth
- le.ExpireTime = resp.Auth.ExpirationTime()
- le.LastRenewalTime = time.Now()
-
- {
- m.pendingLock.Lock()
- if err := m.persistEntry(ctx, le); err != nil {
- m.pendingLock.Unlock()
- return nil, err
- }
-
- // Update the expiration time
- m.updatePendingInternal(le, resp.Auth.LeaseTotal())
- m.pendingLock.Unlock()
- }
-
- retResp.Auth = resp.Auth
- return retResp, nil
-}
-
-// Register is used to take a request and response with an associated
-// lease. The secret gets assigned a LeaseID and the management of
-// the lease is assumed by the expiration manager.
-func (m *ExpirationManager) Register(ctx context.Context, req *logical.Request, resp *logical.Response) (id string, retErr error) {
- defer metrics.MeasureSince([]string{"expire", "register"}, time.Now())
-
- te := req.TokenEntry()
- if te == nil {
- return "", fmt.Errorf("cannot register a lease with an empty client token")
- }
-
- // Ignore if there is no leased secret
- if resp == nil || resp.Secret == nil {
- return "", nil
- }
-
- // Validate the secret
- if err := resp.Secret.Validate(); err != nil {
- return "", err
- }
-
- // Create a lease entry
- leaseRand, err := base62.Random(TokenLength, true)
- if err != nil {
- return "", err
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return "", err
- }
-
- leaseID := path.Join(req.Path, leaseRand)
-
- if ns.ID != namespace.RootNamespaceID {
- leaseID = fmt.Sprintf("%s.%s", leaseID, ns.ID)
- }
-
- le := &leaseEntry{
- LeaseID: leaseID,
- ClientToken: req.ClientToken,
- ClientTokenType: te.Type,
- Path: req.Path,
- Data: resp.Data,
- Secret: resp.Secret,
- IssueTime: time.Now(),
- ExpireTime: resp.Secret.ExpirationTime(),
- namespace: ns,
- }
-
- defer func() {
- // If there is an error we want to rollback as much as possible (note
- // that errors here are ignored to do as much cleanup as we can). We
- // want to revoke a generated secret (since an error means we may not
- // be successfully tracking it), remove indexes, and delete the entry.
- if retErr != nil {
- revokeCtx := namespace.ContextWithNamespace(m.quitContext, ns)
- revResp, err := m.router.Route(revokeCtx, logical.RevokeRequest(req.Path, resp.Secret, resp.Data))
- if err != nil {
- retErr = multierror.Append(retErr, errwrap.Wrapf("an additional internal error was encountered revoking the newly-generated secret: {{err}}", err))
- } else if revResp != nil && revResp.IsError() {
- retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered revoking the newly-generated secret: {{err}}", revResp.Error()))
- }
-
- if err := m.deleteEntry(ctx, le); err != nil {
- retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered deleting any lease associated with the newly-generated secret: {{err}}", err))
- }
-
- if err := m.removeIndexByToken(ctx, le); err != nil {
- retErr = multierror.Append(retErr, errwrap.Wrapf("an additional error was encountered removing lease indexes associated with the newly-generated secret: {{err}}", err))
- }
- }
- }()
-
- // If the token is a batch token, we want to constrain the maximum lifetime
- // by the token's lifetime
- if te.Type == logical.TokenTypeBatch {
- tokenLeaseTimes, err := m.FetchLeaseTimesByToken(ctx, te)
- if err != nil {
- return "", err
- }
- if le.ExpireTime.After(tokenLeaseTimes.ExpireTime) {
- le.ExpireTime = tokenLeaseTimes.ExpireTime
- }
- }
-
- // Encode the entry
- if err := m.persistEntry(ctx, le); err != nil {
- return "", err
- }
-
- // Maintain secondary index by token, except for orphan batch tokens
- switch {
- case te.Type != logical.TokenTypeBatch:
- if err := m.createIndexByToken(ctx, le, le.ClientToken); err != nil {
- return "", err
- }
- case te.Parent != "":
- // If it's a non-orphan batch token, assign the secondary index to its
- // parent
- if err := m.createIndexByToken(ctx, le, te.Parent); err != nil {
- return "", err
- }
- }
-
- // Setup revocation timer if there is a lease
- m.updatePending(le, resp.Secret.LeaseTotal())
-
- // Done
- return le.LeaseID, nil
-}
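
Register relies on a common Go idiom: the named return value retErr is inspected inside a defer, so every error exit triggers best-effort rollback without duplicating cleanup at each return site. A minimal sketch of the idiom with placeholder helpers:

func createWithRollback() (id string, retErr error) {
	id, retErr = createResource()
	if retErr != nil {
		return "", retErr
	}
	defer func() {
		// Runs on every exit; rolls back only if a later step failed.
		if retErr != nil {
			rollback(id)
		}
	}()
	retErr = indexResource(id)
	return id, retErr
}

func createResource() (string, error) { return "res-1", nil } // placeholder
func indexResource(id string) error   { return nil }          // placeholder
func rollback(id string)              {}                      // placeholder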
-
-// RegisterAuth is used to take an Auth response with an associated lease.
-// The token does not get a LeaseID, but the lease management is handled by
-// the expiration manager.
-func (m *ExpirationManager) RegisterAuth(ctx context.Context, te *logical.TokenEntry, auth *logical.Auth) error {
- defer metrics.MeasureSince([]string{"expire", "register-auth"}, time.Now())
-
- if te.Type == logical.TokenTypeBatch {
- return errors.New("cannot register a lease for a batch token")
- }
-
- if auth.ClientToken == "" {
- return errors.New("cannot register an auth lease with an empty token")
- }
-
- if strings.Contains(te.Path, "..") {
- return consts.ErrPathContainsParentReferences
- }
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core)
- if err != nil {
- return err
- }
- if tokenNS == nil {
- return namespace.ErrNoNamespace
- }
-
- saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- saltedID, err := m.tokenStore.SaltID(saltCtx, auth.ClientToken)
- if err != nil {
- return err
- }
-
- leaseID := path.Join(te.Path, saltedID)
- if tokenNS.ID != namespace.RootNamespaceID {
- leaseID = fmt.Sprintf("%s.%s", leaseID, tokenNS.ID)
- }
-
- // Create a lease entry
- le := leaseEntry{
- LeaseID: leaseID,
- ClientToken: auth.ClientToken,
- Auth: auth,
- Path: te.Path,
- IssueTime: time.Now(),
- ExpireTime: auth.ExpirationTime(),
- namespace: tokenNS,
- }
-
- // Encode the entry
- if err := m.persistEntry(ctx, &le); err != nil {
- return err
- }
-
- // Setup revocation timer
- m.updatePending(&le, auth.LeaseTotal())
-
- return nil
-}
-
-// FetchLeaseTimesByToken is a helper function to use token values to compute
-// the leaseID, rather than pushing that logic back into the token store.
-// As a special case, for a batch token it simply returns the information
-// encoded on it.
-func (m *ExpirationManager) FetchLeaseTimesByToken(ctx context.Context, te *logical.TokenEntry) (*leaseEntry, error) {
- defer metrics.MeasureSince([]string{"expire", "fetch-lease-times-by-token"}, time.Now())
-
- if te == nil {
- return nil, errors.New("cannot fetch lease times for nil token")
- }
-
- if te.Type == logical.TokenTypeBatch {
- issueTime := time.Unix(te.CreationTime, 0)
- return &leaseEntry{
- IssueTime: issueTime,
- ExpireTime: issueTime.Add(te.TTL),
- ClientTokenType: logical.TokenTypeBatch,
- }, nil
- }
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core)
- if err != nil {
- return nil, err
- }
- if tokenNS == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- saltedID, err := m.tokenStore.SaltID(saltCtx, te.ID)
- if err != nil {
- return nil, err
- }
-
- leaseID := path.Join(te.Path, saltedID)
-
- if tokenNS.ID != namespace.RootNamespaceID {
- leaseID = fmt.Sprintf("%s.%s", leaseID, tokenNS.ID)
- }
-
- return m.FetchLeaseTimes(ctx, leaseID)
-}
-
-// FetchLeaseTimes is used to fetch the issue time, expiration time, and last
-// renewed time of a lease entry. It returns a leaseEntry itself, but with only
-// those values copied over.
-func (m *ExpirationManager) FetchLeaseTimes(ctx context.Context, leaseID string) (*leaseEntry, error) {
- defer metrics.MeasureSince([]string{"expire", "fetch-lease-times"}, time.Now())
-
- m.pendingLock.RLock()
- val := m.pending[leaseID]
- m.pendingLock.RUnlock()
-
- if val.exportLeaseTimes != nil {
- return val.exportLeaseTimes, nil
- }
-
- // Load the entry
- le, err := m.loadEntryInternal(ctx, leaseID, true, false)
- if err != nil {
- return nil, err
- }
- if le == nil {
- return nil, nil
- }
-
- return m.leaseTimesForExport(le), nil
-}
-
-// Returns lease times for outside callers based on the full leaseEntry passed in
-func (m *ExpirationManager) leaseTimesForExport(le *leaseEntry) *leaseEntry {
- ret := &leaseEntry{
- IssueTime: le.IssueTime,
- ExpireTime: le.ExpireTime,
- LastRenewalTime: le.LastRenewalTime,
- }
- if le.Secret != nil {
- ret.Secret = &logical.Secret{}
- ret.Secret.Renewable = le.Secret.Renewable
- ret.Secret.TTL = le.Secret.TTL
- }
- if le.Auth != nil {
- ret.Auth = &logical.Auth{}
- ret.Auth.Renewable = le.Auth.Renewable
- ret.Auth.TTL = le.Auth.TTL
- }
-
- return ret
-}
-
-// updatePending is used to update a pending invocation for a lease
-func (m *ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Duration) {
- m.pendingLock.Lock()
- defer m.pendingLock.Unlock()
-
- m.updatePendingInternal(le, leaseTotal)
-}
-
-// updatePendingInternal is the locked version of updatePending; do not call
-// this without holding a write lock on m.pendingLock
-func (m *ExpirationManager) updatePendingInternal(le *leaseEntry, leaseTotal time.Duration) {
- // Check for an existing timer
- pending, ok := m.pending[le.LeaseID]
-
- // If there is no expiry time, don't do anything
- if le.ExpireTime.IsZero() {
-		// If the timer happens to exist, stop it and delete it from the
-		// pending timers.
- if ok {
- pending.timer.Stop()
- delete(m.pending, le.LeaseID)
- }
- return
- }
-
- // Create entry if it does not exist or reset if it does
- if ok {
- pending.timer.Reset(leaseTotal)
- } else {
- timer := time.AfterFunc(leaseTotal, func() {
- m.expireFunc(m.quitContext, m, le)
- })
- pending = pendingInfo{
- timer: timer,
- }
- }
-
-	// Retain the computed lease times so FetchLeaseTimes can serve them from
-	// the pending map without reloading the entry
- pending.exportLeaseTimes = m.leaseTimesForExport(le)
-
- m.pending[le.LeaseID] = pending
-}
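
The pending map pairs each lease with a single time.Timer: renewals Reset the existing timer instead of allocating a new one, and a zero expiry stops and drops it. A hedged fragment sketching that reset-or-create pattern:

import (
	"sync"
	"time"
)

type timerMap struct {
	mu     sync.Mutex
	timers map[string]*time.Timer
}

// schedule resets the timer for key or creates one; d <= 0 cancels it.
func (t *timerMap) schedule(key string, d time.Duration, fire func(string)) {
	t.mu.Lock()
	defer t.mu.Unlock()

	if d <= 0 {
		if timer, ok := t.timers[key]; ok {
			timer.Stop()
			delete(t.timers, key)
		}
		return
	}
	if timer, ok := t.timers[key]; ok {
		timer.Reset(d)
		return
	}
	t.timers[key] = time.AfterFunc(d, func() { fire(key) })
}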
-
-// revokeEntry is used to attempt revocation of an internal entry
-func (m *ExpirationManager) revokeEntry(ctx context.Context, le *leaseEntry) error {
-	// Revocation of login tokens is special since we can bypass the
-	// backend and directly interact with the token store
- if le.Auth != nil {
- if le.ClientTokenType == logical.TokenTypeBatch {
- return errors.New("batch tokens cannot be revoked")
- }
-
- if err := m.tokenStore.revokeTree(ctx, le); err != nil {
- return errwrap.Wrapf("failed to revoke token: {{err}}", err)
- }
-
- return nil
- }
-
- if le.Secret != nil {
-		// It's not clear that a leaseEntry with a nil Secret is valid (if
-		// there's a nil Secret, what are you really leasing?), but the tests
-		// create one, so it's good to be defensive
- le.Secret.IssueTime = le.IssueTime
- }
-
- // Make sure we're operating in the right namespace
- nsCtx := namespace.ContextWithNamespace(ctx, le.namespace)
-
- // Handle standard revocation via backends
- resp, err := m.router.Route(nsCtx, logical.RevokeRequest(le.Path, le.Secret, le.Data))
- if err != nil || (resp != nil && resp.IsError()) {
- return errwrap.Wrapf(fmt.Sprintf("failed to revoke entry: resp: %#v err: {{err}}", resp), err)
- }
- return nil
-}
-
-// renewEntry is used to attempt renewal of an internal entry
-func (m *ExpirationManager) renewEntry(ctx context.Context, le *leaseEntry, increment time.Duration) (*logical.Response, error) {
- secret := *le.Secret
- secret.IssueTime = le.IssueTime
- secret.Increment = increment
- secret.LeaseID = ""
-
- // Make sure we're operating in the right namespace
- nsCtx := namespace.ContextWithNamespace(ctx, le.namespace)
-
- req := logical.RenewRequest(le.Path, &secret, le.Data)
- resp, err := m.router.Route(nsCtx, req)
- if err != nil || (resp != nil && resp.IsError()) {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to renew entry: resp: %#v err: {{err}}", resp), err)
- }
- return resp, nil
-}
-
-// renewAuthEntry is used to attempt renewal of an auth entry. Only the token
-// store should get the actual token ID intact.
-func (m *ExpirationManager) renewAuthEntry(ctx context.Context, req *logical.Request, le *leaseEntry, increment time.Duration) (*logical.Response, error) {
- if le.ClientTokenType == logical.TokenTypeBatch {
- return logical.ErrorResponse("batch tokens cannot be renewed"), nil
- }
-
- auth := *le.Auth
- auth.IssueTime = le.IssueTime
- auth.Increment = increment
- if strings.HasPrefix(le.Path, "auth/token/") {
- auth.ClientToken = le.ClientToken
- } else {
- auth.ClientToken = ""
- }
-
- // Make sure we're operating in the right namespace
- nsCtx := namespace.ContextWithNamespace(ctx, le.namespace)
-
- authReq := logical.RenewAuthRequest(le.Path, &auth, nil)
- authReq.Connection = req.Connection
- resp, err := m.router.Route(nsCtx, authReq)
- if err != nil {
- return nil, errwrap.Wrapf("failed to renew entry: {{err}}", err)
- }
- return resp, nil
-}
-
-// loadEntry is used to read a lease entry
-func (m *ExpirationManager) loadEntry(ctx context.Context, leaseID string) (*leaseEntry, error) {
- // Take out the lease locks after we ensure we are in restore mode
- restoreMode := m.inRestoreMode()
- if restoreMode {
- m.restoreModeLock.RLock()
- defer m.restoreModeLock.RUnlock()
-
- restoreMode = m.inRestoreMode()
- if restoreMode {
- m.lockLease(leaseID)
- defer m.unlockLease(leaseID)
- }
- }
-
- _, nsID := namespace.SplitIDFromString(leaseID)
- if nsID != "" {
- leaseNS, err := NamespaceByID(ctx, nsID, m.core)
- if err != nil {
- return nil, err
- }
- if leaseNS != nil {
- ctx = namespace.ContextWithNamespace(ctx, leaseNS)
- }
- } else {
- ctx = namespace.ContextWithNamespace(ctx, namespace.RootNamespace)
- }
- return m.loadEntryInternal(ctx, leaseID, restoreMode, true)
-}
-
-// loadEntryInternal is used when you need to load an entry but also need to
-// control the lifecycle of the restoreLock
-func (m *ExpirationManager) loadEntryInternal(ctx context.Context, leaseID string, restoreMode bool, checkRestored bool) (*leaseEntry, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- view := m.leaseView(ns)
- out, err := view.Get(ctx, leaseID)
- if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to read lease entry %s: {{err}}", leaseID), err)
- }
- if out == nil {
- return nil, nil
- }
- le, err := decodeLeaseEntry(out.Value)
- if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode lease entry %s: {{err}}", leaseID), err)
- }
- le.namespace = ns
-
- if restoreMode {
- if checkRestored {
- // If we have already loaded this lease, we don't need to update on
- // load. In the case of renewal and revocation, updatePending will be
- // done after making the appropriate modifications to the lease.
- if _, ok := m.restoreLoaded.Load(leaseID); ok {
- return le, nil
- }
- }
-
- // Update the cache of restored leases, either synchronously or through
- // the lazy loaded restore process
- m.restoreLoaded.Store(le.LeaseID, struct{}{})
-
- // Setup revocation timer
- m.updatePending(le, le.ExpireTime.Sub(time.Now()))
- }
- return le, nil
-}
-
-// persistEntry is used to persist a lease entry
-func (m *ExpirationManager) persistEntry(ctx context.Context, le *leaseEntry) error {
- // Encode the entry
- buf, err := le.encode()
- if err != nil {
- return errwrap.Wrapf("failed to encode lease entry: {{err}}", err)
- }
-
- // Write out to the view
- ent := logical.StorageEntry{
- Key: le.LeaseID,
- Value: buf,
- }
- if le.Auth != nil && len(le.Auth.Policies) == 1 && le.Auth.Policies[0] == "root" {
- ent.SealWrap = true
- }
-
- view := m.leaseView(le.namespace)
- if err := view.Put(ctx, &ent); err != nil {
- return errwrap.Wrapf("failed to persist lease entry: {{err}}", err)
- }
- return nil
-}
-
-// deleteEntry is used to delete a lease entry
-func (m *ExpirationManager) deleteEntry(ctx context.Context, le *leaseEntry) error {
- view := m.leaseView(le.namespace)
- if err := view.Delete(ctx, le.LeaseID); err != nil {
- return errwrap.Wrapf("failed to delete lease entry: {{err}}", err)
- }
- return nil
-}
-
-// createIndexByToken creates a secondary index from the token to a lease entry
-func (m *ExpirationManager) createIndexByToken(ctx context.Context, le *leaseEntry, token string) error {
- tokenNS := namespace.RootNamespace
- saltCtx := namespace.ContextWithNamespace(ctx, namespace.RootNamespace)
- _, nsID := namespace.SplitIDFromString(token)
-	if nsID != "" {
-		// Assign the looked-up namespace to the outer tokenNS (rather than
-		// shadowing it) so the token index view below resolves in the
-		// token's namespace.
-		ns, err := NamespaceByID(ctx, nsID, m.core)
-		if err != nil {
-			return err
-		}
-		if ns != nil {
-			tokenNS = ns
-			saltCtx = namespace.ContextWithNamespace(ctx, tokenNS)
-		}
-	}
-
- saltedID, err := m.tokenStore.SaltID(saltCtx, token)
- if err != nil {
- return err
- }
-
- leaseSaltedID, err := m.tokenStore.SaltID(saltCtx, le.LeaseID)
- if err != nil {
- return err
- }
-
- ent := logical.StorageEntry{
- Key: saltedID + "/" + leaseSaltedID,
- Value: []byte(le.LeaseID),
- }
- tokenView := m.tokenIndexView(tokenNS)
- if err := tokenView.Put(ctx, &ent); err != nil {
- return errwrap.Wrapf("failed to persist lease index entry: {{err}}", err)
- }
- return nil
-}
-
-// indexByToken looks up the secondary index from the token to a lease entry
-func (m *ExpirationManager) indexByToken(ctx context.Context, le *leaseEntry) (*logical.StorageEntry, error) {
- tokenNS := namespace.RootNamespace
- saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- _, nsID := namespace.SplitIDFromString(le.ClientToken)
-	if nsID != "" {
-		// Assign to the outer tokenNS rather than shadowing it, so the token
-		// index view below resolves in the token's namespace.
-		ns, err := NamespaceByID(ctx, nsID, m.core)
-		if err != nil {
-			return nil, err
-		}
-		if ns != nil {
-			tokenNS = ns
-			saltCtx = namespace.ContextWithNamespace(ctx, tokenNS)
-		}
-	}
-
- saltedID, err := m.tokenStore.SaltID(saltCtx, le.ClientToken)
- if err != nil {
- return nil, err
- }
-
- leaseSaltedID, err := m.tokenStore.SaltID(saltCtx, le.LeaseID)
- if err != nil {
- return nil, err
- }
-
- key := saltedID + "/" + leaseSaltedID
- tokenView := m.tokenIndexView(tokenNS)
- entry, err := tokenView.Get(ctx, key)
- if err != nil {
- return nil, fmt.Errorf("failed to look up secondary index entry")
- }
- return entry, nil
-}
-
-// removeIndexByToken removes the secondary index from the token to a lease entry
-func (m *ExpirationManager) removeIndexByToken(ctx context.Context, le *leaseEntry) error {
- tokenNS := namespace.RootNamespace
- saltCtx := namespace.ContextWithNamespace(ctx, namespace.RootNamespace)
- _, nsID := namespace.SplitIDFromString(le.ClientToken)
-	if nsID != "" {
-		// Assign to the outer tokenNS rather than shadowing it, so the token
-		// index view below resolves in the token's namespace.
-		ns, err := NamespaceByID(ctx, nsID, m.core)
-		if err != nil {
-			return err
-		}
-		if ns != nil {
-			tokenNS = ns
-			saltCtx = namespace.ContextWithNamespace(ctx, tokenNS)
-		}
-	}
-
- saltedID, err := m.tokenStore.SaltID(saltCtx, le.ClientToken)
- if err != nil {
- return err
- }
-
- leaseSaltedID, err := m.tokenStore.SaltID(saltCtx, le.LeaseID)
- if err != nil {
- return err
- }
-
- key := saltedID + "/" + leaseSaltedID
- tokenView := m.tokenIndexView(tokenNS)
- if err := tokenView.Delete(ctx, key); err != nil {
- return errwrap.Wrapf("failed to delete lease index entry: {{err}}", err)
- }
- return nil
-}
-
-// CreateOrFetchRevocationLeaseByToken is used to create or fetch the matching
-// leaseID for a particular token. The lease is set to expire immediately after
-// it's created.
-func (m *ExpirationManager) CreateOrFetchRevocationLeaseByToken(ctx context.Context, te *logical.TokenEntry) (string, error) {
- // Fetch the saltedID of the token and construct the leaseID
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core)
- if err != nil {
- return "", err
- }
- if tokenNS == nil {
- return "", namespace.ErrNoNamespace
- }
-
- saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- saltedID, err := m.tokenStore.SaltID(saltCtx, te.ID)
- if err != nil {
- return "", err
- }
- leaseID := path.Join(te.Path, saltedID)
-
- if tokenNS.ID != namespace.RootNamespaceID {
- leaseID = fmt.Sprintf("%s.%s", leaseID, tokenNS.ID)
- }
-
- // Load the entry
- le, err := m.loadEntry(ctx, leaseID)
- if err != nil {
- return "", err
- }
-
- // If there's no associated leaseEntry for the token, we create one
- if le == nil {
- auth := &logical.Auth{
- ClientToken: te.ID,
- LeaseOptions: logical.LeaseOptions{
- TTL: time.Nanosecond,
- },
- }
-
- if strings.Contains(te.Path, "..") {
- return "", consts.ErrPathContainsParentReferences
- }
-
- // Create a lease entry
- now := time.Now()
- le = &leaseEntry{
- LeaseID: leaseID,
- ClientToken: auth.ClientToken,
- Auth: auth,
- Path: te.Path,
- IssueTime: now,
- ExpireTime: now.Add(time.Nanosecond),
- namespace: tokenNS,
- }
-
- // Encode the entry
- if err := m.persistEntry(ctx, le); err != nil {
- return "", err
- }
- }
-
- return le.LeaseID, nil
-}
-
-// lookupLeasesByToken is used to look up all the lease IDs via the token ID
-func (m *ExpirationManager) lookupLeasesByToken(ctx context.Context, te *logical.TokenEntry) ([]string, error) {
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, m.core)
- if err != nil {
- return nil, err
- }
- if tokenNS == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- saltedID, err := m.tokenStore.SaltID(saltCtx, te.ID)
- if err != nil {
- return nil, err
- }
-
- tokenView := m.tokenIndexView(tokenNS)
-
- // Scan via the index for sub-leases
- prefix := saltedID + "/"
- subKeys, err := tokenView.List(ctx, prefix)
- if err != nil {
- return nil, errwrap.Wrapf("failed to list leases: {{err}}", err)
- }
-
- // Read each index entry
- leaseIDs := make([]string, 0, len(subKeys))
- for _, sub := range subKeys {
- out, err := tokenView.Get(ctx, prefix+sub)
- if err != nil {
- return nil, errwrap.Wrapf("failed to read lease index: {{err}}", err)
- }
- if out == nil {
- continue
- }
- leaseIDs = append(leaseIDs, string(out.Value))
- }
- return leaseIDs, nil
-}
-
-// emitMetrics is invoked periodically to emit statistics
-func (m *ExpirationManager) emitMetrics() {
- m.pendingLock.RLock()
- num := len(m.pending)
- m.pendingLock.RUnlock()
- metrics.SetGauge([]string{"expire", "num_leases"}, float32(num))
- // Check if lease count is greater than the threshold
- if num > maxLeaseThreshold {
- if atomic.LoadUint32(m.leaseCheckCounter) > 59 {
- m.logger.Warn("lease count exceeds warning lease threshold")
- atomic.StoreUint32(m.leaseCheckCounter, 0)
- } else {
- atomic.AddUint32(m.leaseCheckCounter, 1)
- }
- }
-}
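
Assuming emitMetrics runs on a fixed tick, leaseCheckCounter acts as a crude rate limiter: while the lease count stays above maxLeaseThreshold, the warning fires on roughly every 60th tick instead of every tick. The same pattern in isolation (fragment):

import "sync/atomic"

var checkCounter uint32

// maybeWarn invokes warn at most once per 60 over-threshold observations.
func maybeWarn(num, threshold int, warn func()) {
	if num <= threshold {
		return
	}
	if atomic.LoadUint32(&checkCounter) > 59 {
		warn()
		atomic.StoreUint32(&checkCounter, 0)
	} else {
		atomic.AddUint32(&checkCounter, 1)
	}
}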
-
-// leaseEntry is used to structure the values the expiration
-// manager stores. This is used to handle renewal and revocation.
-type leaseEntry struct {
- LeaseID string `json:"lease_id"`
- ClientToken string `json:"client_token"`
- ClientTokenType logical.TokenType `json:"token_type"`
- Path string `json:"path"`
- Data map[string]interface{} `json:"data"`
- Secret *logical.Secret `json:"secret"`
- Auth *logical.Auth `json:"auth"`
- IssueTime time.Time `json:"issue_time"`
- ExpireTime time.Time `json:"expire_time"`
- LastRenewalTime time.Time `json:"last_renewal_time"`
-
- namespace *namespace.Namespace
-}
-
-// encode is used to JSON encode the lease entry
-func (le *leaseEntry) encode() ([]byte, error) {
- return json.Marshal(le)
-}
-
-func (le *leaseEntry) renewable() (bool, error) {
- switch {
-	// If there is no entry, there is nothing to renew
- case le == nil:
- return false, fmt.Errorf("lease not found")
-
- case le.ExpireTime.IsZero():
- return false, fmt.Errorf("lease is not renewable")
-
- case le.ClientTokenType == logical.TokenTypeBatch:
- return false, nil
-
- // Determine if the lease is expired
- case le.ExpireTime.Before(time.Now()):
- return false, fmt.Errorf("lease expired")
-
- // Determine if the lease is renewable
- case le.Secret != nil && !le.Secret.Renewable:
- return false, fmt.Errorf("lease is not renewable")
-
- case le.Auth != nil && !le.Auth.Renewable:
- return false, fmt.Errorf("lease is not renewable")
- }
-
- return true, nil
-}
-
-func (le *leaseEntry) ttl() int64 {
- return int64(le.ExpireTime.Sub(time.Now().Round(time.Second)).Seconds())
-}
-
-// decodeLeaseEntry is the inverse of encode; it decodes buf into a new entry
-func decodeLeaseEntry(buf []byte) (*leaseEntry, error) {
- out := new(leaseEntry)
- return out, jsonutil.DecodeJSON(buf, out)
-}
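
encode and decodeLeaseEntry are a straight JSON round-trip. Note that the unexported namespace field carries no JSON tag and is never marshaled, which is why loadEntryInternal re-attaches the namespace after decoding. A quick sketch of the round-trip (fragment; assumes the types above are in scope):

le := &leaseEntry{LeaseID: "secret/foo/abc123", Path: "secret/foo"}
buf, err := le.encode()
if err != nil {
	panic(err)
}
out, err := decodeLeaseEntry(buf)
if err != nil {
	panic(err)
}
fmt.Println(out.LeaseID == le.LeaseID) // true
fmt.Println(out.namespace == nil)      // true: unexported, not serialized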
diff --git a/vendor/github.com/hashicorp/vault/vault/expiration_util.go b/vendor/github.com/hashicorp/vault/vault/expiration_util.go
deleted file mode 100644
index ab1454a2..00000000
--- a/vendor/github.com/hashicorp/vault/vault/expiration_util.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
-)
-
-func (m *ExpirationManager) leaseView(*namespace.Namespace) *BarrierView {
- return m.idView
-}
-
-func (m *ExpirationManager) tokenIndexView(*namespace.Namespace) *BarrierView {
- return m.tokenView
-}
-
-func (m *ExpirationManager) collectLeases() (map[*namespace.Namespace][]string, int, error) {
- leaseCount := 0
- existing := make(map[*namespace.Namespace][]string)
- keys, err := logical.CollectKeys(m.quitContext, m.leaseView(namespace.RootNamespace))
- if err != nil {
- return nil, 0, errwrap.Wrapf("failed to scan for leases: {{err}}", err)
- }
- existing[namespace.RootNamespace] = keys
- leaseCount += len(keys)
- return existing, leaseCount, nil
-}
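
The "+build !enterprise" constraint at the top of this file is the standard Go pattern for per-edition stubs: the open-source build compiles these single-namespace versions, while an enterprise-tagged counterpart file (not vendored here) would supply namespace-aware implementations. A generic sketch of the pairing, with hypothetical file and function names:

// file: feature_oss.go
// +build !enterprise

package vault

func featureEnabled() bool { return false } // OSS stub

// file: feature_ent.go (hypothetical enterprise counterpart, shown as comments)
// // +build enterprise
// package vault
// func featureEnabled() bool { return true }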
diff --git a/vendor/github.com/hashicorp/vault/vault/generate_root.go b/vendor/github.com/hashicorp/vault/vault/generate_root.go
deleted file mode 100644
index 43443424..00000000
--- a/vendor/github.com/hashicorp/vault/vault/generate_root.go
+++ /dev/null
@@ -1,369 +0,0 @@
-package vault
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "fmt"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/helper/xor"
- "github.com/hashicorp/vault/shamir"
-)
-
-const coreDROperationTokenPath = "core/dr-operation-token"
-
-var (
- // GenerateStandardRootTokenStrategy is the strategy used to generate a
- // typical root token
- GenerateStandardRootTokenStrategy GenerateRootStrategy = generateStandardRootToken{}
-
- // GenerateDROperationTokenStrategy is the strategy used to generate a
-	// DR operation token
- GenerateDROperationTokenStrategy GenerateRootStrategy = generateStandardRootToken{}
-)
-
-// GenerateRootStrategy allows us to swap out the strategy we want to use to
-// create a token upon completion of the generate root process.
-type GenerateRootStrategy interface {
- generate(context.Context, *Core) (string, func(), error)
-}
-
-// generateStandardRootToken implements the GenerateRootStrategy and is in
-// charge of creating standard root tokens.
-type generateStandardRootToken struct{}
-
-func (g generateStandardRootToken) generate(ctx context.Context, c *Core) (string, func(), error) {
- te, err := c.tokenStore.rootToken(ctx)
- if err != nil {
- c.logger.Error("root token generation failed", "error", err)
- return "", nil, err
- }
- if te == nil {
- c.logger.Error("got nil token entry back from root generation")
- return "", nil, fmt.Errorf("got nil token entry back from root generation")
- }
-
- cleanupFunc := func() {
- c.tokenStore.revokeOrphan(ctx, te.ID)
- }
-
- return te.ID, cleanupFunc, nil
-}
-
-// GenerateRootConfig holds the configuration for a root generation
-// command.
-type GenerateRootConfig struct {
- Nonce string
- PGPKey string
- PGPFingerprint string
- OTP string
- Strategy GenerateRootStrategy
-}
-
-// GenerateRootResult holds the result of a root generation update
-// command
-type GenerateRootResult struct {
- Progress int
- Required int
- EncodedToken string
- PGPFingerprint string
-}
-
-// GenerateRootProgress is used to return the root generation progress (num shares)
-func (c *Core) GenerateRootProgress() (int, error) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return 0, consts.ErrSealed
- }
- if c.standby {
- return 0, consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- return len(c.generateRootProgress), nil
-}
-
-// GenerateRootConfiguration is used to read the root generation configuration.
-// It stubbornly refuses to return the OTP if one is there.
-func (c *Core) GenerateRootConfiguration() (*GenerateRootConfig, error) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil, consts.ErrSealed
- }
- if c.standby {
- return nil, consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- // Copy the config if any
- var conf *GenerateRootConfig
- if c.generateRootConfig != nil {
- conf = new(GenerateRootConfig)
- *conf = *c.generateRootConfig
- conf.OTP = ""
- conf.Strategy = nil
- }
- return conf, nil
-}
-
-// GenerateRootInit is used to initialize the root generation settings
-func (c *Core) GenerateRootInit(otp, pgpKey string, strategy GenerateRootStrategy) error {
- var fingerprint string
- switch {
- case len(otp) > 0:
- if len(otp) != TokenLength+2 {
- return fmt.Errorf("OTP string is wrong length")
- }
-
- case len(pgpKey) > 0:
- fingerprints, err := pgpkeys.GetFingerprints([]string{pgpKey}, nil)
- if err != nil {
- return errwrap.Wrapf("error parsing PGP key: {{err}}", err)
- }
- if len(fingerprints) != 1 || fingerprints[0] == "" {
- return fmt.Errorf("could not acquire PGP key entity")
- }
- fingerprint = fingerprints[0]
-
- default:
- return fmt.Errorf("otp or pgp_key parameter must be provided")
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return consts.ErrSealed
- }
- if c.standby {
- return consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- // Prevent multiple concurrent root generations
- if c.generateRootConfig != nil {
- return fmt.Errorf("root generation already in progress")
- }
-
- // Copy the configuration
- generationNonce, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
-
- c.generateRootConfig = &GenerateRootConfig{
- Nonce: generationNonce,
- OTP: otp,
- PGPKey: pgpKey,
- PGPFingerprint: fingerprint,
- Strategy: strategy,
- }
-
- if c.logger.IsInfo() {
- switch strategy.(type) {
- case generateStandardRootToken:
- c.logger.Info("root generation initialized", "nonce", c.generateRootConfig.Nonce)
- default:
- c.logger.Info("dr operation token generation initialized", "nonce", c.generateRootConfig.Nonce)
- }
- }
-
- return nil
-}
-
-// GenerateRootUpdate is used to provide a new key part
-func (c *Core) GenerateRootUpdate(ctx context.Context, key []byte, nonce string, strategy GenerateRootStrategy) (*GenerateRootResult, error) {
- // Verify the key length
- min, max := c.barrier.KeyLength()
- max += shamir.ShareOverhead
- if len(key) < min {
- return nil, &ErrInvalidKey{fmt.Sprintf("key is shorter than minimum %d bytes", min)}
- }
- if len(key) > max {
- return nil, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
- }
-
- // Get the seal configuration
- var config *SealConfig
- var err error
- if c.seal.RecoveryKeySupported() {
- config, err = c.seal.RecoveryConfig(ctx)
- if err != nil {
- return nil, err
- }
- } else {
- config, err = c.seal.BarrierConfig(ctx)
- if err != nil {
- return nil, err
- }
- }
-
- // Ensure the barrier is initialized
- if config == nil {
- return nil, ErrNotInit
- }
-
- // Ensure we are already unsealed
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil, consts.ErrSealed
- }
- if c.standby {
- return nil, consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- // Ensure a generateRoot is in progress
- if c.generateRootConfig == nil {
- return nil, fmt.Errorf("no root generation in progress")
- }
-
- if nonce != c.generateRootConfig.Nonce {
- return nil, fmt.Errorf("incorrect nonce supplied; nonce for this root generation operation is %q", c.generateRootConfig.Nonce)
- }
-
- if strategy != c.generateRootConfig.Strategy {
- return nil, fmt.Errorf("incorrect strategy supplied; a generate root operation of another type is already in progress")
- }
-
- // Check if we already have this piece
- for _, existing := range c.generateRootProgress {
- if bytes.Equal(existing, key) {
- return nil, fmt.Errorf("given key has already been provided during this generation operation")
- }
- }
-
- // Store this key
- c.generateRootProgress = append(c.generateRootProgress, key)
- progress := len(c.generateRootProgress)
-
- // Check if we don't have enough keys to unlock
- if len(c.generateRootProgress) < config.SecretThreshold {
- if c.logger.IsDebug() {
- c.logger.Debug("cannot generate root, not enough keys", "keys", progress, "threshold", config.SecretThreshold)
- }
- return &GenerateRootResult{
- Progress: progress,
- Required: config.SecretThreshold,
- PGPFingerprint: c.generateRootConfig.PGPFingerprint,
- }, nil
- }
-
- // Recover the master key
- var masterKey []byte
- if config.SecretThreshold == 1 {
- masterKey = c.generateRootProgress[0]
- c.generateRootProgress = nil
- } else {
- masterKey, err = shamir.Combine(c.generateRootProgress)
- c.generateRootProgress = nil
- if err != nil {
- return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err)
- }
- }
-
- // Verify the master key
- if c.seal.RecoveryKeySupported() {
- if err := c.seal.VerifyRecoveryKey(ctx, masterKey); err != nil {
- c.logger.Error("root generation aborted, recovery key verification failed", "error", err)
- return nil, err
- }
- } else {
- if err := c.barrier.VerifyMaster(masterKey); err != nil {
- c.logger.Error("root generation aborted, master key verification failed", "error", err)
- return nil, err
- }
- }
-
- // Run the generate strategy
- token, cleanupFunc, err := strategy.generate(ctx, c)
- if err != nil {
- return nil, err
- }
-
- var tokenBytes []byte
-
- // Get the encoded value first so that if there is an error we don't create
- // the root token.
- switch {
- case len(c.generateRootConfig.OTP) > 0:
- // This function performs decoding checks so rather than decode the OTP,
- // just encode the value we're passing in.
- tokenBytes, err = xor.XORBytes([]byte(c.generateRootConfig.OTP), []byte(token))
- if err != nil {
- cleanupFunc()
- c.logger.Error("xor of root token failed", "error", err)
- return nil, err
- }
- token = base64.RawStdEncoding.EncodeToString(tokenBytes)
-
- case len(c.generateRootConfig.PGPKey) > 0:
- _, tokenBytesArr, err := pgpkeys.EncryptShares([][]byte{[]byte(token)}, []string{c.generateRootConfig.PGPKey})
- if err != nil {
- cleanupFunc()
- c.logger.Error("error encrypting new root token", "error", err)
- return nil, err
- }
- token = base64.StdEncoding.EncodeToString(tokenBytesArr[0])
-
- default:
- cleanupFunc()
- return nil, fmt.Errorf("unreachable condition")
- }
-
- results := &GenerateRootResult{
- Progress: progress,
- Required: config.SecretThreshold,
- EncodedToken: token,
- PGPFingerprint: c.generateRootConfig.PGPFingerprint,
- }
-
- switch strategy.(type) {
- case generateStandardRootToken:
- if c.logger.IsInfo() {
- c.logger.Info("root generation finished", "nonce", c.generateRootConfig.Nonce)
- }
- default:
- if c.logger.IsInfo() {
- c.logger.Info("dr operation token generation finished", "nonce", c.generateRootConfig.Nonce)
- }
- }
-
- c.generateRootProgress = nil
- c.generateRootConfig = nil
- return results, nil
-}
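-
-// A minimal client-side sketch (an illustrative assumption, not code from
-// this package) of recovering the root token from the encoded value returned
-// above: base64-decode it, then XOR with the OTP, mirroring the encoding in
-// GenerateRootUpdate. encodedToken and otp are hypothetical inputs.
-//
-//	raw, _ := base64.RawStdEncoding.DecodeString(encodedToken)
-//	tokenBytes, _ := xor.XORBytes(raw, []byte(otp))
-//	token := string(tokenBytes)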
-
-// GenerateRootCancel is used to cancel an in-progress root generation
-func (c *Core) GenerateRootCancel() error {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return consts.ErrSealed
- }
- if c.standby {
- return consts.ErrStandby
- }
-
- c.generateRootLock.Lock()
- defer c.generateRootLock.Unlock()
-
- // Clear any progress or config
- c.generateRootConfig = nil
- c.generateRootProgress = nil
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/ha.go b/vendor/github.com/hashicorp/vault/vault/ha.go
deleted file mode 100644
index cd0d1467..00000000
--- a/vendor/github.com/hashicorp/vault/vault/ha.go
+++ /dev/null
@@ -1,875 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/x509"
- "errors"
- "fmt"
- "sync/atomic"
- "time"
-
- metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- multierror "github.com/hashicorp/go-multierror"
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
- "github.com/oklog/run"
-)
-
-const (
- // lockRetryInterval is the interval at which we re-attempt to acquire the
- // HA lock if an error is encountered
- lockRetryInterval = 10 * time.Second
-
- // leaderCheckInterval is how often a standby checks for a new leader
- leaderCheckInterval = 2500 * time.Millisecond
-
- // keyRotateCheckInterval is how often a standby checks for a key
- // rotation taking place.
- keyRotateCheckInterval = 10 * time.Second
-
- // keyRotateGracePeriod is how long we allow an upgrade path
- // for standby instances before we delete the upgrade keys
- keyRotateGracePeriod = 2 * time.Minute
-
- // leaderPrefixCleanDelay is how long to wait between deletions
- // of orphaned leader keys, to prevent slamming the backend.
- leaderPrefixCleanDelay = 200 * time.Millisecond
-)
-
-var (
- addEnterpriseHaActors func(*Core, *run.Group) chan func() = addEnterpriseHaActorsNoop
- interruptPerfStandby func(chan func(), chan struct{}) chan struct{} = interruptPerfStandbyNoop
-)
-
-func addEnterpriseHaActorsNoop(*Core, *run.Group) chan func() { return nil }
-func interruptPerfStandbyNoop(chan func(), chan struct{}) chan struct{} {
- return make(chan struct{})
-}
-
-// Standby checks if the Vault is in standby mode
-func (c *Core) Standby() (bool, error) {
- c.stateLock.RLock()
- standby := c.standby
- c.stateLock.RUnlock()
- return standby, nil
-}
-
-// PerfStandby checks if the vault is a performance standby
-func (c *Core) PerfStandby() bool {
- c.stateLock.RLock()
- perfStandby := c.perfStandby
- c.stateLock.RUnlock()
- return perfStandby
-}
-
-// Leader is used to get the current active leader
-func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err error) {
- // Check if HA enabled. We don't need the lock for this check as it's set
- // on startup and never modified
- if c.ha == nil {
- return false, "", "", ErrHANotEnabled
- }
-
- // Check if sealed
- if c.Sealed() {
- return false, "", "", consts.ErrSealed
- }
-
- c.stateLock.RLock()
-
- // Check if we are the leader
- if !c.standby {
- c.stateLock.RUnlock()
- return true, c.redirectAddr, c.clusterAddr, nil
- }
-
- // Initialize a lock
- lock, err := c.ha.LockWith(CoreLockPath, "read")
- if err != nil {
- c.stateLock.RUnlock()
- return false, "", "", err
- }
-
- // Read the value
- held, leaderUUID, err := lock.Value()
- if err != nil {
- c.stateLock.RUnlock()
- return false, "", "", err
- }
- if !held {
- c.stateLock.RUnlock()
- return false, "", "", nil
- }
-
- c.clusterLeaderParamsLock.RLock()
- localLeaderUUID := c.clusterLeaderUUID
- localRedirAddr := c.clusterLeaderRedirectAddr
- localClusterAddr := c.clusterLeaderClusterAddr
- c.clusterLeaderParamsLock.RUnlock()
-
- // If the leader hasn't changed, return the cached value; nothing changes
- // mid-leadership, and the barrier caches anyway
- if leaderUUID == localLeaderUUID && localRedirAddr != "" {
- c.stateLock.RUnlock()
- return false, localRedirAddr, localClusterAddr, nil
- }
-
- c.logger.Trace("found new active node information, refreshing")
-
- defer c.stateLock.RUnlock()
- c.clusterLeaderParamsLock.Lock()
- defer c.clusterLeaderParamsLock.Unlock()
-
- // Validate base conditions again
- if leaderUUID == c.clusterLeaderUUID && c.clusterLeaderRedirectAddr != "" {
- return false, localRedirAddr, localClusterAddr, nil
- }
-
- key := coreLeaderPrefix + leaderUUID
- // Use background because postUnseal isn't run on standby
- entry, err := c.barrier.Get(context.Background(), key)
- if err != nil {
- return false, "", "", err
- }
- if entry == nil {
- return false, "", "", nil
- }
-
- var oldAdv bool
-
- var adv activeAdvertisement
- err = jsonutil.DecodeJSON(entry.Value, &adv)
- if err != nil {
- // Fall back to pre-struct handling
- adv.RedirectAddr = string(entry.Value)
- c.logger.Debug("parsed redirect addr for new active node", "redirect_addr", adv.RedirectAddr)
- oldAdv = true
- }
-
- if !oldAdv {
- c.logger.Debug("parsing information for new active node", "active_cluster_addr", adv.ClusterAddr, "active_redirect_addr", adv.RedirectAddr)
-
- // Ensure we are using current values
- err = c.loadLocalClusterTLS(adv)
- if err != nil {
- return false, "", "", err
- }
-
- // This will ensure that we both have a connection at the ready and that
- // the address is the current known value
- // Since this is standby, we don't use the active context. Later we may
- // use a process-scoped context
- err = c.refreshRequestForwardingConnection(context.Background(), adv.ClusterAddr)
- if err != nil {
- return false, "", "", err
- }
- }
-
- // Don't set these until everything has been parsed successfully or we'll
- // never try again
- c.clusterLeaderRedirectAddr = adv.RedirectAddr
- c.clusterLeaderClusterAddr = adv.ClusterAddr
- c.clusterLeaderUUID = leaderUUID
-
- return false, adv.RedirectAddr, adv.ClusterAddr, nil
-}
-
-// StepDown is used to step down from leadership
-func (c *Core) StepDown(httpCtx context.Context, req *logical.Request) (retErr error) {
- defer metrics.MeasureSince([]string{"core", "step_down"}, time.Now())
-
- if req == nil {
- retErr = multierror.Append(retErr, errors.New("nil request to step-down"))
- return retErr
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil
- }
- if c.ha == nil || c.standby {
- return nil
- }
-
- ctx, cancel := context.WithCancel(namespace.RootContext(nil))
- defer cancel()
-
- go func() {
- select {
- case <-ctx.Done():
- case <-httpCtx.Done():
- cancel()
- }
- }()
-
- acl, te, entity, identityPolicies, err := c.fetchACLTokenEntryAndEntity(ctx, req)
- if err != nil {
- if errwrap.ContainsType(err, new(TemplateError)) {
- c.logger.Warn("permission denied due to a templated policy being invalid or containing directives not satisfied by the requestor", "error", err)
- err = logical.ErrPermissionDenied
- }
- retErr = multierror.Append(retErr, err)
- return retErr
- }
-
- // Audit-log the request before going any further
- auth := &logical.Auth{
- ClientToken: req.ClientToken,
- Accessor: req.ClientTokenAccessor,
- }
- if te != nil {
- auth.IdentityPolicies = identityPolicies[te.NamespaceID]
- delete(identityPolicies, te.NamespaceID)
- auth.ExternalNamespacePolicies = identityPolicies
- auth.TokenPolicies = te.Policies
- auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...)
- auth.Metadata = te.Meta
- auth.DisplayName = te.DisplayName
- auth.EntityID = te.EntityID
- auth.TokenType = te.Type
- }
-
- logInput := &audit.LogInput{
- Auth: auth,
- Request: req,
- }
- if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
- c.logger.Error("failed to audit request", "request_path", req.Path, "error", err)
- retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
- return retErr
- }
-
- if entity != nil && entity.Disabled {
- c.logger.Warn("permission denied as the entity on the token is disabled")
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- return retErr
- }
-
- if te != nil && te.EntityID != "" && entity == nil {
- c.logger.Warn("permission denied as the entity on the token is invalid")
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- return retErr
- }
-
- // Attempt to use the token (decrement num_uses)
- if te != nil {
- te, err = c.tokenStore.UseToken(ctx, te)
- if err != nil {
- c.logger.Error("failed to use token", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- return retErr
- }
- if te == nil {
- // Token has been revoked
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- return retErr
- }
- }
-
- // Verify that this operation is allowed
- authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
- RootPrivsRequired: true,
- })
- if !authResults.Allowed {
- retErr = multierror.Append(retErr, authResults.Error)
- if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError {
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- }
- return retErr
- }
-
- if te != nil && te.NumUses == tokenRevocationPending {
- // Token needs to be revoked. We do this immediately here because
- // we won't have a token store after sealing.
- leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(c.activeContext, te)
- if err == nil {
- err = c.expiration.Revoke(c.activeContext, leaseID)
- }
- if err != nil {
- c.logger.Error("token needed revocation before step-down but failed to revoke", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- }
- }
-
- select {
- case c.manualStepDownCh <- struct{}{}:
- default:
- c.logger.Warn("manual step-down operation already queued")
- }
-
- return retErr
-}
-
-// runStandby is a long running process that manages a number of the HA
-// subsystems.
-func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) {
- defer close(doneCh)
- defer close(manualStepDownCh)
- c.logger.Info("entering standby mode")
-
- var g run.Group
- newLeaderCh := addEnterpriseHaActors(c, &g)
- {
- // This will cause all the other actors to close when the stop channel
- // is closed.
- g.Add(func() error {
- <-stopCh
- return nil
- }, func(error) {})
- }
- {
- // Monitor for key rotation
- keyRotateStop := make(chan struct{})
-
- g.Add(func() error {
- c.periodicCheckKeyUpgrade(context.Background(), keyRotateStop)
- return nil
- }, func(error) {
- close(keyRotateStop)
- c.logger.Debug("shutting down periodic key rotation checker")
- })
- }
- {
- // Monitor for new leadership
- checkLeaderStop := make(chan struct{})
-
- g.Add(func() error {
- c.periodicLeaderRefresh(newLeaderCh, checkLeaderStop)
- return nil
- }, func(error) {
- close(checkLeaderStop)
- c.logger.Debug("shutting down periodic leader refresh")
- })
- }
- {
- // Wait for leadership
- leaderStopCh := make(chan struct{})
-
- g.Add(func() error {
- c.waitForLeadership(newLeaderCh, manualStepDownCh, leaderStopCh)
- return nil
- }, func(error) {
- close(leaderStopCh)
- c.logger.Debug("shutting down leader elections")
- })
- }
-
- // Start all the actors
- g.Run()
-}
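-
-// runStandby composes its long-running workers with oklog/run as shown
-// above: each g.Add pairs an execute function with an interrupt function,
-// and g.Run() blocks until the first execute returns, then invokes every
-// interrupt. The shape of one actor, with worker as a hypothetical stand-in:
-//
-//	stop := make(chan struct{})
-//	g.Add(func() error {
-//		worker(stop) // blocks until stop is closed
-//		return nil
-//	}, func(error) {
-//		close(stop)
-//	})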
-
-// waitForLeadership is a long running routine that is used when an HA backend
-// is enabled. It waits until we are leader and switches this Vault to
-// active.
-func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stopCh chan struct{}) {
- var manualStepDown bool
- for {
- // Check for a shutdown
- select {
- case <-stopCh:
- c.logger.Debug("stop channel triggered in runStandby")
- return
- default:
- // If we've just stepped down, we could instantly grab the lock again. Give
- // the other nodes a chance.
- if manualStepDown {
- time.Sleep(manualStepDownSleepPeriod)
- manualStepDown = false
- }
- }
-
- // Create a lock
- uuid, err := uuid.GenerateUUID()
- if err != nil {
- c.logger.Error("failed to generate uuid", "error", err)
- return
- }
- lock, err := c.ha.LockWith(CoreLockPath, uuid)
- if err != nil {
- c.logger.Error("failed to create lock", "error", err)
- return
- }
-
- // Attempt the acquisition
- leaderLostCh := c.acquireLock(lock, stopCh)
-
- // Bail if we are being shutdown
- if leaderLostCh == nil {
- return
- }
- c.logger.Info("acquired lock, enabling active operation")
-
- // This is used later to log a metrics event; this can be helpful to
- // detect flapping
- activeTime := time.Now()
-
- continueCh := interruptPerfStandby(newLeaderCh, stopCh)
- // Grab the statelock or stop
- if stopped := grabLockOrStop(c.stateLock.Lock, c.stateLock.Unlock, stopCh); stopped {
- lock.Unlock()
- close(continueCh)
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- return
- }
-
- if c.Sealed() {
- c.logger.Warn("grabbed HA lock but already sealed, exiting")
- lock.Unlock()
- close(continueCh)
- c.stateLock.Unlock()
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- return
- }
-
- // Store the lock so that we can manually clear it later if needed
- c.heldHALock = lock
-
- // Create the active context
- activeCtx, activeCtxCancel := context.WithCancel(namespace.RootContext(nil))
- c.activeContext = activeCtx
- c.activeContextCancelFunc.Store(activeCtxCancel)
-
- // This block is used to wipe barrier/seal state and verify that
- // everything is sane. If we have no sanity in the barrier, we actually
- // seal, as there's little we can do.
- {
- c.seal.SetBarrierConfig(activeCtx, nil)
- if c.seal.RecoveryKeySupported() {
- c.seal.SetRecoveryConfig(activeCtx, nil)
- }
-
- if err := c.performKeyUpgrades(activeCtx); err != nil {
- // We call this in a goroutine so that we can give up the
- // statelock and have this shut us down; sealInternal has a
- // workflow where it watches for the stopCh to close so we want
- // to return from here
- c.logger.Error("error performing key upgrades", "error", err)
- go c.Shutdown()
- c.heldHALock = nil
- lock.Unlock()
- close(continueCh)
- c.stateLock.Unlock()
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- return
- }
- }
-
- {
- // Clear previous local cluster cert info so we generate new. Since the
- // UUID will have changed, standbys will know to look for new info
- c.localClusterParsedCert.Store((*x509.Certificate)(nil))
- c.localClusterCert.Store(([]byte)(nil))
- c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil))
-
- if err := c.setupCluster(activeCtx); err != nil {
- c.heldHALock = nil
- lock.Unlock()
- close(continueCh)
- c.stateLock.Unlock()
- c.logger.Error("cluster setup failed", "error", err)
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- continue
- }
-
- }
- // Advertise as leader
- if err := c.advertiseLeader(activeCtx, uuid, leaderLostCh); err != nil {
- c.heldHALock = nil
- lock.Unlock()
- close(continueCh)
- c.stateLock.Unlock()
- c.logger.Error("leader advertisement setup failed", "error", err)
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- continue
- }
-
- // Attempt the post-unseal process
- err = c.postUnseal(activeCtx, activeCtxCancel, standardUnsealStrategy{})
- if err == nil {
- c.standby = false
- }
-
- close(continueCh)
- c.stateLock.Unlock()
-
- // Handle a failure to unseal
- if err != nil {
- c.logger.Error("post-unseal setup failed", "error", err)
- lock.Unlock()
- metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
- continue
- }
-
- // Monitor a loss of leadership
- select {
- case <-leaderLostCh:
- c.logger.Warn("leadership lost, stopping active operation")
- case <-stopCh:
- case <-manualStepDownCh:
- manualStepDown = true
- c.logger.Warn("stepping down from active operation to standby")
- }
-
- // Stop Active Duty
- {
- // Spawn this in a goroutine so we can cancel the context and
- // unblock any inflight requests that are holding the statelock.
- go func() {
- select {
- case <-activeCtx.Done():
- // Attempt to drain any inflight requests
- case <-time.After(DefaultMaxRequestDuration):
- activeCtxCancel()
- }
- }()
-
- // Grab lock if we are not stopped
- stopped := grabLockOrStop(c.stateLock.Lock, c.stateLock.Unlock, stopCh)
-
- // Cancel the context in case the above goroutine hasn't done it
- // yet
- activeCtxCancel()
- metrics.MeasureSince([]string{"core", "leadership_lost"}, activeTime)
-
- // Mark as standby
- c.standby = true
-
- // Seal
- if err := c.preSeal(); err != nil {
- c.logger.Error("pre-seal teardown failed", "error", err)
- }
-
- // If we are not meant to keep the HA lock, clear it
- if atomic.LoadUint32(c.keepHALockOnStepDown) == 0 {
- if err := c.clearLeader(uuid); err != nil {
- c.logger.Error("clearing leader advertisement failed", "error", err)
- }
-
- c.heldHALock.Unlock()
- c.heldHALock = nil
- }
-
- // If we are stopped return, otherwise unlock the statelock
- if stopped {
- return
- }
- c.stateLock.Unlock()
- }
- }
-}
-
-func grabLockOrStop(lockFunc, unlockFunc func(), stopCh chan struct{}) (stopped bool) {
- // Grab the lock as we need it for cluster setup, which needs to happen
- // before advertising.
- lockGrabbedCh := make(chan struct{})
- go func() {
- // Grab the lock
- lockFunc()
- // If stopCh has been closed, which only happens while the
- // stateLock is held, we have actually terminated, so we just
- // instantly give up the lock, otherwise we notify that it's ready
- // for consumption
- select {
- case <-stopCh:
- unlockFunc()
- default:
- close(lockGrabbedCh)
- }
- }()
-
- select {
- case <-stopCh:
- return true
- case <-lockGrabbedCh:
- // We now have the lock and can use it
- }
-
- return false
-}
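-
-// One minimal way to call grabLockOrStop, as the leadership loop above does:
-// hand it the mutex's lock and unlock methods plus the shutdown channel, and
-// bail out if the shutdown won the race.
-//
-//	if stopped := grabLockOrStop(c.stateLock.Lock, c.stateLock.Unlock, stopCh); stopped {
-//		return
-//	}
-//	// ... critical section; unlock when the work is done.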
-
-// This checks the leader periodically to ensure that we switch RPC to a new
-// leader pretty quickly. There is logic in Leader() already to not make this
-// onerous and avoid more traffic than needed, so we just call that and ignore
-// the result.
-func (c *Core) periodicLeaderRefresh(newLeaderCh chan func(), stopCh chan struct{}) {
- opCount := new(int32)
-
- clusterAddr := ""
- for {
- select {
- case <-time.After(leaderCheckInterval):
- count := atomic.AddInt32(opCount, 1)
- if count > 1 {
- atomic.AddInt32(opCount, -1)
- continue
- }
- // We do this in a goroutine because otherwise if this refresh is
- // called while we're shutting down the call to Leader() can
- // deadlock, which then means stopCh can never be seen and we can
- // block shutdown
- go func() {
- // Bind locally, as the race detector is tripping here
- lopCount := opCount
- isLeader, _, newClusterAddr, _ := c.Leader()
-
- if !isLeader && newClusterAddr != clusterAddr && newLeaderCh != nil {
- select {
- case newLeaderCh <- nil:
- c.logger.Debug("new leader found, triggering new leader channel")
- clusterAddr = newClusterAddr
- default:
- c.logger.Debug("new leader found, but still processing previous leader change")
- }
-
- }
- atomic.AddInt32(lopCount, -1)
- }()
- case <-stopCh:
- return
- }
- }
-}
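-
-// periodicLeaderRefresh above and periodicCheckKeyUpgrade below share the
-// same guard: an atomic counter incremented before spawning the check and
-// decremented when it finishes, so at most one check is in flight per loop.
-// A sketch of the pattern, with doPeriodicWork as a hypothetical stand-in:
-//
-//	count := atomic.AddInt32(opCount, 1)
-//	if count > 1 {
-//		atomic.AddInt32(opCount, -1) // a previous check is still running
-//		continue
-//	}
-//	go func() {
-//		defer atomic.AddInt32(opCount, -1)
-//		doPeriodicWork()
-//	}()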
-
-// periodicCheckKeyUpgrade is used to watch for key rotation events as a standby
-func (c *Core) periodicCheckKeyUpgrade(ctx context.Context, stopCh chan struct{}) {
- opCount := new(int32)
- for {
- select {
- case <-time.After(keyRotateCheckInterval):
- count := atomic.AddInt32(opCount, 1)
- if count > 1 {
- atomic.AddInt32(opCount, -1)
- continue
- }
-
- go func() {
- // Bind locally, as the race detector is tripping here
- lopCount := opCount
-
- // Only check if we are a standby
- c.stateLock.RLock()
- standby := c.standby
- c.stateLock.RUnlock()
- if !standby {
- atomic.AddInt32(lopCount, -1)
- return
- }
-
- // Check for a poison pill. If we can read it, it means we have stale
- // keys (e.g. from replication being activated) and we need to seal to
- // be unsealed again.
- entry, _ := c.barrier.Get(ctx, poisonPillPath)
- if entry != nil && len(entry.Value) > 0 {
- c.logger.Warn("encryption keys have changed out from underneath us (possibly due to replication enabling), must be unsealed again")
- go c.Shutdown()
- atomic.AddInt32(lopCount, -1)
- return
- }
-
- if err := c.checkKeyUpgrades(ctx); err != nil {
- c.logger.Error("key rotation periodic upgrade check failed", "error", err)
- }
-
- atomic.AddInt32(lopCount, -1)
- return
- }()
- case <-stopCh:
- return
- }
- }
-}
-
-// checkKeyUpgrades is used to check if there have been any key rotations
-// and if there is a chain of upgrades available
-func (c *Core) checkKeyUpgrades(ctx context.Context) error {
- for {
- // Check for an upgrade
- didUpgrade, newTerm, err := c.barrier.CheckUpgrade(ctx)
- if err != nil {
- return err
- }
-
- // Nothing to do if no upgrade
- if !didUpgrade {
- break
- }
- if c.logger.IsInfo() {
- c.logger.Info("upgraded to new key term", "term", newTerm)
- }
- }
- return nil
-}
-
-func (c *Core) performKeyUpgrades(ctx context.Context) error {
- if err := c.checkKeyUpgrades(ctx); err != nil {
- return errwrap.Wrapf("error checking for key upgrades: {{err}}", err)
- }
-
- if err := c.barrier.ReloadMasterKey(ctx); err != nil {
- return errwrap.Wrapf("error reloading master key: {{err}}", err)
- }
-
- if err := c.barrier.ReloadKeyring(ctx); err != nil {
- return errwrap.Wrapf("error reloading keyring: {{err}}", err)
- }
-
- if err := c.scheduleUpgradeCleanup(ctx); err != nil {
- return errwrap.Wrapf("error scheduling upgrade cleanup: {{err}}", err)
- }
-
- return nil
-}
-
-// scheduleUpgradeCleanup is used to ensure that all the upgrade paths
-// are cleaned up in a timely manner if a leader failover takes place
-func (c *Core) scheduleUpgradeCleanup(ctx context.Context) error {
- // List the upgrades
- upgrades, err := c.barrier.List(ctx, keyringUpgradePrefix)
- if err != nil {
- return errwrap.Wrapf("failed to list upgrades: {{err}}", err)
- }
-
- // Nothing to do if no upgrades
- if len(upgrades) == 0 {
- return nil
- }
-
- // Schedule cleanup for all of them
- time.AfterFunc(keyRotateGracePeriod, func() {
- sealed, err := c.barrier.Sealed()
- if err != nil {
- c.logger.Warn("failed to check barrier status at upgrade cleanup time")
- return
- }
- if sealed {
- c.logger.Warn("barrier sealed at upgrade cleanup time")
- return
- }
- for _, upgrade := range upgrades {
- path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade)
- if err := c.barrier.Delete(ctx, path); err != nil {
- c.logger.Error("failed to cleanup upgrade", "path", path, "error", err)
- }
- }
- })
- return nil
-}
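-
-// The deferred cleanup above relies on time.AfterFunc: the deletion closure
-// fires keyRotateGracePeriod after scheduling and returns early if the
-// barrier has been sealed in the meantime. The general shape, with
-// safeToClean and cleanup as hypothetical helpers:
-//
-//	timer := time.AfterFunc(gracePeriod, func() {
-//		if safeToClean() {
-//			cleanup()
-//		}
-//	})
-//	_ = timer // timer.Stop() can cancel the cleanup if it becomes unnecessary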
-
-// acquireLock blocks until the lock is acquired, returning the leaderLostCh
-func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan struct{} {
- for {
- // Attempt lock acquisition
- leaderLostCh, err := lock.Lock(stopCh)
- if err == nil {
- return leaderLostCh
- }
-
- // Retry the acquisition
- c.logger.Error("failed to acquire lock", "error", err)
- select {
- case <-time.After(lockRetryInterval):
- case <-stopCh:
- return nil
- }
- }
-}
-
-// advertiseLeader is used to advertise the current node as leader
-func (c *Core) advertiseLeader(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) error {
- go c.cleanLeaderPrefix(ctx, uuid, leaderLostCh)
-
- var key *ecdsa.PrivateKey
- switch c.localClusterPrivateKey.Load().(type) {
- case *ecdsa.PrivateKey:
- key = c.localClusterPrivateKey.Load().(*ecdsa.PrivateKey)
- default:
- c.logger.Error("unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey.Load()))
- return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey.Load())
- }
-
- keyParams := &clusterKeyParams{
- Type: corePrivateKeyTypeP521,
- X: key.X,
- Y: key.Y,
- D: key.D,
- }
-
- locCert := c.localClusterCert.Load().([]byte)
- localCert := make([]byte, len(locCert))
- copy(localCert, locCert)
- adv := &activeAdvertisement{
- RedirectAddr: c.redirectAddr,
- ClusterAddr: c.clusterAddr,
- ClusterCert: localCert,
- ClusterKeyParams: keyParams,
- }
- val, err := jsonutil.EncodeJSON(adv)
- if err != nil {
- return err
- }
- ent := &Entry{
- Key: coreLeaderPrefix + uuid,
- Value: val,
- }
- err = c.barrier.Put(ctx, ent)
- if err != nil {
- return err
- }
-
- sd, ok := c.ha.(physical.ServiceDiscovery)
- if ok {
- if err := sd.NotifyActiveStateChange(); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("failed to notify active status", "error", err)
- }
- }
- }
- return nil
-}
-
-func (c *Core) cleanLeaderPrefix(ctx context.Context, uuid string, leaderLostCh <-chan struct{}) {
- keys, err := c.barrier.List(ctx, coreLeaderPrefix)
- if err != nil {
- c.logger.Error("failed to list entries in core/leader", "error", err)
- return
- }
- for len(keys) > 0 {
- select {
- case <-time.After(leaderPrefixCleanDelay):
- if keys[0] != uuid {
- c.barrier.Delete(ctx, coreLeaderPrefix+keys[0])
- }
- keys = keys[1:]
- case <-leaderLostCh:
- return
- }
- }
-}
-
-// clearLeader is used to clear our leadership entry
-func (c *Core) clearLeader(uuid string) error {
- key := coreLeaderPrefix + uuid
- err := c.barrier.Delete(context.Background(), key)
-
- // Advertise ourselves as a standby
- sd, ok := c.ha.(physical.ServiceDiscovery)
- if ok {
- if err := sd.NotifyActiveStateChange(); err != nil {
- if c.logger.IsWarn() {
- c.logger.Warn("failed to notify standby status", "error", err)
- }
- }
- }
-
- return err
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_lookup.go b/vendor/github.com/hashicorp/vault/vault/identity_lookup.go
deleted file mode 100644
index 4a4b0eb0..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_lookup.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func lookupPaths(i *IdentityStore) []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "lookup/entity$",
- Fields: map[string]*framework.FieldSchema{
- "name": {
- Type: framework.TypeString,
- Description: "Name of the entity.",
- },
- "id": {
- Type: framework.TypeString,
- Description: "ID of the entity.",
- },
- "alias_id": {
- Type: framework.TypeString,
- Description: "ID of the alias.",
- },
- "alias_name": {
- Type: framework.TypeString,
- Description: "Name of the alias. This should be supplied in conjunction with 'alias_mount_accessor'.",
- },
- "alias_mount_accessor": {
- Type: framework.TypeString,
- Description: "Accessor of the mount to which the alias belongs to. This should be supplied in conjunction with 'alias_name'.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.pathLookupEntityUpdate(),
- },
-
- HelpSynopsis: strings.TrimSpace(lookupHelp["lookup-entity"][0]),
- HelpDescription: strings.TrimSpace(lookupHelp["lookup-entity"][1]),
- },
- {
- Pattern: "lookup/group$",
- Fields: map[string]*framework.FieldSchema{
- "name": {
- Type: framework.TypeString,
- Description: "Name of the group.",
- },
- "id": {
- Type: framework.TypeString,
- Description: "ID of the group.",
- },
- "alias_id": {
- Type: framework.TypeString,
- Description: "ID of the alias.",
- },
- "alias_name": {
- Type: framework.TypeString,
- Description: "Name of the alias. This should be supplied in conjunction with 'alias_mount_accessor'.",
- },
- "alias_mount_accessor": {
- Type: framework.TypeString,
- Description: "Accessor of the mount to which the alias belongs to. This should be supplied in conjunction with 'alias_name'.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.pathLookupGroupUpdate(),
- },
-
- HelpSynopsis: strings.TrimSpace(lookupHelp["lookup-group"][0]),
- HelpDescription: strings.TrimSpace(lookupHelp["lookup-group"][1]),
- },
- }
-}
-
-func (i *IdentityStore) pathLookupEntityUpdate() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- var entity *identity.Entity
- var err error
-
- inputCount := 0
-
- id := ""
- idRaw, ok := d.GetOk("id")
- if ok {
- inputCount++
- id = idRaw.(string)
- }
-
- name := ""
- nameRaw, ok := d.GetOk("name")
- if ok {
- inputCount++
- name = nameRaw.(string)
- }
-
- aliasID := ""
- aliasIDRaw, ok := d.GetOk("alias_id")
- if ok {
- inputCount++
- aliasID = aliasIDRaw.(string)
- }
-
- aliasName := ""
- aliasNameRaw, ok := d.GetOk("alias_name")
- if ok {
- inputCount++
- aliasName = aliasNameRaw.(string)
- }
-
- aliasMountAccessor := ""
- aliasMountAccessorRaw, ok := d.GetOk("alias_mount_accessor")
- if ok {
- inputCount++
- aliasMountAccessor = aliasMountAccessorRaw.(string)
- }
-
- switch {
- case inputCount == 0:
- return logical.ErrorResponse(fmt.Sprintf("query parameter not supplied")), nil
-
- case inputCount != 1:
- switch {
- case inputCount == 2 && aliasName != "" && aliasMountAccessor != "":
- default:
- return logical.ErrorResponse(fmt.Sprintf("query parameter conflict; please supply distinct set of query parameters")), nil
- }
-
- case inputCount == 1:
- switch {
- case aliasName != "" || aliasMountAccessor != "":
- return logical.ErrorResponse(fmt.Sprintf("both 'alias_name' and 'alias_mount_accessor' needs to be set")), nil
- }
- }
-
- switch {
- case id != "":
- entity, err = i.MemDBEntityByID(id, false)
- if err != nil {
- return nil, err
- }
-
- case name != "":
- entity, err = i.MemDBEntityByName(ctx, name, false)
- if err != nil {
- return nil, err
- }
-
- case aliasID != "":
- alias, err := i.MemDBAliasByID(aliasID, false, false)
- if err != nil {
- return nil, err
- }
-
- if alias == nil {
- break
- }
-
- entity, err = i.MemDBEntityByAliasID(alias.ID, false)
- if err != nil {
- return nil, err
- }
-
- case aliasName != "" && aliasMountAccessor != "":
- alias, err := i.MemDBAliasByFactors(aliasMountAccessor, aliasName, false, false)
- if err != nil {
- return nil, err
- }
-
- if alias == nil {
- break
- }
-
- entity, err = i.MemDBEntityByAliasID(alias.ID, false)
- if err != nil {
- return nil, err
- }
- }
-
- if entity == nil {
- return nil, nil
- }
-
- return i.handleEntityReadCommon(ctx, entity)
- }
-}
-
-func (i *IdentityStore) pathLookupGroupUpdate() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- var group *identity.Group
- var err error
-
- inputCount := 0
-
- id := ""
- idRaw, ok := d.GetOk("id")
- if ok {
- inputCount++
- id = idRaw.(string)
- }
-
- name := ""
- nameRaw, ok := d.GetOk("name")
- if ok {
- inputCount++
- name = nameRaw.(string)
- }
-
- aliasID := ""
- aliasIDRaw, ok := d.GetOk("alias_id")
- if ok {
- inputCount++
- aliasID = aliasIDRaw.(string)
- }
-
- aliasName := ""
- aliasNameRaw, ok := d.GetOk("alias_name")
- if ok {
- inputCount++
- aliasName = aliasNameRaw.(string)
- }
-
- aliasMountAccessor := ""
- aliasMountAccessorRaw, ok := d.GetOk("alias_mount_accessor")
- if ok {
- inputCount++
- aliasMountAccessor = aliasMountAccessorRaw.(string)
- }
-
- switch {
- case inputCount == 0:
- return logical.ErrorResponse(fmt.Sprintf("query parameter not supplied")), nil
-
- case inputCount != 1:
- switch {
- case inputCount == 2 && aliasName != "" && aliasMountAccessor != "":
- default:
- return logical.ErrorResponse(fmt.Sprintf("query parameter conflict; please supply distinct set of query parameters")), nil
- }
-
- case inputCount == 1:
- switch {
- case aliasName != "" || aliasMountAccessor != "":
- return logical.ErrorResponse(fmt.Sprintf("both 'alias_name' and 'alias_mount_accessor' needs to be set")), nil
- }
- }
-
- switch {
- case id != "":
- group, err = i.MemDBGroupByID(id, false)
- if err != nil {
- return nil, err
- }
- case name != "":
- group, err = i.MemDBGroupByName(ctx, name, false)
- if err != nil {
- return nil, err
- }
- case aliasID != "":
- alias, err := i.MemDBAliasByID(aliasID, false, true)
- if err != nil {
- return nil, err
- }
-
- if alias == nil {
- break
- }
-
- group, err = i.MemDBGroupByAliasID(alias.ID, false)
- if err != nil {
- return nil, err
- }
-
- case aliasName != "" && aliasMountAccessor != "":
- alias, err := i.MemDBAliasByFactors(aliasMountAccessor, aliasName, false, true)
- if err != nil {
- return nil, err
- }
-
- if alias == nil {
- break
- }
-
- group, err = i.MemDBGroupByAliasID(alias.ID, false)
- if err != nil {
- return nil, err
- }
- }
-
- if group == nil {
- return nil, nil
- }
-
- return i.handleGroupReadCommon(ctx, group)
- }
-}
-
-var lookupHelp = map[string][2]string{
- "lookup-entity": {
- "Query entities based on various properties.",
- `Distinct query parameters to be set:
- - 'id'
- To query the entity by its ID.
- - 'name'
- To query the entity by its name.
- - 'alias_id'
- To query the entity by the ID of any of its aliases.
- - 'alias_name' and 'alias_mount_accessor'
- To query the entity by the unique factors that represent an alias; the name and the mount accessor.
- `,
- },
- "lookup-group": {
- "Query groups based on various properties.",
- `Distinct query parameters to be set:
- - 'id'
- To query the group by its ID.
- - 'name'
- To query the group by its name.
- - 'alias_id'
- To query the group by the ID of any of its aliases.
- - 'alias_name' and 'alias_mount_accessor'
- To query the group by the unique factors that represent an alias; the name and the mount accessor.
- `,
- },
-}
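-
-// A minimal caller-side sketch for the lookup endpoints above, assuming the
-// standard Vault API client (github.com/hashicorp/vault/api) and a
-// hypothetical userpass mount accessor; the identity store is mounted at
-// "identity/":
-//
-//	client, _ := api.NewClient(api.DefaultConfig())
-//	secret, err := client.Logical().Write("identity/lookup/entity", map[string]interface{}{
-//		"alias_name":           "jdoe",
-//		"alias_mount_accessor": "auth_userpass_abc123",
-//	})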
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store.go b/vendor/github.com/hashicorp/vault/vault/identity_store.go
deleted file mode 100644
index c2976ce6..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store.go
+++ /dev/null
@@ -1,493 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/golang/protobuf/ptypes"
- "github.com/hashicorp/errwrap"
- log "github.com/hashicorp/go-hclog"
- memdb "github.com/hashicorp/go-memdb"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/storagepacker"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- groupBucketsPrefix = "packer/group/buckets/"
-)
-
-var (
- sendGroupUpgrade = func(*IdentityStore, *identity.Group) (bool, error) { return false, nil }
- parseExtraEntityFromBucket = func(context.Context, *IdentityStore, *identity.Entity) (bool, error) { return false, nil }
- addExtraEntityDataToResponse = func(*identity.Entity, map[string]interface{}) {}
-)
-
-func (c *Core) IdentityStore() *IdentityStore {
- return c.identityStore
-}
-
-func (i *IdentityStore) resetDB(ctx context.Context) error {
- var err error
-
- i.db, err = memdb.NewMemDB(identityStoreSchema(!i.disableLowerCasedNames))
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendConfig, logger log.Logger) (*IdentityStore, error) {
- iStore := &IdentityStore{
- view: config.StorageView,
- logger: logger,
- core: core,
- }
-
- // Create a MemDB instance which, by default, operates on lower-cased
- // identity names
- err := iStore.resetDB(ctx)
- if err != nil {
- return nil, err
- }
-
- entitiesPackerLogger := iStore.logger.Named("storagepacker").Named("entities")
- core.AddLogger(entitiesPackerLogger)
- groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups")
- core.AddLogger(groupsPackerLogger)
- iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, entitiesPackerLogger, "")
- if err != nil {
- return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err)
- }
-
- iStore.groupPacker, err = storagepacker.NewStoragePacker(iStore.view, groupsPackerLogger, groupBucketsPrefix)
- if err != nil {
- return nil, errwrap.Wrapf("failed to create group packer: {{err}}", err)
- }
-
- iStore.Backend = &framework.Backend{
- BackendType: logical.TypeLogical,
- Paths: iStore.paths(),
- Invalidate: iStore.Invalidate,
- }
-
- err = iStore.Setup(ctx, config)
- if err != nil {
- return nil, err
- }
-
- return iStore, nil
-}
-
-func (i *IdentityStore) paths() []*framework.Path {
- return framework.PathAppend(
- entityPaths(i),
- aliasPaths(i),
- groupAliasPaths(i),
- groupPaths(i),
- lookupPaths(i),
- upgradePaths(i),
- )
-}
-
-// Invalidate is a callback wherein the backend is informed that the value at
- // the given key is updated. In the identity store's case, it would be the entity
-// storage entries that get updated. The value needs to be read and MemDB needs
-// to be updated accordingly.
-func (i *IdentityStore) Invalidate(ctx context.Context, key string) {
- i.logger.Debug("invalidate notification received", "key", key)
-
- i.lock.Lock()
- defer i.lock.Unlock()
-
- switch {
- // Check if the key is a storage entry key for an entity bucket
- case strings.HasPrefix(key, storagepacker.StoragePackerBucketsPrefix):
- // Get the hash value of the storage bucket entry key
- bucketKeyHash := i.entityPacker.BucketKeyHashByKey(key)
- if len(bucketKeyHash) == 0 {
- i.logger.Error("failed to get the bucket entry key hash")
- return
- }
-
- // Create a MemDB transaction
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Each entity object in MemDB holds the MD5 hash of the storage
- // entry key of the entity bucket. Fetch all the entities that
- // belong to this bucket using the hash value. Remove these entities
- // from MemDB along with all the aliases of each entity.
- entitiesFetched, err := i.MemDBEntitiesByBucketEntryKeyHashInTxn(txn, string(bucketKeyHash))
- if err != nil {
- i.logger.Error("failed to fetch entities using the bucket entry key hash", "bucket_entry_key_hash", bucketKeyHash)
- return
- }
-
- for _, entity := range entitiesFetched {
- // Delete all the aliases in the entity. This function will also remove
- // the corresponding alias indexes too.
- err = i.deleteAliasesInEntityInTxn(txn, entity, entity.Aliases)
- if err != nil {
- i.logger.Error("failed to delete aliases in entity", "entity_id", entity.ID, "error", err)
- return
- }
-
- // Delete the entity using the same transaction
- err = i.MemDBDeleteEntityByIDInTxn(txn, entity.ID)
- if err != nil {
- i.logger.Error("failed to delete entity from MemDB", "entity_id", entity.ID, "error", err)
- return
- }
- }
-
- // Get the storage bucket entry
- bucket, err := i.entityPacker.GetBucket(key)
- if err != nil {
- i.logger.Error("failed to refresh entities", "key", key, "error", err)
- return
- }
-
- // If the underlying entry is nil, it means that this invalidation
- // notification is for the deletion of the underlying storage entry. At
- // this point, since all the entities belonging to this bucket are
- // already removed, there is nothing else to be done. But, if the
- // storage entry is non-nil, it's an indication of an update. In this
- // case, entities in the updated bucket need to be reinserted into
- // MemDB.
- if bucket != nil {
- for _, item := range bucket.Items {
- entity, err := i.parseEntityFromBucketItem(ctx, item)
- if err != nil {
- i.logger.Error("failed to parse entity from bucket entry item", "error", err)
- return
- }
-
- // Only update MemDB and don't touch the storage
- err = i.upsertEntityInTxn(ctx, txn, entity, nil, false)
- if err != nil {
- i.logger.Error("failed to update entity in MemDB", "error", err)
- return
- }
- }
- }
-
- txn.Commit()
- return
-
- // Check if the key is a storage entry key for a group bucket
- case strings.HasPrefix(key, groupBucketsPrefix):
- // Get the hash value of the storage bucket entry key
- bucketKeyHash := i.groupPacker.BucketKeyHashByKey(key)
- if len(bucketKeyHash) == 0 {
- i.logger.Error("failed to get the bucket entry key hash")
- return
- }
-
- // Create a MemDB transaction
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- groupsFetched, err := i.MemDBGroupsByBucketEntryKeyHashInTxn(txn, string(bucketKeyHash))
- if err != nil {
- i.logger.Error("failed to fetch groups using the bucket entry key hash", "bucket_entry_key_hash", bucketKeyHash)
- return
- }
-
- for _, group := range groupsFetched {
- // Delete the group using the same transaction
- err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID)
- if err != nil {
- i.logger.Error("failed to delete group from MemDB", "group_id", group.ID, "error", err)
- return
- }
- }
-
- // Get the storage bucket entry
- bucket, err := i.groupPacker.GetBucket(key)
- if err != nil {
- i.logger.Error("failed to refresh group", "key", key, "error", err)
- return
- }
-
- if bucket != nil {
- for _, item := range bucket.Items {
- group, err := i.parseGroupFromBucketItem(item)
- if err != nil {
- i.logger.Error("failed to parse group from bucket entry item", "error", err)
- return
- }
-
- // Before updating the group, check if the group exists. If it
- // does, delete the group alias from MemDB, since the invalidation
- // may correspond to an update.
- groupFetched, err := i.MemDBGroupByIDInTxn(txn, group.ID, true)
- if err != nil {
- i.logger.Error("failed to fetch group from MemDB", "error", err)
- return
- }
-
- // If the group has an alias remove it from memdb
- if groupFetched != nil && groupFetched.Alias != nil {
- err := i.MemDBDeleteAliasByIDInTxn(txn, groupFetched.Alias.ID, true)
- if err != nil {
- i.logger.Error("failed to delete old group alias from MemDB", "error", err)
- return
- }
- }
-
- // Only update MemDB and don't touch the storage
- err = i.UpsertGroupInTxn(txn, group, false)
- if err != nil {
- i.logger.Error("failed to update group in MemDB", "error", err)
- return
- }
- }
- }
-
- txn.Commit()
- return
- }
-}
-
-func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *storagepacker.Item) (*identity.Entity, error) {
- if item == nil {
- return nil, fmt.Errorf("nil item")
- }
-
- persistNeeded := false
-
- var entity identity.Entity
- err := ptypes.UnmarshalAny(item.Message, &entity)
- if err != nil {
- // If we encounter an error, it means the entity is stored in an older
- // format. Try decoding using the older format and, if successful,
- // upgrade the storage entry to the newer format.
- var oldEntity identity.EntityStorageEntry
- oldEntityErr := ptypes.UnmarshalAny(item.Message, &oldEntity)
- if oldEntityErr != nil {
- return nil, errwrap.Wrapf("failed to decode entity from storage bucket item: {{err}}", err)
- }
-
- i.logger.Debug("upgrading the entity using patch introduced with vault 0.8.2.1", "entity_id", oldEntity.ID)
-
- // Successfully decoded entity using older format. Entity is stored
- // with older format. Upgrade it.
- entity.ID = oldEntity.ID
- entity.Name = oldEntity.Name
- entity.Metadata = oldEntity.Metadata
- entity.CreationTime = oldEntity.CreationTime
- entity.LastUpdateTime = oldEntity.LastUpdateTime
- entity.MergedEntityIDs = oldEntity.MergedEntityIDs
- entity.Policies = oldEntity.Policies
- entity.BucketKeyHash = oldEntity.BucketKeyHash
- entity.MFASecrets = oldEntity.MFASecrets
- // Copy each alias individually since the format of aliases was
- // also different
- for _, oldAlias := range oldEntity.Personas {
- var newAlias identity.Alias
- newAlias.ID = oldAlias.ID
- newAlias.Name = oldAlias.Name
- newAlias.CanonicalID = oldAlias.EntityID
- newAlias.MountType = oldAlias.MountType
- newAlias.MountAccessor = oldAlias.MountAccessor
- newAlias.MountPath = oldAlias.MountPath
- newAlias.Metadata = oldAlias.Metadata
- newAlias.CreationTime = oldAlias.CreationTime
- newAlias.LastUpdateTime = oldAlias.LastUpdateTime
- newAlias.MergedFromCanonicalIDs = oldAlias.MergedFromEntityIDs
- entity.Aliases = append(entity.Aliases, &newAlias)
- }
-
- persistNeeded = true
- }
-
- pN, err := parseExtraEntityFromBucket(ctx, i, &entity)
- if err != nil {
- return nil, err
- }
- if pN {
- persistNeeded = true
- }
-
- if persistNeeded && !i.core.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
- entityAsAny, err := ptypes.MarshalAny(&entity)
- if err != nil {
- return nil, err
- }
-
- item := &storagepacker.Item{
- ID: entity.ID,
- Message: entityAsAny,
- }
-
- // Store the entity with new format
- err = i.entityPacker.PutItem(item)
- if err != nil {
- return nil, err
- }
- }
-
- if entity.NamespaceID == "" {
- entity.NamespaceID = namespace.RootNamespaceID
- }
-
- return &entity, nil
-}
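-
-// parseEntityFromBucketItem above is an upgrade-on-read: decode with the
-// current proto format, fall back to the legacy format, and persist the
-// re-encoded entity so later reads take the fast path (persistence is
-// skipped on performance secondaries, which cannot write). The shape of the
-// pattern, with hypothetical decode helpers:
-//
-//	if err := decodeNew(raw, &entity); err != nil {
-//		if err2 := decodeLegacy(raw, &old); err2 != nil {
-//			return nil, err
-//		}
-//		entity = migrate(old)
-//		persistNeeded = true
-//	}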
-
-func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*identity.Group, error) {
- if item == nil {
- return nil, fmt.Errorf("nil item")
- }
-
- var group identity.Group
- err := ptypes.UnmarshalAny(item.Message, &group)
- if err != nil {
- return nil, errwrap.Wrapf("failed to decode group from storage bucket item: {{err}}", err)
- }
-
- if group.NamespaceID == "" {
- group.NamespaceID = namespace.RootNamespaceID
- }
-
- return &group, nil
-}
-
- // entityByAliasFactors fetches the entity based on the factors of an alias,
- // i.e. the mount accessor and the alias name.
-func (i *IdentityStore) entityByAliasFactors(mountAccessor, aliasName string, clone bool) (*identity.Entity, error) {
- if mountAccessor == "" {
- return nil, fmt.Errorf("missing mount accessor")
- }
-
- if aliasName == "" {
- return nil, fmt.Errorf("missing alias name")
- }
-
- txn := i.db.Txn(false)
-
- return i.entityByAliasFactorsInTxn(txn, mountAccessor, aliasName, clone)
-}
-
- // entityByAliasFactorsInTxn fetches the entity based on the factors of an
- // alias, i.e. the mount accessor and the alias name.
-func (i *IdentityStore) entityByAliasFactorsInTxn(txn *memdb.Txn, mountAccessor, aliasName string, clone bool) (*identity.Entity, error) {
- if txn == nil {
- return nil, fmt.Errorf("nil txn")
- }
-
- if mountAccessor == "" {
- return nil, fmt.Errorf("missing mount accessor")
- }
-
- if aliasName == "" {
- return nil, fmt.Errorf("missing alias name")
- }
-
- alias, err := i.MemDBAliasByFactorsInTxn(txn, mountAccessor, aliasName, false, false)
- if err != nil {
- return nil, err
- }
-
- if alias == nil {
- return nil, nil
- }
-
- return i.MemDBEntityByAliasIDInTxn(txn, alias.ID, clone)
-}
-
-// CreateOrFetchEntity creates a new entity. This is used by core to
-// associate each login attempt by an alias to a unified entity in Vault.
-func (i *IdentityStore) CreateOrFetchEntity(ctx context.Context, alias *logical.Alias) (*identity.Entity, error) {
- var entity *identity.Entity
- var err error
-
- if alias == nil {
- return nil, fmt.Errorf("alias is nil")
- }
-
- if alias.Name == "" {
- return nil, fmt.Errorf("empty alias name")
- }
-
- mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor)
- if mountValidationResp == nil {
- return nil, fmt.Errorf("invalid mount accessor %q", alias.MountAccessor)
- }
-
- if mountValidationResp.MountLocal {
- return nil, fmt.Errorf("mount_accessor %q is of a local mount", alias.MountAccessor)
- }
-
- if mountValidationResp.MountType != alias.MountType {
- return nil, fmt.Errorf("mount accessor %q is not a mount of type %q", alias.MountAccessor, alias.MountType)
- }
-
- // Check if an entity already exists for the given alias
- entity, err = i.entityByAliasFactors(alias.MountAccessor, alias.Name, false)
- if err != nil {
- return nil, err
- }
- if entity != nil {
- return entity, nil
- }
-
- i.lock.Lock()
- defer i.lock.Unlock()
-
- // Create a MemDB transaction to update both alias and entity
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Check if an entity was created before acquiring the lock
- entity, err = i.entityByAliasFactorsInTxn(txn, alias.MountAccessor, alias.Name, false)
- if err != nil {
- return nil, err
- }
- if entity != nil {
- return entity, nil
- }
-
- entity = new(identity.Entity)
- err = i.sanitizeEntity(ctx, entity)
- if err != nil {
- return nil, err
- }
-
- // Create a new alias
- newAlias := &identity.Alias{
- CanonicalID: entity.ID,
- Name: alias.Name,
- MountAccessor: alias.MountAccessor,
- Metadata: alias.Metadata,
- MountPath: mountValidationResp.MountPath,
- MountType: mountValidationResp.MountType,
- }
-
- err = i.sanitizeAlias(ctx, newAlias)
- if err != nil {
- return nil, err
- }
-
- i.logger.Debug("creating a new entity", "alias", newAlias)
-
- // Append the new alias to the new entity
- entity.Aliases = []*identity.Alias{
- newAlias,
- }
-
- // Update MemDB and persist entity object
- err = i.upsertEntityInTxn(ctx, txn, entity, nil, true)
- if err != nil {
- return nil, err
- }
-
- txn.Commit()
-
- return entity, nil
-}
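-
-// CreateOrFetchEntity above uses double-checked creation: a lock-free lookup
-// first, then a re-check inside the write transaction after taking the lock,
-// so concurrent logins for the same alias converge on one entity. The
-// pattern, with hypothetical lookup/create helpers:
-//
-//	if e := lookup(); e != nil {
-//		return e // fast path, no lock
-//	}
-//	lock.Lock()
-//	defer lock.Unlock()
-//	if e := lookup(); e != nil {
-//		return e // created concurrently while we waited for the lock
-//	}
-//	return create()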
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_aliases.go b/vendor/github.com/hashicorp/vault/vault/identity_store_aliases.go
deleted file mode 100644
index 88259240..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store_aliases.go
+++ /dev/null
@@ -1,443 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/golang/protobuf/ptypes"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/storagepacker"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// aliasPaths returns the API endpoints to operate on aliases.
-// Following are the paths supported:
-// entity-alias - To register/modify an alias
-// entity-alias/id - To read, modify, delete and list aliases based on their ID
-func aliasPaths(i *IdentityStore) []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "entity-alias$",
- Fields: map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the entity alias. If set, updates the corresponding entity alias.",
- },
- // entity_id is deprecated in favor of canonical_id
- "entity_id": {
- Type: framework.TypeString,
- Description: `Entity ID to which this alias belongs.
-This field is deprecated, use canonical_id.`,
- },
- "canonical_id": {
- Type: framework.TypeString,
- Description: "Entity ID to which this alias belongs",
- },
- "mount_accessor": {
- Type: framework.TypeString,
- Description: "Mount accessor to which this alias belongs to; unused for a modify",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Name of the alias; unused for a modify",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleAliasUpdateCommon(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias"][1]),
- },
- {
- Pattern: "entity-alias/id/" + framework.GenericNameRegex("id"),
- Fields: map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the alias",
- },
- // entity_id is deprecated
- "entity_id": {
- Type: framework.TypeString,
- Description: `Entity ID to which this alias belongs.
-This field is deprecated, use canonical_id.`,
- },
- "canonical_id": {
- Type: framework.TypeString,
- Description: "Entity ID to which this alias should be tied to",
- },
- "mount_accessor": {
- Type: framework.TypeString,
- Description: "(Unused)",
- },
- "name": {
- Type: framework.TypeString,
- Description: "(Unused)",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleAliasUpdateCommon(),
- logical.ReadOperation: i.pathAliasIDRead(),
- logical.DeleteOperation: i.pathAliasIDDelete(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias-id"][1]),
- },
- {
- Pattern: "entity-alias/id/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: i.pathAliasIDList(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id-list"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias-id-list"][1]),
- },
- }
-}
-
-// handleAliasUpdateCommon is used to update an alias
-func (i *IdentityStore) handleAliasUpdateCommon() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- var err error
- var alias *identity.Alias
- var entity *identity.Entity
- var previousEntity *identity.Entity
-
- i.lock.Lock()
- defer i.lock.Unlock()
-
- // Check for update or create
- aliasID := d.Get("id").(string)
- if aliasID != "" {
- alias, err = i.MemDBAliasByID(aliasID, true, false)
- if err != nil {
- return nil, err
- }
- if alias == nil {
- return logical.ErrorResponse("invalid alias id"), nil
- }
- } else {
- alias = &identity.Alias{}
- }
-
- // Get entity id
- canonicalID := d.Get("canonical_id").(string)
- if canonicalID == "" {
- // For backwards compatibility
- canonicalID = d.Get("entity_id").(string)
- }
-
- // Get alias name
- if aliasName := d.Get("name").(string); aliasName == "" {
- if alias.Name == "" {
- return logical.ErrorResponse("missing alias name"), nil
- }
- } else {
- alias.Name = aliasName
- }
-
- // Get mount accessor
- if mountAccessor := d.Get("mount_accessor").(string); mountAccessor == "" {
- if alias.MountAccessor == "" {
- return logical.ErrorResponse("missing mount_accessor"), nil
- }
- } else {
- alias.MountAccessor = mountAccessor
- }
-
- mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor)
- if mountValidationResp == nil {
- return logical.ErrorResponse(fmt.Sprintf("invalid mount accessor %q", alias.MountAccessor)), nil
- }
- if mountValidationResp.MountLocal {
- return logical.ErrorResponse(fmt.Sprintf("mount_accessor %q is of a local mount", alias.MountAccessor)), nil
- }
-
- // Verify that the combination of alias name and mount is not
- // already tied to a different alias
- aliasByFactors, err := i.MemDBAliasByFactors(mountValidationResp.MountAccessor, alias.Name, false, false)
- if err != nil {
- return nil, err
- }
- if aliasByFactors != nil {
- // If it's a create we won't have an alias ID, so this will correctly
- // bail. If it's an update, alias will be the same as aliasByFactors,
- // so we don't need to transfer any info over.
- if aliasByFactors.ID != alias.ID {
- return logical.ErrorResponse("combination of mount and alias name is already in use"), nil
- }
-
- // Fetch the entity to which the alias is tied. We don't need to append
- // here, so the only further checking is whether the canonical ID is
- // different
- entity, err = i.MemDBEntityByAliasID(alias.ID, true)
- if err != nil {
- return nil, err
- }
- if entity == nil {
- return nil, fmt.Errorf("existing alias is not associated with an entity")
- }
- } else if alias.ID != "" {
- // This is an update, not a create; if we have an associated entity
- // already, load it
- entity, err = i.MemDBEntityByAliasID(alias.ID, true)
- if err != nil {
- return nil, err
- }
- }
-
- resp := &logical.Response{}
-
- // If we found an existing alias we won't hit this condition because
- // canonicalID being empty will result in nil being returned in the block
- // above, so in this case we know that creating a new entity is the right
- // thing.
- if canonicalID == "" {
- entity = &identity.Entity{
- Aliases: []*identity.Alias{
- alias,
- },
- }
- } else {
- // If we can look up by the given canonical ID, see if this is a
- // transfer; otherwise if we found no previous entity but we find one
- // here, use it.
- canonicalEntity, err := i.MemDBEntityByID(canonicalID, true)
- if err != nil {
- return nil, err
- }
- if canonicalEntity == nil {
- return logical.ErrorResponse("invalid canonical ID"), nil
- }
- if entity == nil {
- // If entity is nil, we didn't find a previous alias from factors,
- // so append to this entity
- entity = canonicalEntity
- entity.Aliases = append(entity.Aliases, alias)
- } else if entity.ID != canonicalEntity.ID {
- // In this case we found an entity from alias factors or given
- // alias ID but it's not the same, so it's a migration
- previousEntity = entity
- entity = canonicalEntity
-
- for aliasIndex, item := range previousEntity.Aliases {
- if item.ID == alias.ID {
- previousEntity.Aliases = append(previousEntity.Aliases[:aliasIndex], previousEntity.Aliases[aliasIndex+1:]...)
- break
- }
- }
-
- entity.Aliases = append(entity.Aliases, alias)
- resp.AddWarning(fmt.Sprintf("alias is being transferred from entity %q to %q", previousEntity.ID, entity.ID))
- }
- }
-
-		// ID creation and other validations. This is mostly useful for new
-		// entities and may be a no-op for existing ones; the check is placed
-		// here to keep the flow common for both new and existing entities.
- err = i.sanitizeEntity(ctx, entity)
- if err != nil {
- return nil, err
- }
-
-		// Explicitly set these to empty, since past versions incorrectly persisted them
- alias.MountPath = ""
- alias.MountType = ""
-
- // Set the canonical ID in the alias index. This should be done after
- // sanitizing entity.
- alias.CanonicalID = entity.ID
-
- // ID creation and other validations
- err = i.sanitizeAlias(ctx, alias)
- if err != nil {
- return nil, err
- }
-
- for index, item := range entity.Aliases {
- if item.ID == alias.ID {
- entity.Aliases[index] = alias
- }
- }
-
- // Index entity and its aliases in MemDB and persist entity along with
- // aliases in storage. If the alias is being transferred over from
- // one entity to another, previous entity needs to get refreshed in MemDB
- // and persisted in storage as well.
- if err := i.upsertEntity(ctx, entity, previousEntity, true); err != nil {
- return nil, err
- }
-
- // Return ID of both alias and entity
- resp.Data = map[string]interface{}{
- "id": alias.ID,
- "canonical_id": entity.ID,
- }
-
- return resp, nil
- }
-}
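
The transfer branch above drops the alias from the previous entity with the append-based slice deletion idiom. A minimal, runnable sketch of that idiom (the types are illustrative, not Vault's):

```go
package main

import "fmt"

type alias struct{ ID string }

// removeByID deletes the first alias whose ID matches, preserving order.
// append reuses the backing array, so callers must replace their slice
// header with the returned value, as the handler above does.
func removeByID(aliases []alias, id string) []alias {
	for i, a := range aliases {
		if a.ID == id {
			return append(aliases[:i], aliases[i+1:]...)
		}
	}
	return aliases
}

func main() {
	fmt.Println(removeByID([]alias{{"a"}, {"b"}, {"c"}}, "b")) // [{a} {c}]
}
```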
-
-// pathAliasIDRead returns the properties of an alias for a given
-// alias ID
-func (i *IdentityStore) pathAliasIDRead() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- aliasID := d.Get("id").(string)
- if aliasID == "" {
- return logical.ErrorResponse("missing alias id"), nil
- }
-
- alias, err := i.MemDBAliasByID(aliasID, false, false)
- if err != nil {
- return nil, err
- }
-
- return i.handleAliasReadCommon(ctx, alias)
- }
-}
-
-func (i *IdentityStore) handleAliasReadCommon(ctx context.Context, alias *identity.Alias) (*logical.Response, error) {
- if alias == nil {
- return nil, nil
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns.ID != alias.NamespaceID {
- return nil, nil
- }
-
- respData := map[string]interface{}{}
- respData["id"] = alias.ID
- respData["canonical_id"] = alias.CanonicalID
- respData["mount_accessor"] = alias.MountAccessor
- respData["metadata"] = alias.Metadata
- respData["name"] = alias.Name
- respData["merged_from_canonical_ids"] = alias.MergedFromCanonicalIDs
-
- if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
- respData["mount_path"] = mountValidationResp.MountPath
- respData["mount_type"] = mountValidationResp.MountType
- }
-
- // Convert protobuf timestamp into RFC3339 format
- respData["creation_time"] = ptypes.TimestampString(alias.CreationTime)
- respData["last_update_time"] = ptypes.TimestampString(alias.LastUpdateTime)
-
- return &logical.Response{
- Data: respData,
- }, nil
-}
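
Both read paths convert the protobuf timestamps with ptypes.TimestampString, which formats them as RFC 3339 and, unlike ptypes.Timestamp, has no error to handle. A small sketch using the same golang/protobuf helpers this file imports:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	ts := ptypes.TimestampNow() // *timestamp.Timestamp for the current instant
	// Renders an RFC 3339 string such as "2019-01-02T15:04:05.999999999Z".
	fmt.Println(ptypes.TimestampString(ts))
}
```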
-
-// pathAliasIDDelete deletes the alias for a given alias ID
-func (i *IdentityStore) pathAliasIDDelete() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- aliasID := d.Get("id").(string)
- if aliasID == "" {
- return logical.ErrorResponse("missing alias ID"), nil
- }
-
- i.lock.Lock()
- defer i.lock.Unlock()
-
- // Create a MemDB transaction to delete entity
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Fetch the alias
- alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, false)
- if err != nil {
- return nil, err
- }
-
- // If there is no alias for the ID, do nothing
- if alias == nil {
- return nil, nil
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns.ID != alias.NamespaceID {
- return nil, logical.ErrUnsupportedPath
- }
-
- // Fetch the associated entity
- entity, err := i.MemDBEntityByAliasIDInTxn(txn, alias.ID, true)
- if err != nil {
- return nil, err
- }
-
- // If there is no entity tied to a valid alias, something is wrong
- if entity == nil {
-			return nil, fmt.Errorf("alias not associated with an entity")
- }
-
- aliases := []*identity.Alias{
- alias,
- }
-
- // Delete alias from the entity object
- err = i.deleteAliasesInEntityInTxn(txn, entity, aliases)
- if err != nil {
- return nil, err
- }
-
- // Update the entity index in the entities table
- err = i.MemDBUpsertEntityInTxn(txn, entity)
- if err != nil {
- return nil, err
- }
-
- // Persist the entity object
- entityAsAny, err := ptypes.MarshalAny(entity)
- if err != nil {
- return nil, err
- }
- item := &storagepacker.Item{
- ID: entity.ID,
- Message: entityAsAny,
- }
-
- err = i.entityPacker.PutItem(item)
- if err != nil {
- return nil, err
- }
-
- // Committing the transaction *after* successfully updating entity in
- // storage
- txn.Commit()
-
- return nil, nil
- }
-}
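
The delete handler shows the pattern used throughout these files: open a write transaction, defer Abort, mutate MemDB, persist to durable storage, and only Commit once persistence succeeded, so a storage failure rolls the in-memory change back. A stripped-down sketch of that ordering against go-memdb (the table name and persist hook are stand-ins):

```go
import (
	memdb "github.com/hashicorp/go-memdb"
)

func deleteRecord(db *memdb.MemDB, persist func(id string) error, id string) error {
	txn := db.Txn(true) // write transaction
	defer txn.Abort()   // rolls back unless Commit ran; no-op afterwards

	raw, err := txn.First("records", "id", id)
	if err != nil {
		return err
	}
	if raw == nil {
		return nil // nothing to delete
	}
	if err := txn.Delete("records", raw); err != nil {
		return err
	}

	// Persist first; the in-memory deletion becomes visible only after
	// durable storage has been updated.
	if err := persist(id); err != nil {
		return err
	}
	txn.Commit()
	return nil
}
```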
-
-// pathAliasIDList lists the IDs of all the valid aliases in the identity
-// store
-func (i *IdentityStore) pathAliasIDList() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return i.handleAliasListCommon(ctx, false)
- }
-}
-
-var aliasHelp = map[string][2]string{
- "alias": {
- "Create a new alias.",
- "",
- },
- "alias-id": {
-		"Update, read or delete an alias by its ID.",
- "",
- },
- "alias-id-list": {
- "List all the alias IDs.",
- "",
- },
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_entities.go b/vendor/github.com/hashicorp/vault/vault/identity_store_entities.go
deleted file mode 100644
index 4cfadb68..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store_entities.go
+++ /dev/null
@@ -1,763 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- "github.com/golang/protobuf/ptypes"
- "github.com/hashicorp/errwrap"
- memdb "github.com/hashicorp/go-memdb"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/storagepacker"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func entityPathFields() map[string]*framework.FieldSchema {
- return map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the entity. If set, updates the corresponding existing entity.",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Name of the entity",
- },
- "metadata": {
- Type: framework.TypeKVPairs,
- Description: `Metadata to be associated with the entity.
-In the CLI, this parameter can be repeated multiple times, and the values get merged together.
-For example:
-vault metadata=key1=value1 metadata=key2=value2
- `,
- },
- "policies": {
- Type: framework.TypeCommaStringSlice,
- Description: "Policies to be tied to the entity.",
- },
- "disabled": {
- Type: framework.TypeBool,
-			Description: "If set to true, tokens tied to this identity will not be usable (but will not be revoked).",
- },
- }
-}
-
-// entityPaths returns the API endpoints supported to operate on entities.
-// The following paths are supported:
-// entity - To register a new entity
-// entity/name - To look up, modify, delete and list entities based on name
-// entity/id - To look up, modify, delete and list entities based on ID
-// entity/merge - To merge entities based on ID
-func entityPaths(i *IdentityStore) []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "entity$",
- Fields: entityPathFields(),
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleEntityUpdateCommon(),
- },
-
- HelpSynopsis: strings.TrimSpace(entityHelp["entity"][0]),
- HelpDescription: strings.TrimSpace(entityHelp["entity"][1]),
- },
- {
- Pattern: "entity/name/" + framework.GenericNameRegex("name"),
- Fields: entityPathFields(),
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleEntityUpdateCommon(),
- logical.ReadOperation: i.pathEntityNameRead(),
- logical.DeleteOperation: i.pathEntityNameDelete(),
- },
-
- HelpSynopsis: strings.TrimSpace(entityHelp["entity-name"][0]),
- HelpDescription: strings.TrimSpace(entityHelp["entity-name"][1]),
- },
- {
- Pattern: "entity/id/" + framework.GenericNameRegex("id"),
- Fields: entityPathFields(),
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleEntityUpdateCommon(),
- logical.ReadOperation: i.pathEntityIDRead(),
- logical.DeleteOperation: i.pathEntityIDDelete(),
- },
-
- HelpSynopsis: strings.TrimSpace(entityHelp["entity-id"][0]),
- HelpDescription: strings.TrimSpace(entityHelp["entity-id"][1]),
- },
- {
- Pattern: "entity/name/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: i.pathEntityNameList(),
- },
-
- HelpSynopsis: strings.TrimSpace(entityHelp["entity-name-list"][0]),
- HelpDescription: strings.TrimSpace(entityHelp["entity-name-list"][1]),
- },
- {
- Pattern: "entity/id/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: i.pathEntityIDList(),
- },
-
- HelpSynopsis: strings.TrimSpace(entityHelp["entity-id-list"][0]),
- HelpDescription: strings.TrimSpace(entityHelp["entity-id-list"][1]),
- },
- {
- Pattern: "entity/merge/?$",
- Fields: map[string]*framework.FieldSchema{
- "from_entity_ids": {
- Type: framework.TypeCommaStringSlice,
-					Description: "Entity IDs which need to be merged",
- },
- "to_entity_id": {
- Type: framework.TypeString,
-					Description: "Entity ID into which all the other entities need to be merged",
- },
- "force": {
- Type: framework.TypeBool,
-					Description: "Setting this will follow the 'mine' strategy for merging MFA secrets. If secrets of the same type exist both in the entities being merged from and in the entity being merged into, the secrets in the destination will be unaltered. If not set, this API will return an error listing all the conflicts.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.pathEntityMergeID(),
- },
-
- HelpSynopsis: strings.TrimSpace(entityHelp["entity-merge-id"][0]),
- HelpDescription: strings.TrimSpace(entityHelp["entity-merge-id"][1]),
- },
- }
-}
-
-// pathEntityMergeID merges two or more entities into a single entity
-func (i *IdentityStore) pathEntityMergeID() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- toEntityID := d.Get("to_entity_id").(string)
- if toEntityID == "" {
- return logical.ErrorResponse("missing entity id to merge to"), nil
- }
-
- fromEntityIDs := d.Get("from_entity_ids").([]string)
- if len(fromEntityIDs) == 0 {
- return logical.ErrorResponse("missing entity ids to merge from"), nil
- }
-
- force := d.Get("force").(bool)
-
- // Create a MemDB transaction to merge entities
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- toEntity, err := i.MemDBEntityByID(toEntityID, true)
- if err != nil {
- return nil, err
- }
-
- userErr, intErr := i.mergeEntity(ctx, txn, toEntity, fromEntityIDs, force, true, false)
- if userErr != nil {
- return logical.ErrorResponse(userErr.Error()), nil
- }
- if intErr != nil {
- return nil, intErr
- }
-
- // Committing the transaction *after* successfully performing storage
- // persistence
- txn.Commit()
-
- return nil, nil
- }
-}
-
-// handleEntityUpdateCommon is used to create or update an entity
-func (i *IdentityStore) handleEntityUpdateCommon() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- i.lock.Lock()
- defer i.lock.Unlock()
-
- entity := new(identity.Entity)
- var err error
-
- entityID := d.Get("id").(string)
- if entityID != "" {
- entity, err = i.MemDBEntityByID(entityID, true)
- if err != nil {
- return nil, err
- }
- if entity == nil {
- return logical.ErrorResponse("entity not found from id"), nil
- }
- }
-
- // Get the name
- entityName := d.Get("name").(string)
- if entityName != "" {
- entityByName, err := i.MemDBEntityByName(ctx, entityName, false)
- if err != nil {
- return nil, err
- }
- switch {
- case entityByName == nil:
- // Not found, safe to use this name with an existing or new entity
- case entity.ID == "":
-			// Entity by ID was not found, but an entity for the supplied
- // name was found. Continue updating the entity.
- entity = entityByName
- case entity.ID == entityByName.ID:
- // Same exact entity, carry on (this is basically a noop then)
- default:
- return logical.ErrorResponse("entity name is already in use"), nil
- }
- }
-
- if entityName != "" {
- entity.Name = entityName
- }
-
- // Update the policies if supplied
- entityPoliciesRaw, ok := d.GetOk("policies")
- if ok {
- entity.Policies = entityPoliciesRaw.([]string)
- }
-
- if strutil.StrListContains(entity.Policies, "root") {
- return logical.ErrorResponse("policies cannot contain root"), nil
- }
-
- disabledRaw, ok := d.GetOk("disabled")
- if ok {
- entity.Disabled = disabledRaw.(bool)
- }
-
- // Get entity metadata
- metadata, ok, err := d.GetOkErr("metadata")
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to parse metadata: %v", err)), nil
- }
- if ok {
- entity.Metadata = metadata.(map[string]string)
- }
-
- // At this point, if entity.ID is empty, it indicates that a new entity
-		// is being created. This is used to decide what data to include in the response.
- newEntity := entity.ID == ""
-
- // ID creation and some validations
- err = i.sanitizeEntity(ctx, entity)
- if err != nil {
- return nil, err
- }
-
- if err := i.upsertEntity(ctx, entity, nil, true); err != nil {
- return nil, err
- }
-
- // If this operation was an update to an existing entity, return 204
- if !newEntity {
- return nil, nil
- }
-
- // Prepare the response
- respData := map[string]interface{}{
- "id": entity.ID,
- }
-
- var aliasIDs []string
- for _, alias := range entity.Aliases {
- aliasIDs = append(aliasIDs, alias.ID)
- }
-
- respData["aliases"] = aliasIDs
-
-		// Return the ID of the newly created entity along with its aliases
- return &logical.Response{
- Data: respData,
- }, nil
- }
-}
-
-// pathEntityNameRead returns the properties of an entity for a given entity name
-func (i *IdentityStore) pathEntityNameRead() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entityName := d.Get("name").(string)
- if entityName == "" {
- return logical.ErrorResponse("missing entity name"), nil
- }
-
- entity, err := i.MemDBEntityByName(ctx, entityName, false)
- if err != nil {
- return nil, err
- }
- if entity == nil {
- return nil, nil
- }
-
- return i.handleEntityReadCommon(ctx, entity)
- }
-}
-
-// pathEntityIDRead returns the properties of an entity for a given entity ID
-func (i *IdentityStore) pathEntityIDRead() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entityID := d.Get("id").(string)
- if entityID == "" {
- return logical.ErrorResponse("missing entity id"), nil
- }
-
- entity, err := i.MemDBEntityByID(entityID, false)
- if err != nil {
- return nil, err
- }
- if entity == nil {
- return nil, nil
- }
-
- return i.handleEntityReadCommon(ctx, entity)
- }
-}
-
-func (i *IdentityStore) handleEntityReadCommon(ctx context.Context, entity *identity.Entity) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns.ID != entity.NamespaceID {
- return nil, nil
- }
-
- respData := map[string]interface{}{}
- respData["id"] = entity.ID
- respData["name"] = entity.Name
- respData["metadata"] = entity.Metadata
- respData["merged_entity_ids"] = entity.MergedEntityIDs
- respData["policies"] = entity.Policies
- respData["disabled"] = entity.Disabled
-
- // Convert protobuf timestamp into RFC3339 format
- respData["creation_time"] = ptypes.TimestampString(entity.CreationTime)
- respData["last_update_time"] = ptypes.TimestampString(entity.LastUpdateTime)
-
- // Convert each alias into a map and replace the time format in each
- aliasesToReturn := make([]interface{}, len(entity.Aliases))
- for aliasIdx, alias := range entity.Aliases {
- aliasMap := map[string]interface{}{}
- aliasMap["id"] = alias.ID
- aliasMap["canonical_id"] = alias.CanonicalID
- aliasMap["mount_accessor"] = alias.MountAccessor
- aliasMap["metadata"] = alias.Metadata
- aliasMap["name"] = alias.Name
- aliasMap["merged_from_canonical_ids"] = alias.MergedFromCanonicalIDs
- aliasMap["creation_time"] = ptypes.TimestampString(alias.CreationTime)
- aliasMap["last_update_time"] = ptypes.TimestampString(alias.LastUpdateTime)
-
- if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
- aliasMap["mount_type"] = mountValidationResp.MountType
- aliasMap["mount_path"] = mountValidationResp.MountPath
- }
-
- aliasesToReturn[aliasIdx] = aliasMap
- }
-
- // Add the aliases information to the response which has the correct time
- // formats
- respData["aliases"] = aliasesToReturn
-
- addExtraEntityDataToResponse(entity, respData)
-
- // Fetch the groups this entity belongs to and return their identifiers
- groups, inheritedGroups, err := i.groupsByEntityID(entity.ID)
- if err != nil {
- return nil, err
- }
-
- groupIDs := make([]string, len(groups))
- for i, group := range groups {
- groupIDs[i] = group.ID
- }
- respData["direct_group_ids"] = groupIDs
-
- inheritedGroupIDs := make([]string, len(inheritedGroups))
- for i, group := range inheritedGroups {
- inheritedGroupIDs[i] = group.ID
- }
- respData["inherited_group_ids"] = inheritedGroupIDs
-
- respData["group_ids"] = append(groupIDs, inheritedGroupIDs...)
-
- return &logical.Response{
- Data: respData,
- }, nil
-}
-
-// pathEntityIDDelete deletes the entity for a given entity ID
-func (i *IdentityStore) pathEntityIDDelete() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entityID := d.Get("id").(string)
- if entityID == "" {
- return logical.ErrorResponse("missing entity id"), nil
- }
-
- i.lock.Lock()
- defer i.lock.Unlock()
-
- // Create a MemDB transaction to delete entity
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Fetch the entity using its ID
- entity, err := i.MemDBEntityByIDInTxn(txn, entityID, true)
- if err != nil {
- return nil, err
- }
- if entity == nil {
- return nil, nil
- }
-
- err = i.handleEntityDeleteCommon(ctx, txn, entity)
- if err != nil {
- return nil, err
- }
-
- txn.Commit()
-
- return nil, nil
- }
-}
-
-// pathEntityNameDelete deletes the entity for a given entity name
-func (i *IdentityStore) pathEntityNameDelete() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- entityName := d.Get("name").(string)
- if entityName == "" {
- return logical.ErrorResponse("missing entity name"), nil
- }
-
- i.lock.Lock()
- defer i.lock.Unlock()
-
- // Create a MemDB transaction to delete entity
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- // Fetch the entity using its name
- entity, err := i.MemDBEntityByNameInTxn(ctx, txn, entityName, true)
- if err != nil {
- return nil, err
- }
-		// If there is no entity for the name, do nothing
- if entity == nil {
- return nil, nil
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if entity.NamespaceID != ns.ID {
- return nil, nil
- }
-
- err = i.handleEntityDeleteCommon(ctx, txn, entity)
- if err != nil {
- return nil, err
- }
-
- txn.Commit()
-
- return nil, nil
- }
-}
-
-func (i *IdentityStore) handleEntityDeleteCommon(ctx context.Context, txn *memdb.Txn, entity *identity.Entity) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- if entity.NamespaceID != ns.ID {
- return nil
- }
-
-	// Remove the entity ID as a member from all the groups it belongs to,
-	// both internal and external
- groups, err := i.MemDBGroupsByMemberEntityIDInTxn(txn, entity.ID, true, false)
- if err != nil {
-		return err
- }
-
- for _, group := range groups {
- group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entity.ID)
- err = i.UpsertGroupInTxn(txn, group, true)
- if err != nil {
- return err
- }
- }
-
- // Delete all the aliases in the entity and the respective indexes
- err = i.deleteAliasesInEntityInTxn(txn, entity, entity.Aliases)
- if err != nil {
- return err
- }
-
- // Delete the entity using the same transaction
- err = i.MemDBDeleteEntityByIDInTxn(txn, entity.ID)
- if err != nil {
- return err
- }
-
- // Delete the entity from storage
- err = i.entityPacker.DeleteItem(entity.ID)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (i *IdentityStore) pathEntityIDList() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return i.handlePathEntityListCommon(ctx, req, d, true)
- }
-}
-
-func (i *IdentityStore) pathEntityNameList() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return i.handlePathEntityListCommon(ctx, req, d, false)
- }
-}
-
-// handlePathEntityListCommon lists the IDs or names of all the valid entities
-// in the identity store
-func (i *IdentityStore) handlePathEntityListCommon(ctx context.Context, req *logical.Request, d *framework.FieldData, byID bool) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- ws := memdb.NewWatchSet()
-
- txn := i.db.Txn(false)
-
- iter, err := txn.Get(entitiesTable, "namespace_id", ns.ID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch iterator for entities in memdb: {{err}}", err)
- }
-
- ws.Add(iter.WatchCh())
-
- var keys []string
- entityInfo := map[string]interface{}{}
-
- type mountInfo struct {
- MountType string
- MountPath string
- }
- mountAccessorMap := map[string]mountInfo{}
-
- for {
- raw := iter.Next()
- if raw == nil {
- break
- }
- entity := raw.(*identity.Entity)
- if byID {
- keys = append(keys, entity.ID)
- } else {
- keys = append(keys, entity.Name)
- }
- entityInfoEntry := map[string]interface{}{
- "name": entity.Name,
- }
- if len(entity.Aliases) > 0 {
- aliasList := make([]interface{}, 0, len(entity.Aliases))
- for _, alias := range entity.Aliases {
- entry := map[string]interface{}{
- "id": alias.ID,
- "name": alias.Name,
- "mount_accessor": alias.MountAccessor,
- }
-
- mi, ok := mountAccessorMap[alias.MountAccessor]
- if ok {
- entry["mount_type"] = mi.MountType
- entry["mount_path"] = mi.MountPath
- } else {
- mi = mountInfo{}
- if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
- mi.MountType = mountValidationResp.MountType
- mi.MountPath = mountValidationResp.MountPath
- entry["mount_type"] = mi.MountType
- entry["mount_path"] = mi.MountPath
- }
- mountAccessorMap[alias.MountAccessor] = mi
- }
-
- aliasList = append(aliasList, entry)
- }
- entityInfoEntry["aliases"] = aliasList
- }
- entityInfo[entity.ID] = entityInfoEntry
- }
-
- return logical.ListResponseWithInfo(keys, entityInfo), nil
-}
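
handlePathEntityListCommon drains a secondary-index iterator by calling Next until it returns nil. A self-contained go-memdb sketch of the same read path (schema details are stand-ins):

```go
import (
	memdb "github.com/hashicorp/go-memdb"
)

type entity struct {
	ID          string
	NamespaceID string
}

func listEntityIDs(db *memdb.MemDB, nsID string) ([]string, error) {
	txn := db.Txn(false) // read-only; no Commit needed

	iter, err := txn.Get("entities", "namespace_id", nsID)
	if err != nil {
		return nil, err
	}

	var ids []string
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		ids = append(ids, raw.(*entity).ID)
	}
	return ids, nil
}
```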
-
-func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntity *identity.Entity, fromEntityIDs []string, force, grabLock, mergePolicies bool) (error, error) {
- if grabLock {
- i.lock.Lock()
- defer i.lock.Unlock()
- }
-
- if toEntity == nil {
- return errors.New("entity id to merge to is invalid"), nil
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if toEntity.NamespaceID != ns.ID {
- return errors.New("entity id to merge into does not belong to the request's namespace"), nil
- }
-
- // Merge the MFA secrets
- for _, fromEntityID := range fromEntityIDs {
- if fromEntityID == toEntity.ID {
- return errors.New("to_entity_id should not be present in from_entity_ids"), nil
- }
-
- fromEntity, err := i.MemDBEntityByID(fromEntityID, false)
- if err != nil {
- return nil, err
- }
-
- if fromEntity == nil {
- return errors.New("entity id to merge from is invalid"), nil
- }
-
- if fromEntity.NamespaceID != toEntity.NamespaceID {
- return errors.New("entity id to merge from does not belong to this namespace"), nil
- }
-
-		for configID, configSecret := range fromEntity.MFASecrets {
-			if _, ok := toEntity.MFASecrets[configID]; ok && !force {
-				return nil, fmt.Errorf("conflicting MFA config ID %q in entity ID %q", configID, fromEntity.ID)
-			}
-			toEntity.MFASecrets[configID] = configSecret
-		}
- }
-
- for _, fromEntityID := range fromEntityIDs {
- if fromEntityID == toEntity.ID {
- return errors.New("to_entity_id should not be present in from_entity_ids"), nil
- }
-
- fromEntity, err := i.MemDBEntityByID(fromEntityID, false)
- if err != nil {
- return nil, err
- }
-
- if fromEntity == nil {
- return errors.New("entity id to merge from is invalid"), nil
- }
-
- if fromEntity.NamespaceID != toEntity.NamespaceID {
- return errors.New("entity id to merge from does not belong to this namespace"), nil
- }
-
- for _, alias := range fromEntity.Aliases {
- // Set the desired canonical ID
- alias.CanonicalID = toEntity.ID
-
- alias.MergedFromCanonicalIDs = append(alias.MergedFromCanonicalIDs, fromEntity.ID)
-
- err = i.MemDBUpsertAliasInTxn(txn, alias, false)
- if err != nil {
- return nil, errwrap.Wrapf("failed to update alias during merge: {{err}}", err)
- }
-
- // Add the alias to the desired entity
- toEntity.Aliases = append(toEntity.Aliases, alias)
- }
-
- // If told to, merge policies
- if mergePolicies {
- toEntity.Policies = strutil.MergeSlices(toEntity.Policies, fromEntity.Policies)
- }
-
-		// If the entity we are merging from was itself a merged entity,
-		// transfer its merged-entity set over to the entity we are
-		// merging into.
- toEntity.MergedEntityIDs = append(toEntity.MergedEntityIDs, fromEntity.MergedEntityIDs...)
-
-		// Add the entity we are merging from to the list of entities that
-		// make up the entity we are merging into.
- toEntity.MergedEntityIDs = append(toEntity.MergedEntityIDs, fromEntity.ID)
-
- // Delete the entity which we are merging from in MemDB using the same transaction
- err = i.MemDBDeleteEntityByIDInTxn(txn, fromEntity.ID)
- if err != nil {
- return nil, err
- }
-
- // Delete the entity which we are merging from in storage
- err = i.entityPacker.DeleteItem(fromEntity.ID)
- if err != nil {
- return nil, err
- }
- }
-
- // Update MemDB with changes to the entity we are merging to
- err = i.MemDBUpsertEntityInTxn(txn, toEntity)
- if err != nil {
- return nil, err
- }
-
- // Persist the entity which we are merging to
- toEntityAsAny, err := ptypes.MarshalAny(toEntity)
- if err != nil {
- return nil, err
- }
- item := &storagepacker.Item{
- ID: toEntity.ID,
- Message: toEntityAsAny,
- }
-
- err = i.entityPacker.PutItem(item)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
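
mergeEntity returns a pair of errors: the first is the caller's fault and becomes a logical.ErrorResponse, the second is internal and propagates as a Go error. A tiny runnable sketch of consuming that (userErr, intErr) convention, mirroring pathEntityMergeID above:

```go
package main

import (
	"errors"
	"fmt"
)

// merge is a stand-in following the (userErr, intErr) convention: the
// first error is the client's fault, the second is the server's.
func merge(toID string) (userErr, intErr error) {
	if toID == "" {
		return errors.New("missing entity id to merge to"), nil
	}
	return nil, nil
}

func main() {
	switch userErr, intErr := merge(""); {
	case userErr != nil:
		fmt.Println("400-style response:", userErr) // surface to the client
	case intErr != nil:
		fmt.Println("500-style failure:", intErr) // log and propagate
	default:
		fmt.Println("merged")
	}
}
```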
-
-var entityHelp = map[string][2]string{
- "entity": {
- "Create a new entity",
- "",
- },
- "entity-id": {
- "Update, read or delete an entity using entity ID",
- "",
- },
- "entity-name": {
- "Update, read or delete an entity using entity name",
- "",
- },
- "entity-id-list": {
- "List all the entity IDs",
- "",
- },
- "entity-name-list": {
- "List all the entity names",
- "",
- },
- "entity-merge-id": {
- "Merge two or more entities together",
- "",
- },
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_group_aliases.go b/vendor/github.com/hashicorp/vault/vault/identity_store_group_aliases.go
deleted file mode 100644
index 4a57b0aa..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store_group_aliases.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func groupAliasPaths(i *IdentityStore) []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "group-alias$",
- Fields: map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the group alias.",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Alias of the group.",
- },
- "mount_accessor": {
- Type: framework.TypeString,
-					Description: "Mount accessor to which this alias belongs.",
- },
- "canonical_id": {
- Type: framework.TypeString,
- Description: "ID of the group to which this is an alias.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.pathGroupAliasRegister(),
- },
-
- HelpSynopsis: strings.TrimSpace(groupAliasHelp["group-alias"][0]),
- HelpDescription: strings.TrimSpace(groupAliasHelp["group-alias"][1]),
- },
- {
- Pattern: "group-alias/id/" + framework.GenericNameRegex("id"),
- Fields: map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the group alias.",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Alias of the group.",
- },
- "mount_accessor": {
- Type: framework.TypeString,
-					Description: "Mount accessor to which this alias belongs.",
- },
- "canonical_id": {
- Type: framework.TypeString,
- Description: "ID of the group to which this is an alias.",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.pathGroupAliasIDUpdate(),
- logical.ReadOperation: i.pathGroupAliasIDRead(),
- logical.DeleteOperation: i.pathGroupAliasIDDelete(),
- },
-
- HelpSynopsis: strings.TrimSpace(groupAliasHelp["group-alias-by-id"][0]),
- HelpDescription: strings.TrimSpace(groupAliasHelp["group-alias-by-id"][1]),
- },
- {
- Pattern: "group-alias/id/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: i.pathGroupAliasIDList(),
- },
-
- HelpSynopsis: strings.TrimSpace(groupAliasHelp["group-alias-id-list"][0]),
- HelpDescription: strings.TrimSpace(groupAliasHelp["group-alias-id-list"][1]),
- },
- }
-}
-
-func (i *IdentityStore) pathGroupAliasRegister() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- _, ok := d.GetOk("id")
- if ok {
- return i.pathGroupAliasIDUpdate()(ctx, req, d)
- }
-
- i.groupLock.Lock()
- defer i.groupLock.Unlock()
-
- return i.handleGroupAliasUpdateCommon(ctx, req, d, nil)
- }
-}
-
-func (i *IdentityStore) pathGroupAliasIDUpdate() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupAliasID := d.Get("id").(string)
- if groupAliasID == "" {
- return logical.ErrorResponse("empty group alias ID"), nil
- }
-
- i.groupLock.Lock()
- defer i.groupLock.Unlock()
-
- groupAlias, err := i.MemDBAliasByID(groupAliasID, true, true)
- if err != nil {
- return nil, err
- }
- if groupAlias == nil {
- return logical.ErrorResponse("invalid group alias ID"), nil
- }
-
- return i.handleGroupAliasUpdateCommon(ctx, req, d, groupAlias)
- }
-}
-
-func (i *IdentityStore) handleGroupAliasUpdateCommon(ctx context.Context, req *logical.Request, d *framework.FieldData, groupAlias *identity.Alias) (*logical.Response, error) {
- var newGroupAlias bool
- var group *identity.Group
- var err error
-
- if groupAlias == nil {
- groupAlias = &identity.Alias{}
- newGroupAlias = true
- }
-
- groupID := d.Get("canonical_id").(string)
- if groupID != "" {
- group, err = i.MemDBGroupByID(groupID, true)
- if err != nil {
- return nil, err
- }
- if group == nil {
- return logical.ErrorResponse("invalid group ID"), nil
- }
- if group.Type != groupTypeExternal {
- return logical.ErrorResponse("alias can't be set on an internal group"), nil
- }
- }
-
- // Get group alias name
- groupAliasName := d.Get("name").(string)
- if groupAliasName == "" {
- return logical.ErrorResponse("missing alias name"), nil
- }
-
- mountAccessor := d.Get("mount_accessor").(string)
- if mountAccessor == "" {
- return logical.ErrorResponse("missing mount_accessor"), nil
- }
-
- mountValidationResp := i.core.router.validateMountByAccessor(mountAccessor)
- if mountValidationResp == nil {
- return logical.ErrorResponse(fmt.Sprintf("invalid mount accessor %q", mountAccessor)), nil
- }
-
- if mountValidationResp.MountLocal {
- return logical.ErrorResponse(fmt.Sprintf("mount_accessor %q is of a local mount", mountAccessor)), nil
- }
-
- groupAliasByFactors, err := i.MemDBAliasByFactors(mountValidationResp.MountAccessor, groupAliasName, false, true)
- if err != nil {
- return nil, err
- }
-
- resp := &logical.Response{}
-
- if newGroupAlias {
- if groupAliasByFactors != nil {
- return logical.ErrorResponse("combination of mount and group alias name is already in use"), nil
- }
-
- // If this is an alias being tied to a non-existent group, create
- // a new group for it.
- if group == nil {
- group = &identity.Group{
- Type: groupTypeExternal,
- Alias: groupAlias,
- }
- } else {
- group.Alias = groupAlias
- }
- } else {
- // Verify that the combination of group alias name and mount is not
- // already tied to a different alias
- if groupAliasByFactors != nil && groupAliasByFactors.ID != groupAlias.ID {
- return logical.ErrorResponse("combination of mount and group alias name is already in use"), nil
- }
-
-		// Fetch the group to which the alias is tied
- existingGroup, err := i.MemDBGroupByAliasID(groupAlias.ID, true)
- if err != nil {
- return nil, err
- }
-
- if existingGroup == nil {
- return nil, fmt.Errorf("group alias is not associated with a group")
- }
-
- if group != nil && group.ID != existingGroup.ID {
- return logical.ErrorResponse("alias is already tied to a different group"), nil
- }
-
- group = existingGroup
- group.Alias = groupAlias
- }
-
- group.Alias.Name = groupAliasName
- group.Alias.MountAccessor = mountValidationResp.MountAccessor
- // Explicitly correct for previous versions that persisted this
- group.Alias.MountType = ""
-
- err = i.sanitizeAndUpsertGroup(ctx, group, nil)
- if err != nil {
- return nil, err
- }
-
- resp.Data = map[string]interface{}{
- "id": groupAlias.ID,
- "canonical_id": group.ID,
- }
-
- return resp, nil
-}
-
-// pathGroupAliasIDRead returns the properties of an alias for a given
-// alias ID
-func (i *IdentityStore) pathGroupAliasIDRead() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupAliasID := d.Get("id").(string)
- if groupAliasID == "" {
- return logical.ErrorResponse("empty group alias id"), nil
- }
-
- groupAlias, err := i.MemDBAliasByID(groupAliasID, false, true)
- if err != nil {
- return nil, err
- }
-
- return i.handleAliasReadCommon(ctx, groupAlias)
- }
-}
-
-// pathGroupAliasIDDelete deletes the group's alias for a given group alias ID
-func (i *IdentityStore) pathGroupAliasIDDelete() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupAliasID := d.Get("id").(string)
- if groupAliasID == "" {
- return logical.ErrorResponse("missing group alias ID"), nil
- }
-
- i.groupLock.Lock()
- defer i.groupLock.Unlock()
-
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- alias, err := i.MemDBAliasByIDInTxn(txn, groupAliasID, false, true)
- if err != nil {
- return nil, err
- }
-
- if alias == nil {
- return nil, nil
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns.ID != alias.NamespaceID {
- return nil, logical.ErrUnsupportedOperation
- }
-
- group, err := i.MemDBGroupByAliasIDInTxn(txn, alias.ID, true)
- if err != nil {
- return nil, err
- }
-
- // If there is no group tied to a valid alias, something is wrong
- if group == nil {
-			return nil, fmt.Errorf("alias not associated with a group")
- }
-
- // Delete group alias in memdb
- err = i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true)
- if err != nil {
- return nil, err
- }
-
- // Delete the alias
- group.Alias = nil
-
- err = i.UpsertGroupInTxn(txn, group, true)
- if err != nil {
- return nil, err
- }
-
- txn.Commit()
-
- return nil, nil
- }
-}
-
-// pathGroupAliasIDList lists the IDs of all the valid group aliases in the
-// identity store
-func (i *IdentityStore) pathGroupAliasIDList() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return i.handleAliasListCommon(ctx, true)
- }
-}
-
-var groupAliasHelp = map[string][2]string{
- "group-alias": {
- "Creates a new group alias, or updates an existing one.",
- "",
- },
-	"group-alias-by-id": {
- "Update, read or delete a group alias using ID.",
- "",
- },
- "group-alias-id-list": {
- "List all the group alias IDs.",
- "",
- },
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_groups.go b/vendor/github.com/hashicorp/vault/vault/identity_store_groups.go
deleted file mode 100644
index d8c3280b..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store_groups.go
+++ /dev/null
@@ -1,550 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/golang/protobuf/ptypes"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- groupTypeInternal = "internal"
- groupTypeExternal = "external"
-)
-
-func groupPathFields() map[string]*framework.FieldSchema {
- return map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the group. If set, updates the corresponding existing group.",
- },
- "type": {
- Type: framework.TypeString,
- Description: "Type of the group, 'internal' or 'external'. Defaults to 'internal'",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Name of the group.",
- },
- "metadata": {
- Type: framework.TypeKVPairs,
- Description: `Metadata to be associated with the group.
-In the CLI, this parameter can be repeated multiple times, and the values get merged together.
-For example:
-vault metadata=key1=value1 metadata=key2=value2
- `,
- },
- "policies": {
- Type: framework.TypeCommaStringSlice,
- Description: "Policies to be tied to the group.",
- },
- "member_group_ids": {
- Type: framework.TypeCommaStringSlice,
- Description: "Group IDs to be assigned as group members.",
- },
- "member_entity_ids": {
- Type: framework.TypeCommaStringSlice,
- Description: "Entity IDs to be assigned as group members.",
- },
- }
-}
-
-func groupPaths(i *IdentityStore) []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "group$",
- Fields: groupPathFields(),
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.pathGroupRegister(),
- },
-
- HelpSynopsis: strings.TrimSpace(groupHelp["register"][0]),
- HelpDescription: strings.TrimSpace(groupHelp["register"][1]),
- },
- {
- Pattern: "group/id/" + framework.GenericNameRegex("id"),
- Fields: groupPathFields(),
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.pathGroupIDUpdate(),
- logical.ReadOperation: i.pathGroupIDRead(),
- logical.DeleteOperation: i.pathGroupIDDelete(),
- },
-
- HelpSynopsis: strings.TrimSpace(groupHelp["group-by-id"][0]),
- HelpDescription: strings.TrimSpace(groupHelp["group-by-id"][1]),
- },
- {
- Pattern: "group/id/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: i.pathGroupIDList(),
- },
-
- HelpSynopsis: strings.TrimSpace(groupHelp["group-id-list"][0]),
- HelpDescription: strings.TrimSpace(groupHelp["group-id-list"][1]),
- },
- {
- Pattern: "group/name/" + framework.GenericNameRegex("name"),
- Fields: groupPathFields(),
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.pathGroupNameUpdate(),
- logical.ReadOperation: i.pathGroupNameRead(),
- logical.DeleteOperation: i.pathGroupNameDelete(),
- },
-
- HelpSynopsis: strings.TrimSpace(groupHelp["group-by-name"][0]),
- HelpDescription: strings.TrimSpace(groupHelp["group-by-name"][1]),
- },
- {
- Pattern: "group/name/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: i.pathGroupNameList(),
- },
-
- HelpSynopsis: strings.TrimSpace(groupHelp["group-name-list"][0]),
- HelpDescription: strings.TrimSpace(groupHelp["group-name-list"][1]),
- },
- }
-}
-
-func (i *IdentityStore) pathGroupRegister() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- _, ok := d.GetOk("id")
- if ok {
- return i.pathGroupIDUpdate()(ctx, req, d)
- }
-
- i.groupLock.Lock()
- defer i.groupLock.Unlock()
-
- return i.handleGroupUpdateCommon(ctx, req, d, nil)
- }
-}
-
-func (i *IdentityStore) pathGroupIDUpdate() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupID := d.Get("id").(string)
- if groupID == "" {
- return logical.ErrorResponse("empty group ID"), nil
- }
-
- i.groupLock.Lock()
- defer i.groupLock.Unlock()
-
- group, err := i.MemDBGroupByID(groupID, true)
- if err != nil {
- return nil, err
- }
- if group == nil {
- return logical.ErrorResponse("invalid group ID"), nil
- }
-
- return i.handleGroupUpdateCommon(ctx, req, d, group)
- }
-}
-
-func (i *IdentityStore) pathGroupNameUpdate() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupName := d.Get("name").(string)
- if groupName == "" {
- return logical.ErrorResponse("empty group name"), nil
- }
-
- i.groupLock.Lock()
- defer i.groupLock.Unlock()
-
- group, err := i.MemDBGroupByName(ctx, groupName, true)
- if err != nil {
- return nil, err
- }
- return i.handleGroupUpdateCommon(ctx, req, d, group)
- }
-}
-
-func (i *IdentityStore) handleGroupUpdateCommon(ctx context.Context, req *logical.Request, d *framework.FieldData, group *identity.Group) (*logical.Response, error) {
- var newGroup bool
- if group == nil {
- group = new(identity.Group)
- newGroup = true
- }
-
- // Update the policies if supplied
- policiesRaw, ok := d.GetOk("policies")
- if ok {
- group.Policies = policiesRaw.([]string)
- }
-
- if strutil.StrListContains(group.Policies, "root") {
- return logical.ErrorResponse("policies cannot contain root"), nil
- }
-
- groupTypeRaw, ok := d.GetOk("type")
- if ok {
- groupType := groupTypeRaw.(string)
- if group.Type != "" && groupType != group.Type {
-			return logical.ErrorResponse("group type cannot be changed"), nil
- }
-
- group.Type = groupType
- }
-
- // If group type is not set, default to internal type
- if group.Type == "" {
- group.Type = groupTypeInternal
- }
-
- if group.Type != groupTypeInternal && group.Type != groupTypeExternal {
- return logical.ErrorResponse(fmt.Sprintf("invalid group type %q", group.Type)), nil
- }
-
- // Get the name
- groupName := d.Get("name").(string)
- if groupName != "" {
- // Check if there is a group already existing for the given name
- groupByName, err := i.MemDBGroupByName(ctx, groupName, false)
- if err != nil {
- return nil, err
- }
-
-		// If no group was supplied but a group already exists by this name,
-		// operate on the existing group. If the name of an existing group is
-		// about to be changed to one that is already tied to a different
-		// group, error out.
- switch {
- case groupByName == nil:
- // Allowed
- case group.ID == "":
- group = groupByName
- case group.ID != "" && groupByName.ID != group.ID:
- return logical.ErrorResponse("group name is already in use"), nil
- }
- group.Name = groupName
- }
-
- metadata, ok, err := d.GetOkErr("metadata")
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("failed to parse metadata: %v", err)), nil
- }
- if ok {
- group.Metadata = metadata.(map[string]string)
- }
-
- memberEntityIDsRaw, ok := d.GetOk("member_entity_ids")
- if ok {
- if group.Type == groupTypeExternal {
- return logical.ErrorResponse("member entities can't be set manually for external groups"), nil
- }
- group.MemberEntityIDs = memberEntityIDsRaw.([]string)
- if len(group.MemberEntityIDs) > 512 {
-			return logical.ErrorResponse("member entity IDs exceed the limit of 512"), nil
- }
- }
-
- memberGroupIDsRaw, ok := d.GetOk("member_group_ids")
- var memberGroupIDs []string
- if ok {
- if group.Type == groupTypeExternal {
- return logical.ErrorResponse("member groups can't be set for external groups"), nil
- }
- memberGroupIDs = memberGroupIDsRaw.([]string)
- }
-
- err = i.sanitizeAndUpsertGroup(ctx, group, memberGroupIDs)
- if err != nil {
- return nil, err
- }
-
- if !newGroup {
- return nil, nil
- }
-
- respData := map[string]interface{}{
- "id": group.ID,
- "name": group.Name,
- }
- return &logical.Response{
- Data: respData,
- }, nil
-}
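
handleGroupUpdateCommon only overwrites fields the request actually carried, relying on GetOk's boolean to distinguish "absent" from "zero value". A minimal sketch of that update-only-if-present pattern, with a plain map standing in for framework.FieldData:

```go
type group struct {
	Policies []string
	Metadata map[string]string
}

// applyUpdate mirrors the GetOk pattern above: a field is replaced only
// when the request supplied it; missing keys leave the old value intact.
func applyUpdate(g *group, fields map[string]interface{}) {
	if raw, ok := fields["policies"]; ok {
		g.Policies = raw.([]string)
	}
	if raw, ok := fields["metadata"]; ok {
		g.Metadata = raw.(map[string]string)
	}
}
```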
-
-func (i *IdentityStore) pathGroupIDRead() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupID := d.Get("id").(string)
- if groupID == "" {
- return logical.ErrorResponse("empty group id"), nil
- }
-
- group, err := i.MemDBGroupByID(groupID, false)
- if err != nil {
- return nil, err
- }
- if group == nil {
- return nil, nil
- }
-
- return i.handleGroupReadCommon(ctx, group)
- }
-}
-
-func (i *IdentityStore) pathGroupNameRead() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupName := d.Get("name").(string)
- if groupName == "" {
- return logical.ErrorResponse("empty group name"), nil
- }
-
- group, err := i.MemDBGroupByName(ctx, groupName, false)
- if err != nil {
- return nil, err
- }
- if group == nil {
- return nil, nil
- }
-
- return i.handleGroupReadCommon(ctx, group)
- }
-}
-
-func (i *IdentityStore) handleGroupReadCommon(ctx context.Context, group *identity.Group) (*logical.Response, error) {
- if group == nil {
- return nil, nil
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns.ID != group.NamespaceID {
- return nil, nil
- }
-
- respData := map[string]interface{}{}
- respData["id"] = group.ID
- respData["name"] = group.Name
- respData["policies"] = group.Policies
- respData["member_entity_ids"] = group.MemberEntityIDs
- respData["parent_group_ids"] = group.ParentGroupIDs
- respData["metadata"] = group.Metadata
- respData["creation_time"] = ptypes.TimestampString(group.CreationTime)
- respData["last_update_time"] = ptypes.TimestampString(group.LastUpdateTime)
- respData["modify_index"] = group.ModifyIndex
- respData["type"] = group.Type
-
- aliasMap := map[string]interface{}{}
- if group.Alias != nil {
- aliasMap["id"] = group.Alias.ID
- aliasMap["canonical_id"] = group.Alias.CanonicalID
- aliasMap["mount_accessor"] = group.Alias.MountAccessor
- aliasMap["metadata"] = group.Alias.Metadata
- aliasMap["name"] = group.Alias.Name
- aliasMap["merged_from_canonical_ids"] = group.Alias.MergedFromCanonicalIDs
- aliasMap["creation_time"] = ptypes.TimestampString(group.Alias.CreationTime)
- aliasMap["last_update_time"] = ptypes.TimestampString(group.Alias.LastUpdateTime)
-
- if mountValidationResp := i.core.router.validateMountByAccessor(group.Alias.MountAccessor); mountValidationResp != nil {
- aliasMap["mount_path"] = mountValidationResp.MountPath
- aliasMap["mount_type"] = mountValidationResp.MountType
- }
- }
-
- respData["alias"] = aliasMap
-
- var memberGroupIDs []string
- memberGroups, err := i.MemDBGroupsByParentGroupID(group.ID, false)
- if err != nil {
- return nil, err
- }
- for _, memberGroup := range memberGroups {
- memberGroupIDs = append(memberGroupIDs, memberGroup.ID)
- }
-
- respData["member_group_ids"] = memberGroupIDs
-
- return &logical.Response{
- Data: respData,
- }, nil
-}
-
-func (i *IdentityStore) pathGroupIDDelete() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupID := d.Get("id").(string)
- if groupID == "" {
- return logical.ErrorResponse("empty group ID"), nil
- }
-
- return i.handleGroupDeleteCommon(ctx, groupID, true)
- }
-}
-
-func (i *IdentityStore) pathGroupNameDelete() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- groupName := d.Get("name").(string)
- if groupName == "" {
- return logical.ErrorResponse("empty group name"), nil
- }
-
- return i.handleGroupDeleteCommon(ctx, groupName, false)
- }
-}
-
-func (i *IdentityStore) handleGroupDeleteCommon(ctx context.Context, key string, byID bool) (*logical.Response, error) {
- // Acquire the lock to modify the group storage entry
- i.groupLock.Lock()
- defer i.groupLock.Unlock()
-
- // Create a MemDB transaction to delete group
- txn := i.db.Txn(true)
- defer txn.Abort()
-
-	var group *identity.Group
-	var err error
-	if byID {
-		group, err = i.MemDBGroupByIDInTxn(txn, key, false)
-	} else {
-		group, err = i.MemDBGroupByNameInTxn(ctx, txn, key, false)
-	}
-	if err != nil {
-		return nil, err
-	}
- if group == nil {
- return nil, nil
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if group.NamespaceID != ns.ID {
- return nil, nil
- }
-
- // Delete group alias from memdb
- if group.Type == groupTypeExternal && group.Alias != nil {
- err = i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true)
- if err != nil {
- return nil, err
- }
- }
-
- // Delete the group using the same transaction
- err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID)
- if err != nil {
- return nil, err
- }
-
- // Delete the group from storage
- err = i.groupPacker.DeleteItem(group.ID)
- if err != nil {
- return nil, err
- }
-
- // Committing the transaction *after* successfully deleting group
- txn.Commit()
-
- return nil, nil
-}
-
-// pathGroupIDList lists the IDs of all the groups in the identity store
-func (i *IdentityStore) pathGroupIDList() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return i.handleGroupListCommon(ctx, true)
- }
-}
-
-// pathGroupNameList lists the names of all the groups in the identity store
-func (i *IdentityStore) pathGroupNameList() framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return i.handleGroupListCommon(ctx, false)
- }
-}
-
-func (i *IdentityStore) handleGroupListCommon(ctx context.Context, byID bool) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- txn := i.db.Txn(false)
-
- iter, err := txn.Get(groupsTable, "namespace_id", ns.ID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to lookup groups using namespace ID: {{err}}", err)
- }
-
- var keys []string
- groupInfo := map[string]interface{}{}
-
- type mountInfo struct {
- MountType string
- MountPath string
- }
- mountAccessorMap := map[string]mountInfo{}
-
- for entry := iter.Next(); entry != nil; entry = iter.Next() {
- group := entry.(*identity.Group)
-
- if byID {
- keys = append(keys, group.ID)
- } else {
- keys = append(keys, group.Name)
- }
-
- groupInfoEntry := map[string]interface{}{
- "name": group.Name,
- "num_member_entities": len(group.MemberEntityIDs),
- "num_parent_groups": len(group.ParentGroupIDs),
- }
- if group.Alias != nil {
- entry := map[string]interface{}{
- "id": group.Alias.ID,
- "name": group.Alias.Name,
- "mount_accessor": group.Alias.MountAccessor,
- }
-
- mi, ok := mountAccessorMap[group.Alias.MountAccessor]
- if ok {
- entry["mount_type"] = mi.MountType
- entry["mount_path"] = mi.MountPath
- } else {
- mi = mountInfo{}
- if mountValidationResp := i.core.router.validateMountByAccessor(group.Alias.MountAccessor); mountValidationResp != nil {
- mi.MountType = mountValidationResp.MountType
- mi.MountPath = mountValidationResp.MountPath
- entry["mount_type"] = mi.MountType
- entry["mount_path"] = mi.MountPath
- }
- mountAccessorMap[group.Alias.MountAccessor] = mi
- }
-
- groupInfoEntry["alias"] = entry
- }
- groupInfo[group.ID] = groupInfoEntry
- }
-
- return logical.ListResponseWithInfo(keys, groupInfo), nil
-}
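
Both list handlers memoize validateMountByAccessor results in a per-request map, so a mount shared by many aliases or group aliases is resolved only once. The same idea in isolation (lookup stands in for the router call):

```go
type mountInfo struct {
	MountType string
	MountPath string
}

// resolveMounts resolves each distinct accessor at most once; negative
// results are cached too, matching the handlers above.
func resolveMounts(accessors []string, lookup func(string) *mountInfo) map[string]mountInfo {
	cache := map[string]mountInfo{}
	for _, acc := range accessors {
		if _, ok := cache[acc]; ok {
			continue
		}
		mi := mountInfo{}
		if resp := lookup(acc); resp != nil {
			mi = *resp
		}
		cache[acc] = mi
	}
	return cache
}
```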
-
-var groupHelp = map[string][2]string{
- "register": {
- "Create a new group.",
- "",
- },
- "group-by-id": {
-		"Update, read or delete an existing group using its ID.",
-		"",
-	},
-	"group-by-name": {
-		"Update, read or delete an existing group using its name.",
-		"",
-	},
-	"group-id-list": {
-		"List all the group IDs.",
-		"",
-	},
-	"group-name-list": {
-		"List all the group names.",
-		"",
-	},
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_schema.go b/vendor/github.com/hashicorp/vault/vault/identity_store_schema.go
deleted file mode 100644
index 5989c4f1..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store_schema.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package vault
-
-import (
- "fmt"
-
- memdb "github.com/hashicorp/go-memdb"
-)
-
-const (
- entitiesTable = "entities"
- entityAliasesTable = "entity_aliases"
- groupsTable = "groups"
- groupAliasesTable = "group_aliases"
-)
-
-func identityStoreSchema(lowerCaseName bool) *memdb.DBSchema {
- iStoreSchema := &memdb.DBSchema{
- Tables: make(map[string]*memdb.TableSchema),
- }
-
- schemas := []func(bool) *memdb.TableSchema{
- entitiesTableSchema,
- aliasesTableSchema,
- groupsTableSchema,
- groupAliasesTableSchema,
- }
-
- for _, schemaFunc := range schemas {
- schema := schemaFunc(lowerCaseName)
- if _, ok := iStoreSchema.Tables[schema.Name]; ok {
- panic(fmt.Sprintf("duplicate table name: %s", schema.Name))
- }
- iStoreSchema.Tables[schema.Name] = schema
- }
-
- return iStoreSchema
-}
-
-func aliasesTableSchema(lowerCaseName bool) *memdb.TableSchema {
- return &memdb.TableSchema{
- Name: entityAliasesTable,
- Indexes: map[string]*memdb.IndexSchema{
- "id": &memdb.IndexSchema{
- Name: "id",
- Unique: true,
- Indexer: &memdb.StringFieldIndex{
- Field: "ID",
- },
- },
- "factors": &memdb.IndexSchema{
- Name: "factors",
- Unique: true,
- Indexer: &memdb.CompoundIndex{
- Indexes: []memdb.Indexer{
- &memdb.StringFieldIndex{
- Field: "MountAccessor",
- },
- &memdb.StringFieldIndex{
- Field: "Name",
- Lowercase: lowerCaseName,
- },
- },
- },
- },
- "namespace_id": &memdb.IndexSchema{
- Name: "namespace_id",
- Indexer: &memdb.StringFieldIndex{
- Field: "NamespaceID",
- },
- },
- },
- }
-}
-
-func entitiesTableSchema(lowerCaseName bool) *memdb.TableSchema {
- return &memdb.TableSchema{
- Name: entitiesTable,
- Indexes: map[string]*memdb.IndexSchema{
- "id": &memdb.IndexSchema{
- Name: "id",
- Unique: true,
- Indexer: &memdb.StringFieldIndex{
- Field: "ID",
- },
- },
- "name": &memdb.IndexSchema{
- Name: "name",
- Unique: true,
- Indexer: &memdb.CompoundIndex{
- Indexes: []memdb.Indexer{
- &memdb.StringFieldIndex{
- Field: "NamespaceID",
- },
- &memdb.StringFieldIndex{
- Field: "Name",
- Lowercase: lowerCaseName,
- },
- },
- },
- },
- "merged_entity_ids": &memdb.IndexSchema{
- Name: "merged_entity_ids",
- Unique: true,
- AllowMissing: true,
- Indexer: &memdb.StringSliceFieldIndex{
- Field: "MergedEntityIDs",
- },
- },
- "bucket_key_hash": &memdb.IndexSchema{
- Name: "bucket_key_hash",
- Indexer: &memdb.StringFieldIndex{
- Field: "BucketKeyHash",
- },
- },
- "namespace_id": &memdb.IndexSchema{
- Name: "namespace_id",
- Indexer: &memdb.StringFieldIndex{
- Field: "NamespaceID",
- },
- },
- },
- }
-}
-
-func groupsTableSchema(lowerCaseName bool) *memdb.TableSchema {
- return &memdb.TableSchema{
- Name: groupsTable,
- Indexes: map[string]*memdb.IndexSchema{
- "id": {
- Name: "id",
- Unique: true,
- Indexer: &memdb.StringFieldIndex{
- Field: "ID",
- },
- },
- "name": {
- Name: "name",
- Unique: true,
- Indexer: &memdb.CompoundIndex{
- Indexes: []memdb.Indexer{
- &memdb.StringFieldIndex{
- Field: "NamespaceID",
- },
- &memdb.StringFieldIndex{
- Field: "Name",
- Lowercase: lowerCaseName,
- },
- },
- },
- },
- "member_entity_ids": {
- Name: "member_entity_ids",
- AllowMissing: true,
- Indexer: &memdb.StringSliceFieldIndex{
- Field: "MemberEntityIDs",
- },
- },
- "parent_group_ids": {
- Name: "parent_group_ids",
- AllowMissing: true,
- Indexer: &memdb.StringSliceFieldIndex{
- Field: "ParentGroupIDs",
- },
- },
- "bucket_key_hash": &memdb.IndexSchema{
- Name: "bucket_key_hash",
- Indexer: &memdb.StringFieldIndex{
- Field: "BucketKeyHash",
- },
- },
- "namespace_id": &memdb.IndexSchema{
- Name: "namespace_id",
- Indexer: &memdb.StringFieldIndex{
- Field: "NamespaceID",
- },
- },
- },
- }
-}
-
-func groupAliasesTableSchema(lowerCaseName bool) *memdb.TableSchema {
- return &memdb.TableSchema{
- Name: groupAliasesTable,
- Indexes: map[string]*memdb.IndexSchema{
- "id": &memdb.IndexSchema{
- Name: "id",
- Unique: true,
- Indexer: &memdb.StringFieldIndex{
- Field: "ID",
- },
- },
- "factors": &memdb.IndexSchema{
- Name: "factors",
- Unique: true,
- Indexer: &memdb.CompoundIndex{
- Indexes: []memdb.Indexer{
- &memdb.StringFieldIndex{
- Field: "MountAccessor",
- },
- &memdb.StringFieldIndex{
- Field: "Name",
- Lowercase: lowerCaseName,
- },
- },
- },
- },
- "namespace_id": &memdb.IndexSchema{
- Name: "namespace_id",
- Indexer: &memdb.StringFieldIndex{
- Field: "NamespaceID",
- },
- },
- },
- }
-}
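The schema file above only pays off through go-memdb lookups. Below is a minimal sketch of a single-table schema carrying the same compound "factors" index, assuming only the public go-memdb API (NewMemDB, Txn, Insert, First):

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type Alias struct {
	ID            string
	MountAccessor string
	Name          string
}

func main() {
	// One table with the same "id" and compound "factors" indexes as the
	// alias schemas above.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"aliases": {
				Name: "aliases",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
					"factors": {
						Name:   "factors",
						Unique: true,
						Indexer: &memdb.CompoundIndex{
							Indexes: []memdb.Indexer{
								&memdb.StringFieldIndex{Field: "MountAccessor"},
								&memdb.StringFieldIndex{Field: "Name", Lowercase: true},
							},
						},
					},
				},
			},
		},
	}

	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	txn := db.Txn(true)
	if err := txn.Insert("aliases", &Alias{ID: "a1", MountAccessor: "auth_userpass_1234", Name: "Bob"}); err != nil {
		panic(err)
	}
	txn.Commit()

	// Compound-index lookups pass arguments in indexer order; with
	// Lowercase set, the name is normalized on both insert and query.
	read := db.Txn(false)
	raw, err := read.First("aliases", "factors", "auth_userpass_1234", "bob")
	if err == nil && raw != nil {
		fmt.Println("found alias:", raw.(*Alias).ID) // found alias: a1
	}
}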
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_structs.go b/vendor/github.com/hashicorp/vault/vault/identity_store_structs.go
deleted file mode 100644
index c8e8026c..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store_structs.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package vault
-
-import (
- "regexp"
- "sync"
-
- log "github.com/hashicorp/go-hclog"
- memdb "github.com/hashicorp/go-memdb"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/storagepacker"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- // Storage prefixes
- entityPrefix = "entity/"
-)
-
-var (
- // metaKeyFormatRegEx checks if a metadata key string is valid
- metaKeyFormatRegEx = regexp.MustCompile(`^[a-zA-Z0-9=/+_-]+$`).MatchString
-)
-
-const (
- // The meta key prefix reserved for Vault's internal use
- metaKeyReservedPrefix = "vault-"
-
- // The maximum number of metadata key pairs allowed to be registered
- metaMaxKeyPairs = 64
-
- // The maximum allowed length of a metadata key
- metaKeyMaxLength = 128
-
- // The maximum allowed length of a metadata value
- metaValueMaxLength = 512
-)
-
-// IdentityStore is composed of its own storage view and a MemDB which
-// maintains active in-memory replicas of the storage contents indexed by
-// multiple fields.
-type IdentityStore struct {
- // IdentityStore is a secret backend in Vault
- *framework.Backend
-
-	// view is the storage sub-view where all the artifacts of the
-	// identity store get persisted
- view logical.Storage
-
-	// db is the in-memory database where the storage artifacts get
-	// replicated to enable richer queries based on multiple indexes.
- db *memdb.MemDB
-
-	// lock guards concurrent access to the identity store's state
- lock sync.RWMutex
-
- // groupLock is used to protect modifications to group entries
- groupLock sync.RWMutex
-
- // logger is the server logger copied over from core
- logger log.Logger
-
- // entityPacker is used to pack multiple entity storage entries into 256
- // buckets
- entityPacker *storagepacker.StoragePacker
-
- // groupPacker is used to pack multiple group storage entries into 256
- // buckets
- groupPacker *storagepacker.StoragePacker
-
- // core is the pointer to Vault's core
- core *Core
-
-	// disableLowerCasedNames indicates whether identity names should be
-	// handled case-sensitively, i.e. without being lowercased first
- disableLowerCasedNames bool
-}
-
-type groupDiff struct {
- New []*identity.Group
- Deleted []*identity.Group
- Unmodified []*identity.Group
-}
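metaKeyFormatRegEx above is the entire key-validation story: one anchored character class. A tiny sketch restating the same pattern, purely for illustration:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same anchored character class as metaKeyFormatRegEx.
	valid := regexp.MustCompile(`^[a-zA-Z0-9=/+_-]+$`).MatchString

	fmt.Println(valid("team-name")) // true
	fmt.Println(valid("team name")) // false: spaces are not allowed
	fmt.Println(valid(""))          // false: the key must be non-empty
}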
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_upgrade.go b/vendor/github.com/hashicorp/vault/vault/identity_store_upgrade.go
deleted file mode 100644
index ebf3e558..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store_upgrade.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package vault
-
-import (
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func upgradePaths(i *IdentityStore) []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "persona$",
- Fields: map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the persona",
- },
- "entity_id": {
- Type: framework.TypeString,
- Description: "Entity ID to which this persona belongs to",
- },
- "mount_accessor": {
- Type: framework.TypeString,
- Description: "Mount accessor to which this persona belongs to",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Name of the persona",
- },
- "metadata": {
- Type: framework.TypeKVPairs,
- Description: `Metadata to be associated with the persona.
-In CLI, this parameter can be repeated multiple times, and it all gets merged together.
-For example:
-vault metadata=key1=value1 metadata=key2=value2
-`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleEntityUpdateCommon(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias"][1]),
- },
- {
- Pattern: "persona/id/" + framework.GenericNameRegex("id"),
- Fields: map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the persona",
- },
- "entity_id": {
- Type: framework.TypeString,
- Description: "Entity ID to which this persona should be tied to",
- },
- "mount_accessor": {
- Type: framework.TypeString,
- Description: "Mount accessor to which this persona belongs to",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Name of the persona",
- },
- "metadata": {
- Type: framework.TypeKVPairs,
- Description: `Metadata to be associated with the persona.
-In CLI, this parameter can be repeated multiple times, and it all gets merged together.
-For example:
-vault metadata=key1=value1 metadata=key2=value2
-`,
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleEntityUpdateCommon(),
- logical.ReadOperation: i.pathAliasIDRead(),
- logical.DeleteOperation: i.pathAliasIDDelete(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias-id"][1]),
- },
- {
- Pattern: "persona/id/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: i.pathAliasIDList(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id-list"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias-id-list"][1]),
- },
- {
- Pattern: "alias$",
- Fields: map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the alias",
- },
- "entity_id": {
- Type: framework.TypeString,
- Description: "Entity ID to which this alias belongs to. This field is deprecated in favor of 'canonical_id'.",
- },
- "canonical_id": {
- Type: framework.TypeString,
- Description: "Entity ID to which this alias belongs to",
- },
- "mount_accessor": {
- Type: framework.TypeString,
- Description: "Mount accessor to which this alias belongs to",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Name of the alias",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleAliasUpdateCommon(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias"][1]),
- },
-
- {
- Pattern: "alias/id/" + framework.GenericNameRegex("id"),
- Fields: map[string]*framework.FieldSchema{
- "id": {
- Type: framework.TypeString,
- Description: "ID of the alias",
- },
- "entity_id": {
- Type: framework.TypeString,
- Description: "Entity ID to which this alias should be tied to. This field is deprecated in favor of 'canonical_id'.",
- },
- "canonical_id": {
- Type: framework.TypeString,
- Description: "Entity ID to which this alias should be tied to",
- },
- "mount_accessor": {
- Type: framework.TypeString,
- Description: "Mount accessor to which this alias belongs to",
- },
- "name": {
- Type: framework.TypeString,
- Description: "Name of the alias",
- },
- },
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: i.handleAliasUpdateCommon(),
- logical.ReadOperation: i.pathAliasIDRead(),
- logical.DeleteOperation: i.pathAliasIDDelete(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias-id"][1]),
- },
- {
- Pattern: "alias/id/?$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: i.pathAliasIDList(),
- },
-
- HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id-list"][0]),
- HelpDescription: strings.TrimSpace(aliasHelp["alias-id-list"][1]),
- },
- }
-}
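The upgrade paths above rely on named capture groups to pull parameters out of the request path. A stdlib-only sketch of that mechanism (the pattern below is illustrative, not the exact regex framework.GenericNameRegex produces):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A named capture group for one path segment, standing in for
	// "persona/id/" + framework.GenericNameRegex("id").
	re := regexp.MustCompile(`^persona/id/(?P<id>[^/]+)$`)

	m := re.FindStringSubmatch("persona/id/abcd-1234")
	if m != nil {
		id := m[re.SubexpIndex("id")]
		fmt.Println("id =", id) // id = abcd-1234
	}
}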
diff --git a/vendor/github.com/hashicorp/vault/vault/identity_store_util.go b/vendor/github.com/hashicorp/vault/vault/identity_store_util.go
deleted file mode 100644
index f186b36f..00000000
--- a/vendor/github.com/hashicorp/vault/vault/identity_store_util.go
+++ /dev/null
@@ -1,1979 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
-
- "github.com/golang/protobuf/ptypes"
- "github.com/hashicorp/errwrap"
- memdb "github.com/hashicorp/go-memdb"
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/identity/mfa"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/storagepacker"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- errDuplicateIdentityName = errors.New("duplicate identity name")
-)
-
-func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error {
- if c.identityStore == nil {
- c.logger.Warn("identity store is not setup, skipping loading")
- return nil
- }
-
- loadFunc := func(context.Context) error {
- err := c.identityStore.loadEntities(ctx)
- if err != nil {
- return err
- }
- return c.identityStore.loadGroups(ctx)
- }
-
-	// Load everything when memdb is set to operate on lowercased names
- err := loadFunc(ctx)
- switch {
- case err == nil:
- // If it succeeds, all is well
- return nil
- case err != nil && !errwrap.Contains(err, errDuplicateIdentityName.Error()):
- return err
- }
-
- c.identityStore.logger.Warn("enabling case sensitive identity names")
-
- // Set identity store to operate on case sensitive identity names
- c.identityStore.disableLowerCasedNames = true
-
-	// Swap in a memdb instance that operates on case-sensitive names;
-	// resetting the DB obviates the need to unload anything that has
-	// already been loaded.
- err = c.identityStore.resetDB(ctx)
- if err != nil {
- return err
- }
-
- // Attempt to load identity artifacts once more after memdb is reset to
- // accept case sensitive names
- return loadFunc(ctx)
-}
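The load sequence above is a try-once-then-fall-back loop: attempt the case-insensitive load, and only on a duplicate-name error flip to case-sensitive mode, reset MemDB, and retry. A minimal sketch of the same control flow, using stdlib errors.Is in place of errwrap.Contains (names here are hypothetical):

package main

import (
	"errors"
	"fmt"
)

var errDuplicate = errors.New("duplicate identity name")

// load is a stand-in for the real loader; it fails with a duplicate-name
// error unless case-sensitive mode is enabled.
func load(caseSensitive bool) error {
	if !caseSensitive {
		return fmt.Errorf("loading failed: %w", errDuplicate)
	}
	return nil
}

func main() {
	err := load(false)
	if err != nil && errors.Is(err, errDuplicate) {
		// Mirror the fallback above: flip to case-sensitive mode,
		// reset state, and try exactly once more.
		err = load(true)
	}
	fmt.Println("result:", err) // result: <nil>
}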
-
-func (i *IdentityStore) sanitizeName(name string) string {
- if i.disableLowerCasedNames {
- return name
- }
- return strings.ToLower(name)
-}
-
-func (i *IdentityStore) loadGroups(ctx context.Context) error {
- i.logger.Debug("identity loading groups")
- existing, err := i.groupPacker.View().List(ctx, groupBucketsPrefix)
- if err != nil {
- return errwrap.Wrapf("failed to scan for groups: {{err}}", err)
- }
- i.logger.Debug("groups collected", "num_existing", len(existing))
-
- for _, key := range existing {
- bucket, err := i.groupPacker.GetBucket(i.groupPacker.BucketPath(key))
- if err != nil {
- return err
- }
-
- if bucket == nil {
- continue
- }
-
- for _, item := range bucket.Items {
- group, err := i.parseGroupFromBucketItem(item)
- if err != nil {
- return err
- }
- if group == nil {
- continue
- }
-
- // Ensure that there are no groups with duplicate names
- groupByName, err := i.MemDBGroupByName(ctx, group.Name, false)
- if err != nil {
- return err
- }
- if groupByName != nil {
- i.logger.Warn(errDuplicateIdentityName.Error(), "group_name", group.Name, "conflicting_group_name", groupByName.Name, "action", "merge the contents of duplicated groups into one and delete the other")
- if !i.disableLowerCasedNames {
- return errDuplicateIdentityName
- }
- }
-
- if i.logger.IsDebug() {
- i.logger.Debug("loading group", "name", group.Name, "id", group.ID)
- }
-
- txn := i.db.Txn(true)
-
-			// Before pull#5786, entity memberships in groups were not getting
-			// updated when respective entities were deleted. This is here to
-			// check that the entity IDs in the group are indeed valid and,
-			// if not, to remove them.
- persist := false
- for _, memberEntityID := range group.MemberEntityIDs {
- entity, err := i.MemDBEntityByID(memberEntityID, false)
- if err != nil {
- return err
- }
- if entity == nil {
- persist = true
- group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, memberEntityID)
- }
- }
-
- err = i.UpsertGroupInTxn(txn, group, persist)
- if err != nil {
- txn.Abort()
- return errwrap.Wrapf("failed to update group in memdb: {{err}}", err)
- }
-
- txn.Commit()
- }
- }
-
- if i.logger.IsInfo() {
- i.logger.Info("groups restored")
- }
-
- return nil
-}
-
-func (i *IdentityStore) loadEntities(ctx context.Context) error {
- // Accumulate existing entities
- i.logger.Debug("loading entities")
- existing, err := i.entityPacker.View().List(ctx, storagepacker.StoragePackerBucketsPrefix)
- if err != nil {
- return errwrap.Wrapf("failed to scan for entities: {{err}}", err)
- }
- i.logger.Debug("entities collected", "num_existing", len(existing))
-
- // Make the channels used for the worker pool
- broker := make(chan string)
- quit := make(chan bool)
-
- // Buffer these channels to prevent deadlocks
- errs := make(chan error, len(existing))
- result := make(chan *storagepacker.Bucket, len(existing))
-
- // Use a wait group
- wg := &sync.WaitGroup{}
-
- // Create 64 workers to distribute work to
- for j := 0; j < consts.ExpirationRestoreWorkerCount; j++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
-
- for {
- select {
- case bucketKey, ok := <-broker:
- // broker has been closed, we are done
- if !ok {
- return
- }
-
- bucket, err := i.entityPacker.GetBucket(i.entityPacker.BucketPath(bucketKey))
- if err != nil {
- errs <- err
- continue
- }
-
- // Write results out to the result channel
- result <- bucket
-
- // quit early
- case <-quit:
- return
- }
- }
- }()
- }
-
-	// Distribute the collected keys to the workers in a goroutine
- wg.Add(1)
- go func() {
- defer wg.Done()
- for j, bucketKey := range existing {
- if j%500 == 0 {
- i.logger.Debug("entities loading", "progress", j)
- }
-
- select {
- case <-quit:
- return
-
- default:
- broker <- bucketKey
- }
- }
-
- // Close the broker, causing worker routines to exit
- close(broker)
- }()
-
- // Restore each key by pulling from the result chan
- for j := 0; j < len(existing); j++ {
- select {
- case err := <-errs:
-			// Close all goroutines
- close(quit)
-
- return err
-
- case bucket := <-result:
- // If there is no entry, nothing to restore
- if bucket == nil {
- continue
- }
-
- for _, item := range bucket.Items {
- entity, err := i.parseEntityFromBucketItem(ctx, item)
- if err != nil {
- return err
- }
-
- if entity == nil {
- continue
- }
-
- // Ensure that there are no entities with duplicate names
- entityByName, err := i.MemDBEntityByName(ctx, entity.Name, false)
- if err != nil {
-						return err
- }
- if entityByName != nil {
- i.logger.Warn(errDuplicateIdentityName.Error(), "entity_name", entity.Name, "conflicting_entity_name", entityByName.Name, "action", "merge the duplicate entities into one")
- if !i.disableLowerCasedNames {
- return errDuplicateIdentityName
- }
- }
-
- // Only update MemDB and don't hit the storage again
- err = i.upsertEntity(ctx, entity, nil, false)
- if err != nil {
- return errwrap.Wrapf("failed to update entity in MemDB: {{err}}", err)
- }
- }
- }
- }
-
-	// Let all goroutines finish
- wg.Wait()
-
- if i.logger.IsInfo() {
- i.logger.Info("entities restored")
- }
-
- return nil
-}
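loadEntities above fans bucket keys out to a fixed worker pool over a broker channel, with buffered errs/result channels so workers never block on send. A stripped-down sketch of that fan-out pattern (names and the worker count are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	keys := []string{"00", "01", "02", "03"}

	broker := make(chan string)
	// Buffered so workers can always deliver without blocking.
	results := make(chan string, len(keys))
	var wg sync.WaitGroup

	// A fixed pool of workers draining the broker channel.
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range broker {
				results <- "bucket:" + key // stand-in for GetBucket
			}
		}()
	}

	// Producer: feed the keys, then close the broker so the workers'
	// range loops exit.
	go func() {
		for _, k := range keys {
			broker <- k
		}
		close(broker)
	}()

	for range keys {
		fmt.Println(<-results)
	}
	wg.Wait()
}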
-
-// upsertEntityInTxn either creates a new entity or updates an existing one.
-// The changes are applied to both MemDB and storage. If 'persist' is set to
-// false, then storage will not be updated. When an alias is transferred from
-// one entity to another, both the source and destination entities should get
-// updated, in which case callers should send in both entity and
-// previousEntity.
-func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, entity *identity.Entity, previousEntity *identity.Entity, persist bool) error {
- var err error
-
- if txn == nil {
- return fmt.Errorf("txn is nil")
- }
-
- if entity == nil {
- return fmt.Errorf("entity is nil")
- }
-
- aliasFactors := make([]string, len(entity.Aliases))
-
- for index, alias := range entity.Aliases {
- // Verify that alias is not associated to a different one already
- aliasByFactors, err := i.MemDBAliasByFactors(alias.MountAccessor, alias.Name, false, false)
- if err != nil {
- return err
- }
-
- switch {
- case aliasByFactors == nil:
- // Not found, no merging needed
- case aliasByFactors.CanonicalID == entity.ID:
- // Lookup found the same entity, so it's already attached to the
- // right place
- case previousEntity != nil && aliasByFactors.CanonicalID == previousEntity.ID:
- // previousEntity isn't upserted yet so may still contain the old
- // alias reference in memdb if it was just changed; validate
- // whether or not it's _actually_ still tied to the entity
- var found bool
- for _, prevEntAlias := range previousEntity.Aliases {
- if prevEntAlias.ID == alias.ID {
- found = true
- break
- }
- }
- // If we didn't find the alias still tied to previousEntity, we
- // shouldn't use the merging logic and should bail
- if !found {
- break
- }
-
- // Otherwise it's still tied to previousEntity and fall through
- // into merging
- fallthrough
- default:
- i.logger.Warn("alias is already tied to a different entity; these entities are being merged", "alias_id", alias.ID, "other_entity_id", aliasByFactors.CanonicalID, "entity_aliases", entity.Aliases, "alias_by_factors", aliasByFactors)
- respErr, intErr := i.mergeEntity(ctx, txn, entity, []string{aliasByFactors.CanonicalID}, true, false, true)
- switch {
- case respErr != nil:
- return respErr
- case intErr != nil:
- return intErr
- }
- // The entity and aliases will be loaded into memdb and persisted
- // as a result of the merge so we are done here
- return nil
- }
-
- if strutil.StrListContains(aliasFactors, i.sanitizeName(alias.Name)+alias.MountAccessor) {
- i.logger.Warn(errDuplicateIdentityName.Error(), "alias_name", alias.Name, "mount_accessor", alias.MountAccessor, "entity_name", entity.Name, "action", "delete one of the duplicate aliases")
- if !i.disableLowerCasedNames {
- return errDuplicateIdentityName
- }
- }
-
- // Insert or update alias in MemDB using the transaction created above
- err = i.MemDBUpsertAliasInTxn(txn, alias, false)
- if err != nil {
- return err
- }
-
- aliasFactors[index] = i.sanitizeName(alias.Name) + alias.MountAccessor
- }
-
- // If previous entity is set, update it in MemDB and persist it
- if previousEntity != nil && persist {
- err = i.MemDBUpsertEntityInTxn(txn, previousEntity)
- if err != nil {
- return err
- }
-
- // Persist the previous entity object
- marshaledPreviousEntity, err := ptypes.MarshalAny(previousEntity)
- if err != nil {
- return err
- }
- err = i.entityPacker.PutItem(&storagepacker.Item{
- ID: previousEntity.ID,
- Message: marshaledPreviousEntity,
- })
- if err != nil {
- return err
- }
- }
-
- // Insert or update entity in MemDB using the transaction created above
- err = i.MemDBUpsertEntityInTxn(txn, entity)
- if err != nil {
- return err
- }
-
- if persist {
- entityAsAny, err := ptypes.MarshalAny(entity)
- if err != nil {
- return err
- }
- item := &storagepacker.Item{
- ID: entity.ID,
- Message: entityAsAny,
- }
-
- // Persist the entity object
- err = i.entityPacker.PutItem(item)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
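The alias-dedup switch above leans on two Go subtleties: break exits the switch before the fallthrough runs, while fallthrough jumps into the next case body unconditionally. A toy sketch of that guarded-fallthrough shape:

package main

import "fmt"

func main() {
	for _, stillAttached := range []bool{true, false} {
		switch {
		case true:
			if !stillAttached {
				fmt.Println("bail: no merge")
				break // exits the switch, skipping the fallthrough
			}
			fallthrough // still attached: continue into the merge path
		default:
			fmt.Println("merge entities")
		}
	}
	// Output: "merge entities", then "bail: no merge"
}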
-
-// upsertEntity either creates a new entity or updates an existing one. The
-// changes are applied to both MemDB and storage. If 'persist' is set to
-// false, then storage will not be updated. When an alias is transferred from
-// one entity to another, both the source and destination entities should get
-// updated, in which case callers should send in both entity and
-// previousEntity.
-func (i *IdentityStore) upsertEntity(ctx context.Context, entity *identity.Entity, previousEntity *identity.Entity, persist bool) error {
-
- // Create a MemDB transaction to update both alias and entity
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- err := i.upsertEntityInTxn(ctx, txn, entity, previousEntity, persist)
- if err != nil {
- return err
- }
-
- txn.Commit()
-
- return nil
-}
-
-func (i *IdentityStore) MemDBUpsertAliasInTxn(txn *memdb.Txn, alias *identity.Alias, groupAlias bool) error {
- if txn == nil {
- return fmt.Errorf("nil txn")
- }
-
- if alias == nil {
- return fmt.Errorf("alias is nil")
- }
-
- if alias.NamespaceID == "" {
- alias.NamespaceID = namespace.RootNamespaceID
- }
-
- tableName := entityAliasesTable
- if groupAlias {
- tableName = groupAliasesTable
- }
-
- aliasRaw, err := txn.First(tableName, "id", alias.ID)
- if err != nil {
- return errwrap.Wrapf("failed to lookup alias from memdb using alias ID: {{err}}", err)
- }
-
- if aliasRaw != nil {
- err = txn.Delete(tableName, aliasRaw)
- if err != nil {
- return errwrap.Wrapf("failed to delete alias from memdb: {{err}}", err)
- }
- }
-
- if err := txn.Insert(tableName, alias); err != nil {
- return errwrap.Wrapf("failed to update alias into memdb: {{err}}", err)
- }
-
- return nil
-}
-
-func (i *IdentityStore) MemDBAliasByIDInTxn(txn *memdb.Txn, aliasID string, clone bool, groupAlias bool) (*identity.Alias, error) {
- if aliasID == "" {
- return nil, fmt.Errorf("missing alias ID")
- }
-
- if txn == nil {
- return nil, fmt.Errorf("txn is nil")
- }
-
- tableName := entityAliasesTable
- if groupAlias {
- tableName = groupAliasesTable
- }
-
- aliasRaw, err := txn.First(tableName, "id", aliasID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch alias from memdb using alias ID: {{err}}", err)
- }
-
- if aliasRaw == nil {
- return nil, nil
- }
-
- alias, ok := aliasRaw.(*identity.Alias)
- if !ok {
- return nil, fmt.Errorf("failed to declare the type of fetched alias")
- }
-
- if clone {
- return alias.Clone()
- }
-
- return alias, nil
-}
-
-func (i *IdentityStore) MemDBAliasByID(aliasID string, clone bool, groupAlias bool) (*identity.Alias, error) {
- if aliasID == "" {
- return nil, fmt.Errorf("missing alias ID")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBAliasByIDInTxn(txn, aliasID, clone, groupAlias)
-}
-
-func (i *IdentityStore) MemDBAliasByFactors(mountAccessor, aliasName string, clone bool, groupAlias bool) (*identity.Alias, error) {
- if aliasName == "" {
- return nil, fmt.Errorf("missing alias name")
- }
-
- if mountAccessor == "" {
- return nil, fmt.Errorf("missing mount accessor")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBAliasByFactorsInTxn(txn, mountAccessor, aliasName, clone, groupAlias)
-}
-
-func (i *IdentityStore) MemDBAliasByFactorsInTxn(txn *memdb.Txn, mountAccessor, aliasName string, clone bool, groupAlias bool) (*identity.Alias, error) {
- if txn == nil {
- return nil, fmt.Errorf("nil txn")
- }
-
- if aliasName == "" {
- return nil, fmt.Errorf("missing alias name")
- }
-
- if mountAccessor == "" {
- return nil, fmt.Errorf("missing mount accessor")
- }
-
- tableName := entityAliasesTable
- if groupAlias {
- tableName = groupAliasesTable
- }
-
- aliasRaw, err := txn.First(tableName, "factors", mountAccessor, aliasName)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch alias from memdb using factors: {{err}}", err)
- }
-
- if aliasRaw == nil {
- return nil, nil
- }
-
- alias, ok := aliasRaw.(*identity.Alias)
- if !ok {
- return nil, fmt.Errorf("failed to declare the type of fetched alias")
- }
-
- if clone {
- return alias.Clone()
- }
-
- return alias, nil
-}
-
-func (i *IdentityStore) MemDBDeleteAliasByIDInTxn(txn *memdb.Txn, aliasID string, groupAlias bool) error {
- if aliasID == "" {
- return nil
- }
-
- if txn == nil {
- return fmt.Errorf("txn is nil")
- }
-
- alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, groupAlias)
- if err != nil {
- return err
- }
-
- if alias == nil {
- return nil
- }
-
- tableName := entityAliasesTable
- if groupAlias {
- tableName = groupAliasesTable
- }
-
- err = txn.Delete(tableName, alias)
- if err != nil {
- return errwrap.Wrapf("failed to delete alias from memdb: {{err}}", err)
- }
-
- return nil
-}
-
-func (i *IdentityStore) MemDBAliases(ws memdb.WatchSet, groupAlias bool) (memdb.ResultIterator, error) {
- txn := i.db.Txn(false)
-
- tableName := entityAliasesTable
- if groupAlias {
- tableName = groupAliasesTable
- }
-
- iter, err := txn.Get(tableName, "id")
- if err != nil {
- return nil, err
- }
-
- ws.Add(iter.WatchCh())
-
- return iter, nil
-}
-
-func (i *IdentityStore) MemDBUpsertEntityInTxn(txn *memdb.Txn, entity *identity.Entity) error {
- if txn == nil {
- return fmt.Errorf("nil txn")
- }
-
- if entity == nil {
- return fmt.Errorf("entity is nil")
- }
-
- if entity.NamespaceID == "" {
- entity.NamespaceID = namespace.RootNamespaceID
- }
-
- entityRaw, err := txn.First(entitiesTable, "id", entity.ID)
- if err != nil {
- return errwrap.Wrapf("failed to lookup entity from memdb using entity id: {{err}}", err)
- }
-
- if entityRaw != nil {
- err = txn.Delete(entitiesTable, entityRaw)
- if err != nil {
- return errwrap.Wrapf("failed to delete entity from memdb: {{err}}", err)
- }
- }
-
- if err := txn.Insert(entitiesTable, entity); err != nil {
- return errwrap.Wrapf("failed to update entity into memdb: {{err}}", err)
- }
-
- return nil
-}
-
-func (i *IdentityStore) MemDBEntityByIDInTxn(txn *memdb.Txn, entityID string, clone bool) (*identity.Entity, error) {
- if entityID == "" {
- return nil, fmt.Errorf("missing entity id")
- }
-
- if txn == nil {
- return nil, fmt.Errorf("txn is nil")
- }
-
- entityRaw, err := txn.First(entitiesTable, "id", entityID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch entity from memdb using entity id: {{err}}", err)
- }
-
- if entityRaw == nil {
- return nil, nil
- }
-
- entity, ok := entityRaw.(*identity.Entity)
- if !ok {
- return nil, fmt.Errorf("failed to declare the type of fetched entity")
- }
-
- if clone {
- return entity.Clone()
- }
-
- return entity, nil
-}
-
-func (i *IdentityStore) MemDBEntityByID(entityID string, clone bool) (*identity.Entity, error) {
- if entityID == "" {
- return nil, fmt.Errorf("missing entity id")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBEntityByIDInTxn(txn, entityID, clone)
-}
-
-func (i *IdentityStore) MemDBEntityByName(ctx context.Context, entityName string, clone bool) (*identity.Entity, error) {
- if entityName == "" {
- return nil, fmt.Errorf("missing entity name")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBEntityByNameInTxn(ctx, txn, entityName, clone)
-}
-
-func (i *IdentityStore) MemDBEntityByNameInTxn(ctx context.Context, txn *memdb.Txn, entityName string, clone bool) (*identity.Entity, error) {
- if entityName == "" {
- return nil, fmt.Errorf("missing entity name")
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- entityRaw, err := txn.First(entitiesTable, "name", ns.ID, entityName)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch entity from memdb using entity name: {{err}}", err)
- }
-
- if entityRaw == nil {
- return nil, nil
- }
-
- entity, ok := entityRaw.(*identity.Entity)
- if !ok {
- return nil, fmt.Errorf("failed to declare the type of fetched entity")
- }
-
- if clone {
- return entity.Clone()
- }
-
- return entity, nil
-}
-
-func (i *IdentityStore) MemDBEntitiesByBucketEntryKeyHashInTxn(txn *memdb.Txn, hashValue string) ([]*identity.Entity, error) {
- if txn == nil {
- return nil, fmt.Errorf("nil txn")
- }
-
- if hashValue == "" {
- return nil, fmt.Errorf("empty hash value")
- }
-
- entitiesIter, err := txn.Get(entitiesTable, "bucket_key_hash", hashValue)
- if err != nil {
- return nil, errwrap.Wrapf("failed to lookup entities using bucket entry key hash: {{err}}", err)
- }
-
- var entities []*identity.Entity
- for entity := entitiesIter.Next(); entity != nil; entity = entitiesIter.Next() {
- entities = append(entities, entity.(*identity.Entity))
- }
-
- return entities, nil
-}
-
-func (i *IdentityStore) MemDBEntityByMergedEntityID(mergedEntityID string, clone bool) (*identity.Entity, error) {
- if mergedEntityID == "" {
- return nil, fmt.Errorf("missing merged entity id")
- }
-
- txn := i.db.Txn(false)
-
- entityRaw, err := txn.First(entitiesTable, "merged_entity_ids", mergedEntityID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch entity from memdb using merged entity id: {{err}}", err)
- }
-
- if entityRaw == nil {
- return nil, nil
- }
-
- entity, ok := entityRaw.(*identity.Entity)
- if !ok {
- return nil, fmt.Errorf("failed to declare the type of fetched entity")
- }
-
- if clone {
- return entity.Clone()
- }
-
- return entity, nil
-}
-
-func (i *IdentityStore) MemDBEntityByAliasIDInTxn(txn *memdb.Txn, aliasID string, clone bool) (*identity.Entity, error) {
- if aliasID == "" {
- return nil, fmt.Errorf("missing alias ID")
- }
-
- if txn == nil {
- return nil, fmt.Errorf("txn is nil")
- }
-
- alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, false)
- if err != nil {
- return nil, err
- }
-
- if alias == nil {
- return nil, nil
- }
-
- return i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, clone)
-}
-
-func (i *IdentityStore) MemDBEntityByAliasID(aliasID string, clone bool) (*identity.Entity, error) {
- if aliasID == "" {
- return nil, fmt.Errorf("missing alias ID")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBEntityByAliasIDInTxn(txn, aliasID, clone)
-}
-
-func (i *IdentityStore) MemDBDeleteEntityByID(entityID string) error {
- if entityID == "" {
- return nil
- }
-
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- err := i.MemDBDeleteEntityByIDInTxn(txn, entityID)
- if err != nil {
- return err
- }
-
- txn.Commit()
-
- return nil
-}
-
-func (i *IdentityStore) MemDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID string) error {
- if entityID == "" {
- return nil
- }
-
- if txn == nil {
- return fmt.Errorf("txn is nil")
- }
-
- entity, err := i.MemDBEntityByIDInTxn(txn, entityID, false)
- if err != nil {
- return err
- }
-
- if entity == nil {
- return nil
- }
-
- err = txn.Delete(entitiesTable, entity)
- if err != nil {
- return errwrap.Wrapf("failed to delete entity from memdb: {{err}}", err)
- }
-
- return nil
-}
-
-func (i *IdentityStore) sanitizeAlias(ctx context.Context, alias *identity.Alias) error {
- var err error
-
- if alias == nil {
- return fmt.Errorf("alias is nil")
- }
-
- // Alias must always be tied to a canonical object
- if alias.CanonicalID == "" {
- return fmt.Errorf("missing canonical ID")
- }
-
- // Alias must have a name
- if alias.Name == "" {
- return fmt.Errorf("missing alias name %q", alias.Name)
- }
-
- // Alias metadata should always be map[string]string
- err = validateMetadata(alias.Metadata)
- if err != nil {
- return errwrap.Wrapf("invalid alias metadata: {{err}}", err)
- }
-
- // Create an ID if there isn't one already
- if alias.ID == "" {
- alias.ID, err = uuid.GenerateUUID()
- if err != nil {
- return fmt.Errorf("failed to generate alias ID")
- }
- }
-
- if alias.NamespaceID == "" {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- alias.NamespaceID = ns.ID
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- if ns.ID != alias.NamespaceID {
- return fmt.Errorf("alias belongs to a different namespace")
- }
-
- // Set the creation and last update times
- if alias.CreationTime == nil {
- alias.CreationTime = ptypes.TimestampNow()
- alias.LastUpdateTime = alias.CreationTime
- } else {
- alias.LastUpdateTime = ptypes.TimestampNow()
- }
-
- return nil
-}
-
-func (i *IdentityStore) sanitizeEntity(ctx context.Context, entity *identity.Entity) error {
- var err error
-
- if entity == nil {
- return fmt.Errorf("entity is nil")
- }
-
- // Create an ID if there isn't one already
- if entity.ID == "" {
- entity.ID, err = uuid.GenerateUUID()
- if err != nil {
- return fmt.Errorf("failed to generate entity id")
- }
-
- // Set the hash value of the storage bucket key in entity
- entity.BucketKeyHash = i.entityPacker.BucketKeyHashByItemID(entity.ID)
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- if entity.NamespaceID == "" {
- entity.NamespaceID = ns.ID
- }
- if ns.ID != entity.NamespaceID {
- return fmt.Errorf("entity does not belong to this namespace")
- }
-
- // Create a name if there isn't one already
- if entity.Name == "" {
- entity.Name, err = i.generateName(ctx, "entity")
- if err != nil {
- return fmt.Errorf("failed to generate entity name")
- }
- }
-
- // Entity metadata should always be map[string]string
- err = validateMetadata(entity.Metadata)
- if err != nil {
- return errwrap.Wrapf("invalid entity metadata: {{err}}", err)
- }
-
- // Set the creation and last update times
- if entity.CreationTime == nil {
- entity.CreationTime = ptypes.TimestampNow()
- entity.LastUpdateTime = entity.CreationTime
- } else {
- entity.LastUpdateTime = ptypes.TimestampNow()
- }
-
-	// Ensure that MFASecrets is always non-nil. This is useful when MFA
-	// secret generation procedures try to append MFA info to the entity.
- if entity.MFASecrets == nil {
- entity.MFASecrets = make(map[string]*mfa.Secret)
- }
-
- return nil
-}
-
-func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *identity.Group, memberGroupIDs []string) error {
- var err error
-
- if group == nil {
- return fmt.Errorf("group is nil")
- }
-
- // Create an ID if there isn't one already
- if group.ID == "" {
- group.ID, err = uuid.GenerateUUID()
- if err != nil {
- return fmt.Errorf("failed to generate group id")
- }
-
- // Set the hash value of the storage bucket key in group
- group.BucketKeyHash = i.groupPacker.BucketKeyHashByItemID(group.ID)
- }
-
- if group.NamespaceID == "" {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- group.NamespaceID = ns.ID
- }
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- if ns.ID != group.NamespaceID {
- return fmt.Errorf("group does not belong to this namespace")
- }
-
- // Create a name if there isn't one already
- if group.Name == "" {
- group.Name, err = i.generateName(ctx, "group")
- if err != nil {
- return fmt.Errorf("failed to generate group name")
- }
- }
-
-	// Group metadata should always be map[string]string
- err = validateMetadata(group.Metadata)
- if err != nil {
- return errwrap.Wrapf("invalid group metadata: {{err}}", err)
- }
-
- // Set the creation and last update times
- if group.CreationTime == nil {
- group.CreationTime = ptypes.TimestampNow()
- group.LastUpdateTime = group.CreationTime
- } else {
- group.LastUpdateTime = ptypes.TimestampNow()
- }
-
- // Remove duplicate entity IDs and check if all IDs are valid
- group.MemberEntityIDs = strutil.RemoveDuplicates(group.MemberEntityIDs, false)
- for _, entityID := range group.MemberEntityIDs {
- entity, err := i.MemDBEntityByID(entityID, false)
- if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to validate entity ID %q: {{err}}", entityID), err)
- }
- if entity == nil {
- return fmt.Errorf("invalid entity ID %q", entityID)
- }
- }
-
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- memberGroupIDs = strutil.RemoveDuplicates(memberGroupIDs, false)
-
-	// For any member group IDs that were removed from the list, remove the
-	// current group ID from their respective ParentGroupIDs.
-
- // Get the current MemberGroups IDs for this group
- var currentMemberGroupIDs []string
- currentMemberGroups, err := i.MemDBGroupsByParentGroupID(group.ID, false)
- if err != nil {
- return err
- }
- for _, currentMemberGroup := range currentMemberGroups {
- currentMemberGroupIDs = append(currentMemberGroupIDs, currentMemberGroup.ID)
- }
-
- // Update parent group IDs in the removed members
- for _, currentMemberGroupID := range currentMemberGroupIDs {
- if strutil.StrListContains(memberGroupIDs, currentMemberGroupID) {
- continue
- }
-
- currentMemberGroup, err := i.MemDBGroupByID(currentMemberGroupID, true)
- if err != nil {
- return err
- }
- if currentMemberGroup == nil {
- return fmt.Errorf("invalid member group ID %q", currentMemberGroupID)
- }
-
- // Remove group ID from the parent group IDs
- currentMemberGroup.ParentGroupIDs = strutil.StrListDelete(currentMemberGroup.ParentGroupIDs, group.ID)
-
- err = i.UpsertGroupInTxn(txn, currentMemberGroup, true)
- if err != nil {
- return err
- }
- }
-
-	// With the group lock held, make membership updates to all the
-	// relevant groups
- for _, memberGroupID := range memberGroupIDs {
- memberGroup, err := i.MemDBGroupByID(memberGroupID, true)
- if err != nil {
- return err
- }
- if memberGroup == nil {
- return fmt.Errorf("invalid member group ID %q", memberGroupID)
- }
-
- // Skip if memberGroupID is already a member of group.ID
- if strutil.StrListContains(memberGroup.ParentGroupIDs, group.ID) {
- continue
- }
-
- // Ensure that adding memberGroupID does not lead to cyclic
- // relationships
- // Detect self loop
- if group.ID == memberGroupID {
- return fmt.Errorf("member group ID %q is same as the ID of the group", group.ID)
- }
-
- groupByID, err := i.MemDBGroupByID(group.ID, true)
- if err != nil {
- return err
- }
-
-		// If groupByID is nil, the group doesn't exist yet and it's okay
-		// to add any group as its member group.
- if groupByID != nil {
- // If adding the memberGroupID to groupID creates a cycle, then groupID must
- // be a hop in that loop. Start a DFS traversal from memberGroupID and see if
- // it reaches back to groupID. If it does, then it's a loop.
-
-			// Create a visited set
- visited := make(map[string]bool)
- cycleDetected, err := i.detectCycleDFS(visited, groupByID.ID, memberGroupID)
- if err != nil {
- return fmt.Errorf("failed to perform cyclic relationship detection for member group ID %q", memberGroupID)
- }
- if cycleDetected {
- return fmt.Errorf("cyclic relationship detected for member group ID %q", memberGroupID)
- }
- }
-
- memberGroup.ParentGroupIDs = append(memberGroup.ParentGroupIDs, group.ID)
-
-		// Technically this is an update rather than an upsert; only the
-		// method name says upsert.
- err = i.UpsertGroupInTxn(txn, memberGroup, true)
- if err != nil {
- // Ideally we would want to revert the whole operation in case of
- // errors while persisting in member groups. But there is no
- // storage transaction support yet. When we do have it, this will need
- // an update.
- return err
- }
- }
-
- // Sanitize the group alias
- if group.Alias != nil {
- group.Alias.CanonicalID = group.ID
- err = i.sanitizeAlias(ctx, group.Alias)
- if err != nil {
- return err
- }
- }
-
- err = i.UpsertGroupInTxn(txn, group, true)
- if err != nil {
- return err
- }
-
- txn.Commit()
-
- return nil
-}
-
-func (i *IdentityStore) deleteAliasesInEntityInTxn(txn *memdb.Txn, entity *identity.Entity, aliases []*identity.Alias) error {
- if entity == nil {
- return fmt.Errorf("entity is nil")
- }
-
- if txn == nil {
- return fmt.Errorf("txn is nil")
- }
-
-	var remainList []*identity.Alias
-	var removeList []*identity.Alias
-
-	// Partition the entity's aliases exactly once each, so that an alias
-	// never lands in both lists when multiple aliases are being deleted.
-	for _, alias := range entity.Aliases {
-		var remove bool
-		for _, item := range aliases {
-			if alias.ID == item.ID {
-				remove = true
-				break
-			}
-		}
-		if remove {
-			removeList = append(removeList, alias)
-		} else {
-			remainList = append(remainList, alias)
-		}
-	}
-
- // Remove identity indices from aliases table for those that needs to
- // be removed
- for _, alias := range removeList {
- err := i.MemDBDeleteAliasByIDInTxn(txn, alias.ID, false)
- if err != nil {
- return err
- }
- }
-
- // Update the entity with remaining items
- entity.Aliases = remainList
-
- return nil
-}
-
-// validateMetadata validates a set of identity metadata key/value pairs
-func validateMetadata(meta map[string]string) error {
- if len(meta) > metaMaxKeyPairs {
- return fmt.Errorf("metadata cannot contain more than %d key/value pairs", metaMaxKeyPairs)
- }
-
- for key, value := range meta {
- if err := validateMetaPair(key, value); err != nil {
- return errwrap.Wrapf(fmt.Sprintf("failed to load metadata pair (%q, %q): {{err}}", key, value), err)
- }
- }
-
- return nil
-}
-
-// validateMetaPair checks that the given key/value pair is in a valid format
-func validateMetaPair(key, value string) error {
- if key == "" {
- return fmt.Errorf("key cannot be blank")
- }
- if !metaKeyFormatRegEx(key) {
- return fmt.Errorf("key contains invalid characters")
- }
- if len(key) > metaKeyMaxLength {
- return fmt.Errorf("key is too long (limit: %d characters)", metaKeyMaxLength)
- }
- if strings.HasPrefix(key, metaKeyReservedPrefix) {
- return fmt.Errorf("key prefix %q is reserved for internal use", metaKeyReservedPrefix)
- }
- if len(value) > metaValueMaxLength {
- return fmt.Errorf("value is too long (limit: %d characters)", metaValueMaxLength)
- }
- return nil
-}
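A short usage sketch of these limits; checkPair below restates validateMetaPair's rules with the constants inlined (key <= 128 chars, value <= 512, reserved "vault-" prefix), purely for illustration:

package main

import (
	"fmt"
	"strings"
)

// checkPair is a stand-in for validateMetaPair with the limits inlined.
func checkPair(key, value string) error {
	switch {
	case key == "":
		return fmt.Errorf("key cannot be blank")
	case len(key) > 128:
		return fmt.Errorf("key is too long (limit: 128 characters)")
	case strings.HasPrefix(key, "vault-"):
		return fmt.Errorf("key prefix %q is reserved for internal use", "vault-")
	case len(value) > 512:
		return fmt.Errorf("value is too long (limit: 512 characters)")
	}
	return nil
}

func main() {
	fmt.Println(checkPair("team", "identity"))    // <nil>
	fmt.Println(checkPair("vault-internal", "x")) // reserved prefix error
}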
-
-func (i *IdentityStore) MemDBGroupByNameInTxn(ctx context.Context, txn *memdb.Txn, groupName string, clone bool) (*identity.Group, error) {
- if groupName == "" {
- return nil, fmt.Errorf("missing group name")
- }
-
- if txn == nil {
- return nil, fmt.Errorf("txn is nil")
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- groupRaw, err := txn.First(groupsTable, "name", ns.ID, groupName)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch group from memdb using group name: {{err}}", err)
- }
-
- if groupRaw == nil {
- return nil, nil
- }
-
- group, ok := groupRaw.(*identity.Group)
- if !ok {
- return nil, fmt.Errorf("failed to declare the type of fetched group")
- }
-
- if clone {
- return group.Clone()
- }
-
- return group, nil
-}
-
-func (i *IdentityStore) MemDBGroupByName(ctx context.Context, groupName string, clone bool) (*identity.Group, error) {
- if groupName == "" {
- return nil, fmt.Errorf("missing group name")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBGroupByNameInTxn(ctx, txn, groupName, clone)
-}
-
-func (i *IdentityStore) UpsertGroup(group *identity.Group, persist bool) error {
- txn := i.db.Txn(true)
- defer txn.Abort()
-
	err := i.UpsertGroupInTxn(txn, group, persist)
- if err != nil {
- return err
- }
-
- txn.Commit()
-
- return nil
-}
-
-func (i *IdentityStore) UpsertGroupInTxn(txn *memdb.Txn, group *identity.Group, persist bool) error {
- var err error
-
- if txn == nil {
- return fmt.Errorf("txn is nil")
- }
-
- if group == nil {
- return fmt.Errorf("group is nil")
- }
-
- // Increment the modify index of the group
- group.ModifyIndex++
-
- // Clear the old alias from memdb
- groupClone, err := i.MemDBGroupByID(group.ID, true)
- if err != nil {
- return err
- }
- if groupClone != nil && groupClone.Alias != nil {
- err = i.MemDBDeleteAliasByIDInTxn(txn, groupClone.Alias.ID, true)
- if err != nil {
- return err
- }
- }
-
- // Add the new alias to memdb
- if group.Alias != nil {
- err = i.MemDBUpsertAliasInTxn(txn, group.Alias, true)
- if err != nil {
- return err
- }
- }
-
- // Insert or update group in MemDB using the transaction created above
- err = i.MemDBUpsertGroupInTxn(txn, group)
- if err != nil {
- return err
- }
-
- if persist {
- groupAsAny, err := ptypes.MarshalAny(group)
- if err != nil {
- return err
- }
-
- item := &storagepacker.Item{
- ID: group.ID,
- Message: groupAsAny,
- }
-
- sent, err := sendGroupUpgrade(i, group)
- if err != nil {
- return err
- }
- if !sent {
- if err := i.groupPacker.PutItem(item); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (i *IdentityStore) MemDBUpsertGroupInTxn(txn *memdb.Txn, group *identity.Group) error {
- if txn == nil {
- return fmt.Errorf("nil txn")
- }
-
- if group == nil {
- return fmt.Errorf("group is nil")
- }
-
- if group.NamespaceID == "" {
- group.NamespaceID = namespace.RootNamespaceID
- }
-
- groupRaw, err := txn.First(groupsTable, "id", group.ID)
- if err != nil {
- return errwrap.Wrapf("failed to lookup group from memdb using group id: {{err}}", err)
- }
-
- if groupRaw != nil {
- err = txn.Delete(groupsTable, groupRaw)
- if err != nil {
- return errwrap.Wrapf("failed to delete group from memdb: {{err}}", err)
- }
- }
-
- if err := txn.Insert(groupsTable, group); err != nil {
- return errwrap.Wrapf("failed to update group into memdb: {{err}}", err)
- }
-
- return nil
-}
-
-func (i *IdentityStore) MemDBDeleteGroupByIDInTxn(txn *memdb.Txn, groupID string) error {
- if groupID == "" {
- return nil
- }
-
- if txn == nil {
- return fmt.Errorf("txn is nil")
- }
-
- group, err := i.MemDBGroupByIDInTxn(txn, groupID, false)
- if err != nil {
- return err
- }
-
- if group == nil {
- return nil
- }
-
- err = txn.Delete("groups", group)
- if err != nil {
- return errwrap.Wrapf("failed to delete group from memdb: {{err}}", err)
- }
-
- return nil
-}
-
-func (i *IdentityStore) MemDBGroupByIDInTxn(txn *memdb.Txn, groupID string, clone bool) (*identity.Group, error) {
- if groupID == "" {
- return nil, fmt.Errorf("missing group ID")
- }
-
- if txn == nil {
- return nil, fmt.Errorf("txn is nil")
- }
-
- groupRaw, err := txn.First(groupsTable, "id", groupID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch group from memdb using group ID: {{err}}", err)
- }
-
- if groupRaw == nil {
- return nil, nil
- }
-
- group, ok := groupRaw.(*identity.Group)
- if !ok {
- return nil, fmt.Errorf("failed to declare the type of fetched group")
- }
-
- if clone {
- return group.Clone()
- }
-
- return group, nil
-}
-
-func (i *IdentityStore) MemDBGroupByID(groupID string, clone bool) (*identity.Group, error) {
- if groupID == "" {
- return nil, fmt.Errorf("missing group ID")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBGroupByIDInTxn(txn, groupID, clone)
-}
-
-func (i *IdentityStore) MemDBGroupsByParentGroupIDInTxn(txn *memdb.Txn, memberGroupID string, clone bool) ([]*identity.Group, error) {
- if memberGroupID == "" {
- return nil, fmt.Errorf("missing member group ID")
- }
-
- groupsIter, err := txn.Get(groupsTable, "parent_group_ids", memberGroupID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to lookup groups using member group ID: {{err}}", err)
- }
-
- var groups []*identity.Group
- for group := groupsIter.Next(); group != nil; group = groupsIter.Next() {
- entry := group.(*identity.Group)
- if clone {
- entry, err = entry.Clone()
- if err != nil {
- return nil, err
- }
- }
- groups = append(groups, entry)
- }
-
- return groups, nil
-}
-
-func (i *IdentityStore) MemDBGroupsByParentGroupID(memberGroupID string, clone bool) ([]*identity.Group, error) {
- if memberGroupID == "" {
- return nil, fmt.Errorf("missing member group ID")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBGroupsByParentGroupIDInTxn(txn, memberGroupID, clone)
-}
-
-func (i *IdentityStore) MemDBGroupsByMemberEntityID(entityID string, clone bool, externalOnly bool) ([]*identity.Group, error) {
- txn := i.db.Txn(false)
- defer txn.Abort()
-
- return i.MemDBGroupsByMemberEntityIDInTxn(txn, entityID, clone, externalOnly)
-}
-
-func (i *IdentityStore) MemDBGroupsByMemberEntityIDInTxn(txn *memdb.Txn, entityID string, clone bool, externalOnly bool) ([]*identity.Group, error) {
- if entityID == "" {
- return nil, fmt.Errorf("missing entity ID")
- }
-
- groupsIter, err := txn.Get(groupsTable, "member_entity_ids", entityID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to lookup groups using entity ID: {{err}}", err)
- }
-
- var groups []*identity.Group
- for group := groupsIter.Next(); group != nil; group = groupsIter.Next() {
- entry := group.(*identity.Group)
- if externalOnly && entry.Type == groupTypeInternal {
- continue
- }
- if clone {
- entry, err = entry.Clone()
- if err != nil {
- return nil, err
- }
- }
- groups = append(groups, entry)
- }
-
- return groups, nil
-}
-
-func (i *IdentityStore) groupPoliciesByEntityID(entityID string) (map[string][]string, error) {
- if entityID == "" {
- return nil, fmt.Errorf("empty entity ID")
- }
-
- groups, err := i.MemDBGroupsByMemberEntityID(entityID, false, false)
- if err != nil {
- return nil, err
- }
-
- visited := make(map[string]bool)
- policies := make(map[string][]string)
- for _, group := range groups {
- err := i.collectPoliciesReverseDFS(group, visited, policies)
- if err != nil {
- return nil, err
- }
- }
-
- return policies, nil
-}
-
-func (i *IdentityStore) groupsByEntityID(entityID string) ([]*identity.Group, []*identity.Group, error) {
- if entityID == "" {
- return nil, nil, fmt.Errorf("empty entity ID")
- }
-
- groups, err := i.MemDBGroupsByMemberEntityID(entityID, true, false)
- if err != nil {
- return nil, nil, err
- }
-
- visited := make(map[string]bool)
- var tGroups []*identity.Group
- for _, group := range groups {
- gGroups, err := i.collectGroupsReverseDFS(group, visited, nil)
- if err != nil {
- return nil, nil, err
- }
- tGroups = append(tGroups, gGroups...)
- }
-
- // Remove duplicates
- groupMap := make(map[string]*identity.Group)
- for _, group := range tGroups {
- groupMap[group.ID] = group
- }
-
- tGroups = make([]*identity.Group, 0, len(groupMap))
- for _, group := range groupMap {
- tGroups = append(tGroups, group)
- }
-
- diff := diffGroups(groups, tGroups)
-
-	// Sanity check: walking up the parent groups should never produce a
-	// group that reads as deleted
- if len(diff.Deleted) != 0 {
- return nil, nil, fmt.Errorf("failed to diff group memberships")
- }
-
- return diff.Unmodified, diff.New, nil
-}
-
-func (i *IdentityStore) collectGroupsReverseDFS(group *identity.Group, visited map[string]bool, groups []*identity.Group) ([]*identity.Group, error) {
- if group == nil {
- return nil, fmt.Errorf("nil group")
- }
-
-	// If this group ID has already been visited, skip it
- if visited[group.ID] {
- return groups, nil
- }
- visited[group.ID] = true
-
- groups = append(groups, group)
-
- // Traverse all the parent groups
- for _, parentGroupID := range group.ParentGroupIDs {
- parentGroup, err := i.MemDBGroupByID(parentGroupID, false)
- if err != nil {
- return nil, err
- }
- if parentGroup == nil {
- continue
- }
- groups, err = i.collectGroupsReverseDFS(parentGroup, visited, groups)
- if err != nil {
- return nil, fmt.Errorf("failed to collect group at parent group ID %q", parentGroup.ID)
- }
- }
-
- return groups, nil
-}
-
-func (i *IdentityStore) collectPoliciesReverseDFS(group *identity.Group, visited map[string]bool, policies map[string][]string) error {
- if group == nil {
- return fmt.Errorf("nil group")
- }
-
-	// If this group ID has already been visited, skip it
- if visited[group.ID] {
- return nil
- }
- visited[group.ID] = true
-
- policies[group.NamespaceID] = append(policies[group.NamespaceID], group.Policies...)
-
- // Traverse all the parent groups
- for _, parentGroupID := range group.ParentGroupIDs {
- parentGroup, err := i.MemDBGroupByID(parentGroupID, false)
- if err != nil {
- return err
- }
- if parentGroup == nil {
- continue
- }
- err = i.collectPoliciesReverseDFS(parentGroup, visited, policies)
- if err != nil {
- return fmt.Errorf("failed to collect policies at parent group ID %q", parentGroup.ID)
- }
- }
-
- return nil
-}
-
-func (i *IdentityStore) detectCycleDFS(visited map[string]bool, startingGroupID, groupID string) (bool, error) {
- // If the traversal reaches the startingGroupID, a loop is detected
- if startingGroupID == groupID {
- return true, nil
- }
-
-	// If this group ID has already been visited, skip it
- if visited[groupID] {
- return false, nil
- }
- visited[groupID] = true
-
- group, err := i.MemDBGroupByID(groupID, true)
- if err != nil {
- return false, err
- }
- if group == nil {
- return false, nil
- }
-
- // Fetch all groups in which groupID is present as a ParentGroupID. In
- // other words, find all the subgroups of groupID.
- memberGroups, err := i.MemDBGroupsByParentGroupID(groupID, false)
- if err != nil {
- return false, err
- }
-
- // DFS traverse the member groups
- for _, memberGroup := range memberGroups {
- cycleDetected, err := i.detectCycleDFS(visited, startingGroupID, memberGroup.ID)
- if err != nil {
- return false, fmt.Errorf("failed to perform cycle detection at member group ID %q", memberGroup.ID)
- }
- if cycleDetected {
- return true, fmt.Errorf("cycle detected at member group ID %q", memberGroup.ID)
- }
- }
-
- return false, nil
-}
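detectCycleDFS answers one question: starting from the candidate member group, can the walk over member (child) groups reach the group being edited? A toy, storage-free sketch of the same traversal:

package main

import "fmt"

// memberGroups maps a group ID to the IDs of its member (child) groups;
// a hypothetical stand-in for MemDBGroupsByParentGroupID.
var memberGroups = map[string][]string{
	"a": {"b"},
	"b": {"c"},
	"c": {"a"}, // closes the loop a -> b -> c -> a
}

func detectCycle(visited map[string]bool, start, current string) bool {
	if start == current {
		return true
	}
	if visited[current] {
		return false
	}
	visited[current] = true
	for _, child := range memberGroups[current] {
		if detectCycle(visited, start, child) {
			return true
		}
	}
	return false
}

func main() {
	// Would adding "a" as a member of "c" create a cycle? Start the DFS
	// at the candidate member and see if it reaches back to the group.
	fmt.Println(detectCycle(map[string]bool{}, "c", "a")) // true
}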
-
-func (i *IdentityStore) memberGroupIDsByID(groupID string) ([]string, error) {
- var memberGroupIDs []string
- memberGroups, err := i.MemDBGroupsByParentGroupID(groupID, false)
- if err != nil {
- return nil, err
- }
- for _, memberGroup := range memberGroups {
- memberGroupIDs = append(memberGroupIDs, memberGroup.ID)
- }
- return memberGroupIDs, nil
-}
-
-func (i *IdentityStore) generateName(ctx context.Context, entryType string) (string, error) {
- var name string
-OUTER:
- for {
- randBytes, err := uuid.GenerateRandomBytes(4)
- if err != nil {
- return "", err
- }
- name = fmt.Sprintf("%s_%s", entryType, fmt.Sprintf("%08x", randBytes[0:4]))
-
- switch entryType {
- case "entity":
- entity, err := i.MemDBEntityByName(ctx, name, false)
- if err != nil {
- return "", err
- }
- if entity == nil {
- break OUTER
- }
- case "group":
- group, err := i.MemDBGroupByName(ctx, name, false)
- if err != nil {
- return "", err
- }
- if group == nil {
- break OUTER
- }
- default:
- return "", fmt.Errorf("unrecognized type %q", entryType)
- }
- }
-
- return name, nil
-}
-
-func (i *IdentityStore) MemDBGroupsByBucketEntryKeyHashInTxn(txn *memdb.Txn, hashValue string) ([]*identity.Group, error) {
- if txn == nil {
- return nil, fmt.Errorf("nil txn")
- }
-
- if hashValue == "" {
- return nil, fmt.Errorf("empty hash value")
- }
-
- groupsIter, err := txn.Get(groupsTable, "bucket_key_hash", hashValue)
- if err != nil {
- return nil, errwrap.Wrapf("failed to lookup groups using bucket entry key hash: {{err}}", err)
- }
-
- var groups []*identity.Group
- for group := groupsIter.Next(); group != nil; group = groupsIter.Next() {
- groups = append(groups, group.(*identity.Group))
- }
-
- return groups, nil
-}
-
-func (i *IdentityStore) MemDBGroupByAliasIDInTxn(txn *memdb.Txn, aliasID string, clone bool) (*identity.Group, error) {
- if aliasID == "" {
- return nil, fmt.Errorf("missing alias ID")
- }
-
- if txn == nil {
- return nil, fmt.Errorf("txn is nil")
- }
-
- alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, true)
- if err != nil {
- return nil, err
- }
-
- if alias == nil {
- return nil, nil
- }
-
- return i.MemDBGroupByIDInTxn(txn, alias.CanonicalID, clone)
-}
-
-func (i *IdentityStore) MemDBGroupByAliasID(aliasID string, clone bool) (*identity.Group, error) {
- if aliasID == "" {
- return nil, fmt.Errorf("missing alias ID")
- }
-
- txn := i.db.Txn(false)
-
- return i.MemDBGroupByAliasIDInTxn(txn, aliasID, clone)
-}
-
-func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(entityID string, groupAliases []*logical.Alias) ([]*logical.Alias, error) {
- i.logger.Debug("refreshing external group memberships", "entity_id", entityID, "group_aliases", groupAliases)
- if entityID == "" {
- return nil, fmt.Errorf("empty entity ID")
- }
-
- i.groupLock.Lock()
- defer i.groupLock.Unlock()
-
- txn := i.db.Txn(true)
- defer txn.Abort()
-
- oldGroups, err := i.MemDBGroupsByMemberEntityIDInTxn(txn, entityID, true, true)
- if err != nil {
- return nil, err
- }
-
- mountAccessor := ""
- if len(groupAliases) != 0 {
- mountAccessor = groupAliases[0].MountAccessor
- }
-
- var newGroups []*identity.Group
- var validAliases []*logical.Alias
- for _, alias := range groupAliases {
- aliasByFactors, err := i.MemDBAliasByFactors(alias.MountAccessor, alias.Name, true, true)
- if err != nil {
- return nil, err
- }
- if aliasByFactors == nil {
- continue
- }
- mappingGroup, err := i.MemDBGroupByAliasID(aliasByFactors.ID, true)
- if err != nil {
- return nil, err
- }
- if mappingGroup == nil {
- return nil, fmt.Errorf("group unavailable for a valid alias ID %q", aliasByFactors.ID)
- }
-
- newGroups = append(newGroups, mappingGroup)
- validAliases = append(validAliases, alias)
- }
-
- diff := diffGroups(oldGroups, newGroups)
-
- // Add the entity ID to all the new groups
- for _, group := range diff.New {
- if group.Type != groupTypeExternal {
- continue
- }
-
- i.logger.Debug("adding member entity ID to external group", "member_entity_id", entityID, "group_id", group.ID)
-
- group.MemberEntityIDs = append(group.MemberEntityIDs, entityID)
-
- err = i.UpsertGroupInTxn(txn, group, true)
- if err != nil {
- return nil, err
- }
- }
-
- // Remove the entity ID from all the deleted groups
- for _, group := range diff.Deleted {
- if group.Type != groupTypeExternal {
- continue
- }
-
- // If the external group is from a different mount, don't remove the
- // entity ID from it.
- if mountAccessor != "" && group.Alias.MountAccessor != mountAccessor {
- continue
- }
-
- i.logger.Debug("removing member entity ID from external group", "member_entity_id", entityID, "group_id", group.ID)
-
- group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entityID)
-
- err = i.UpsertGroupInTxn(txn, group, true)
- if err != nil {
- return nil, err
- }
- }
-
- txn.Commit()
-
- return validAliases, nil
-}
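The function above follows go-memdb's standard write-transaction discipline: open with Txn(true), defer Abort, and Commit only after every mutation succeeds, so any early error return rolls the whole batch back. A minimal sketch against a toy table (the `groups` schema here is illustrative, not Vault's actual one); Abort is a no-op once Commit has run, which is what makes the defer safe:

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type Group struct{ ID string }

func main() {
	schema := &memdb.DBSchema{Tables: map[string]*memdb.TableSchema{
		"groups": {
			Name: "groups",
			Indexes: map[string]*memdb.IndexSchema{
				"id": {Name: "id", Unique: true, Indexer: &memdb.StringFieldIndex{Field: "ID"}},
			},
		},
	}}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	txn := db.Txn(true) // write transaction
	defer txn.Abort()   // rolls back unless Commit runs first

	if err := txn.Insert("groups", &Group{ID: "g1"}); err != nil {
		return // deferred Abort undoes any partial writes
	}
	txn.Commit()
	fmt.Println("committed")
}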
-
-// diffGroups is used to diff two sets of groups
-func diffGroups(old, new []*identity.Group) *groupDiff {
- diff := &groupDiff{}
-
- existing := make(map[string]*identity.Group)
- for _, group := range old {
- existing[group.ID] = group
- }
-
- for _, group := range new {
- // Check if the entry in new is present in the old
- _, ok := existing[group.ID]
-
- // If it's not present, then it's a new entry
- if !ok {
- diff.New = append(diff.New, group)
- continue
- }
-
- // If it's present, it means that it's unmodified
- diff.Unmodified = append(diff.Unmodified, group)
-
- // By deleting the unmodified entries from the old set, we can identify
- // the stale ones from whatever remains.
- delete(existing, group.ID)
- }
-
- // Any remaining entries must have been deleted
- for _, me := range existing {
- diff.Deleted = append(diff.Deleted, me)
- }
-
- return diff
-}
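The same single-pass set diff works over plain IDs; a sketch assuming nothing from the identity package, where entries only in `new` are additions, entries only in `old` are deletions, and the intersection is unmodified:

package main

import "fmt"

func diffIDs(old, new []string) (added, unmodified, deleted []string) {
	existing := make(map[string]bool, len(old))
	for _, id := range old {
		existing[id] = true
	}
	for _, id := range new {
		if !existing[id] {
			added = append(added, id)
			continue
		}
		unmodified = append(unmodified, id)
		delete(existing, id) // whatever remains at the end was deleted
	}
	for id := range existing {
		deleted = append(deleted, id)
	}
	return added, unmodified, deleted
}

func main() {
	a, u, d := diffIDs([]string{"g1", "g2"}, []string{"g2", "g3"})
	fmt.Println(a, u, d) // [g3] [g2] [g1]
}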
-
-func (i *IdentityStore) handleAliasListCommon(ctx context.Context, groupAlias bool) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- tableName := entityAliasesTable
- if groupAlias {
- tableName = groupAliasesTable
- }
-
- ws := memdb.NewWatchSet()
-
- txn := i.db.Txn(false)
-
- iter, err := txn.Get(tableName, "namespace_id", ns.ID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch iterator for aliases in memdb: {{err}}", err)
- }
-
- ws.Add(iter.WatchCh())
-
- var aliasIDs []string
- aliasInfo := map[string]interface{}{}
-
- type mountInfo struct {
- MountType string
- MountPath string
- }
- mountAccessorMap := map[string]mountInfo{}
-
- for {
- raw := iter.Next()
- if raw == nil {
- break
- }
- alias := raw.(*identity.Alias)
- aliasIDs = append(aliasIDs, alias.ID)
- aliasInfoEntry := map[string]interface{}{
- "name": alias.Name,
- "canonical_id": alias.CanonicalID,
- "mount_accessor": alias.MountAccessor,
- }
-
- mi, ok := mountAccessorMap[alias.MountAccessor]
- if ok {
- aliasInfoEntry["mount_type"] = mi.MountType
- aliasInfoEntry["mount_path"] = mi.MountPath
- } else {
- mi = mountInfo{}
- if mountValidationResp := i.core.router.validateMountByAccessor(alias.MountAccessor); mountValidationResp != nil {
- mi.MountType = mountValidationResp.MountType
- mi.MountPath = mountValidationResp.MountPath
- aliasInfoEntry["mount_type"] = mi.MountType
- aliasInfoEntry["mount_path"] = mi.MountPath
- }
- mountAccessorMap[alias.MountAccessor] = mi
- }
-
- aliasInfo[alias.ID] = aliasInfoEntry
- }
-
- return logical.ListResponseWithInfo(aliasIDs, aliasInfo), nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/init.go b/vendor/github.com/hashicorp/vault/vault/init.go
deleted file mode 100644
index 426cc62a..00000000
--- a/vendor/github.com/hashicorp/vault/vault/init.go
+++ /dev/null
@@ -1,323 +0,0 @@
-package vault
-
-import (
- "context"
- "encoding/base64"
- "encoding/hex"
- "fmt"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/shamir"
-)
-
-// InitParams keeps the init function from being littered with too many
-// params; that's it!
-type InitParams struct {
- BarrierConfig *SealConfig
- RecoveryConfig *SealConfig
- RootTokenPGPKey string
-}
-
-// InitResult is used to provide the key parts back after
-// they are generated as part of the initialization.
-type InitResult struct {
- SecretShares [][]byte
- RecoveryShares [][]byte
- RootToken string
-}
-
-var (
- initPTFunc = func(c *Core) func() { return nil }
-)
-
-// Initialized checks if the Vault is already initialized
-func (c *Core) Initialized(ctx context.Context) (bool, error) {
- // Check the barrier first
- init, err := c.barrier.Initialized(ctx)
- if err != nil {
- c.logger.Error("barrier init check failed", "error", err)
- return false, err
- }
- if !init {
- c.logger.Info("security barrier not initialized")
- return false, nil
- }
-
- // Verify the seal configuration
- sealConf, err := c.seal.BarrierConfig(ctx)
- if err != nil {
- return false, err
- }
- if sealConf == nil {
- return false, fmt.Errorf("core: barrier reports initialized but no seal configuration found")
- }
-
- return true, nil
-}
-
-func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) {
- // Generate a master key
- masterKey, err := c.barrier.GenerateKey()
- if err != nil {
- return nil, nil, errwrap.Wrapf("key generation failed: {{err}}", err)
- }
-
- // Return the master key if only a single key part is used
- var unsealKeys [][]byte
- if sc.SecretShares == 1 {
- unsealKeys = append(unsealKeys, masterKey)
- } else {
- // Split the master key using the Shamir algorithm
- shares, err := shamir.Split(masterKey, sc.SecretShares, sc.SecretThreshold)
- if err != nil {
- return nil, nil, errwrap.Wrapf("failed to generate barrier shares: {{err}}", err)
- }
- unsealKeys = shares
- }
-
- // If we have PGP keys, perform the encryption
- if len(sc.PGPKeys) > 0 {
- hexEncodedShares := make([][]byte, len(unsealKeys))
- for i := range unsealKeys {
- hexEncodedShares[i] = []byte(hex.EncodeToString(unsealKeys[i]))
- }
- _, encryptedShares, err := pgpkeys.EncryptShares(hexEncodedShares, sc.PGPKeys)
- if err != nil {
- return nil, nil, err
- }
- unsealKeys = encryptedShares
- }
-
- return masterKey, unsealKeys, nil
-}
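For reference, a minimal round trip of the Shamir step, assuming the vendored github.com/hashicorp/vault/shamir package: Split produces `parts` shares, any `threshold` of which recombine to the original secret.

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/vault/shamir"
)

func main() {
	secret := []byte("master key material")

	shares, err := shamir.Split(secret, 5, 3) // 5 shares, threshold 3
	if err != nil {
		panic(err)
	}

	// Any 3 of the 5 shares reconstruct the secret.
	recovered, err := shamir.Combine(shares[:3])
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(secret, recovered)) // true
}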
-
-// Initialize is used to initialize the Vault with the given
-// configurations.
-func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitResult, error) {
- barrierConfig := initParams.BarrierConfig
- recoveryConfig := initParams.RecoveryConfig
-
- if c.seal.RecoveryKeySupported() {
- if recoveryConfig == nil {
- return nil, fmt.Errorf("recovery configuration must be supplied")
- }
-
- if recoveryConfig.SecretShares < 1 {
- return nil, fmt.Errorf("recovery configuration must specify a positive number of shares")
- }
-
- // Check if the seal configuration is valid
- if err := recoveryConfig.Validate(); err != nil {
- c.logger.Error("invalid recovery configuration", "error", err)
- return nil, errwrap.Wrapf("invalid recovery configuration: {{err}}", err)
- }
- }
-
- // Check if the seal configuration is valid
- if err := barrierConfig.Validate(); err != nil {
- c.logger.Error("invalid seal configuration", "error", err)
- return nil, errwrap.Wrapf("invalid seal configuration: {{err}}", err)
- }
-
- // Avoid an initialization race
- c.stateLock.Lock()
- defer c.stateLock.Unlock()
-
- // Check if we are initialized
- init, err := c.Initialized(ctx)
- if err != nil {
- return nil, err
- }
- if init {
- return nil, ErrAlreadyInit
- }
-
- err = c.seal.Init(ctx)
- if err != nil {
- c.logger.Error("failed to initialize seal", "error", err)
- return nil, errwrap.Wrapf("error initializing seal: {{err}}", err)
- }
-
- barrierKey, barrierUnsealKeys, err := c.generateShares(barrierConfig)
- if err != nil {
- c.logger.Error("error generating shares", "error", err)
- return nil, err
- }
-
- initPTCleanup := initPTFunc(c)
- if initPTCleanup != nil {
- defer initPTCleanup()
- }
-
- // Initialize the barrier
- if err := c.barrier.Initialize(ctx, barrierKey); err != nil {
- c.logger.Error("failed to initialize barrier", "error", err)
- return nil, errwrap.Wrapf("failed to initialize barrier: {{err}}", err)
- }
- if c.logger.IsInfo() {
- c.logger.Info("security barrier initialized", "shares", barrierConfig.SecretShares, "threshold", barrierConfig.SecretThreshold)
- }
-
- // Unseal the barrier
- if err := c.barrier.Unseal(ctx, barrierKey); err != nil {
- c.logger.Error("failed to unseal barrier", "error", err)
- return nil, errwrap.Wrapf("failed to unseal barrier: {{err}}", err)
- }
-
- // Ensure the barrier is re-sealed
- defer func() {
- // Defers are LIFO so we need to run this here too to ensure the stop
- // happens before sealing. preSeal also stops, so we just make the
- // stopping safe against multiple calls.
- if err := c.barrier.Seal(); err != nil {
- c.logger.Error("failed to seal barrier", "error", err)
- }
- }()
-
- err = c.seal.SetBarrierConfig(ctx, barrierConfig)
- if err != nil {
- c.logger.Error("failed to save barrier configuration", "error", err)
- return nil, errwrap.Wrapf("barrier configuration saving failed: {{err}}", err)
- }
-
- // If we are storing shares, pop them out of the returned results and push
- // them through the seal
- if barrierConfig.StoredShares > 0 {
- var keysToStore [][]byte
- for i := 0; i < barrierConfig.StoredShares; i++ {
- keysToStore = append(keysToStore, barrierUnsealKeys[0])
- barrierUnsealKeys = barrierUnsealKeys[1:]
- }
- if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil {
- c.logger.Error("failed to store keys", "error", err)
- return nil, errwrap.Wrapf("failed to store keys: {{err}}", err)
- }
- }
-
- results := &InitResult{
- SecretShares: barrierUnsealKeys,
- }
-
- // Perform initial setup
- if err := c.setupCluster(ctx); err != nil {
- c.logger.Error("cluster setup failed during init", "error", err)
- return nil, err
- }
-
- // Start tracking
- if initPTCleanup != nil {
- initPTCleanup()
- }
-
- activeCtx, ctxCancel := context.WithCancel(namespace.RootContext(nil))
- if err := c.postUnseal(activeCtx, ctxCancel, standardUnsealStrategy{}); err != nil {
- c.logger.Error("post-unseal setup failed during init", "error", err)
- return nil, err
- }
-
- // Save the recovery configuration regardless, but only generate a key if
- // shares are requested. When using recovery keys they are stored in the
- // barrier, so this must happen post-unseal.
- if c.seal.RecoveryKeySupported() {
- err = c.seal.SetRecoveryConfig(ctx, recoveryConfig)
- if err != nil {
- c.logger.Error("failed to save recovery configuration", "error", err)
- return nil, errwrap.Wrapf("recovery configuration saving failed: {{err}}", err)
- }
-
- if recoveryConfig.SecretShares > 0 {
- recoveryKey, recoveryUnsealKeys, err := c.generateShares(recoveryConfig)
- if err != nil {
- c.logger.Error("failed to generate recovery shares", "error", err)
- return nil, err
- }
-
- err = c.seal.SetRecoveryKey(ctx, recoveryKey)
- if err != nil {
- return nil, err
- }
-
- results.RecoveryShares = recoveryUnsealKeys
- }
- }
-
- // Generate a new root token
- rootToken, err := c.tokenStore.rootToken(ctx)
- if err != nil {
- c.logger.Error("root token generation failed", "error", err)
- return nil, err
- }
- results.RootToken = rootToken.ID
- c.logger.Info("root token generated")
-
- if initParams.RootTokenPGPKey != "" {
- _, encryptedVals, err := pgpkeys.EncryptShares([][]byte{[]byte(results.RootToken)}, []string{initParams.RootTokenPGPKey})
- if err != nil {
- c.logger.Error("root token encryption failed", "error", err)
- return nil, err
- }
- results.RootToken = base64.StdEncoding.EncodeToString(encryptedVals[0])
- }
-
- // Prepare to re-seal
- if err := c.preSeal(); err != nil {
- c.logger.Error("pre-seal teardown failed", "error", err)
- return nil, err
- }
-
- return results, nil
-}
-
-// UnsealWithStoredKeys performs auto-unseal using stored keys.
-func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
- if !c.seal.StoredKeysSupported() {
- return nil
- }
-
- // Disallow auto-unsealing when migrating
- if c.IsInSealMigration() {
- return nil
- }
-
- sealed := c.Sealed()
- if !sealed {
- return nil
- }
-
- c.logger.Info("stored unseal keys supported, attempting fetch")
- keys, err := c.seal.GetStoredKeys(ctx)
- if err != nil {
- c.logger.Error("fetching stored unseal keys failed", "error", err)
- return &NonFatalError{Err: errwrap.Wrapf("fetching stored unseal keys failed: {{err}}", err)}
- }
- if len(keys) == 0 {
- c.logger.Warn("stored unseal key(s) supported but none found")
- } else {
- unsealed := false
- keysUsed := 0
- for _, key := range keys {
- unsealed, err = c.Unseal(key)
- if err != nil {
- c.logger.Error("unseal with stored unseal key failed", "error", err)
- return &NonFatalError{Err: errwrap.Wrapf("unseal with stored key failed: {{err}}", err)}
- }
- keysUsed++
- if unsealed {
- break
- }
- }
- if !unsealed {
- if c.logger.IsWarn() {
- c.logger.Warn("stored unseal key(s) used but Vault not unsealed yet", "stored_keys_used", keysUsed)
- }
- } else {
- if c.logger.IsInfo() {
- c.logger.Info("successfully unsealed with stored key(s)", "stored_keys_used", keysUsed)
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/keyring.go b/vendor/github.com/hashicorp/vault/vault/keyring.go
deleted file mode 100644
index fd656479..00000000
--- a/vendor/github.com/hashicorp/vault/vault/keyring.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package vault
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/jsonutil"
-)
-
-// Keyring is used to manage multiple encryption keys used by
-// the barrier. New keys can be installed and each has a sequential term.
-// The term used to encrypt a key is prefixed to the key written out.
-// All data is encrypted with the latest key, but storing the old keys
-// allows for decryption of keys written previously. Along with the encryption
-// keys, the keyring also tracks the master key. This is necessary so that
-// when a new key is added to the keyring, we can encrypt with the master key
-// and write out the new keyring.
-type Keyring struct {
- masterKey []byte
- keys map[uint32]*Key
- activeTerm uint32
-}
-
-// EncodedKeyring is used for serialization of the keyring
-type EncodedKeyring struct {
- MasterKey []byte
- Keys []*Key
-}
-
-// Key represents a single term, along with the key used.
-type Key struct {
- Term uint32
- Version int
- Value []byte
- InstallTime time.Time
-}
-
-// Serialize is used to create a byte encoded key
-func (k *Key) Serialize() ([]byte, error) {
- return json.Marshal(k)
-}
-
-// DeserializeKey is used to deserialize and return a new key
-func DeserializeKey(buf []byte) (*Key, error) {
- k := new(Key)
- if err := jsonutil.DecodeJSON(buf, k); err != nil {
- return nil, errwrap.Wrapf("deserialization failed: {{err}}", err)
- }
- return k, nil
-}
-
-// NewKeyring creates a new keyring
-func NewKeyring() *Keyring {
- k := &Keyring{
- keys: make(map[uint32]*Key),
- activeTerm: 0,
- }
- return k
-}
-
-// Clone returns a new copy of the keyring
-func (k *Keyring) Clone() *Keyring {
- clone := &Keyring{
- masterKey: k.masterKey,
- keys: make(map[uint32]*Key, len(k.keys)),
- activeTerm: k.activeTerm,
- }
- for idx, key := range k.keys {
- clone.keys[idx] = key
- }
- return clone
-}
-
-// AddKey adds a new key to the keyring
-func (k *Keyring) AddKey(key *Key) (*Keyring, error) {
- // Ensure there is no conflict
- if exist, ok := k.keys[key.Term]; ok {
- if !bytes.Equal(key.Value, exist.Value) {
- return nil, fmt.Errorf("conflicting key for term %d already installed", key.Term)
- }
- return k, nil
- }
-
- // Add a time if none
- if key.InstallTime.IsZero() {
- key.InstallTime = time.Now()
- }
-
- // Make a new keyring
- clone := k.Clone()
-
- // Install the new key
- clone.keys[key.Term] = key
-
- // Update the active term if newer
- if key.Term > clone.activeTerm {
- clone.activeTerm = key.Term
- }
- return clone, nil
-}
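AddKey, RemoveKey, and SetMasterKey all follow a copy-on-write discipline: they clone the keyring, mutate the clone, and return it, so readers still holding the old keyring never observe a concurrent change. A generic sketch of that shape (the `ring` type is illustrative):

package main

import "fmt"

type ring struct {
	keys   map[uint32][]byte
	active uint32
}

func (r *ring) clone() *ring {
	c := &ring{keys: make(map[uint32][]byte, len(r.keys)), active: r.active}
	for term, key := range r.keys {
		c.keys[term] = key
	}
	return c
}

// addKey never mutates the receiver; callers swap in the returned ring.
func (r *ring) addKey(term uint32, key []byte) *ring {
	c := r.clone()
	c.keys[term] = key
	if term > c.active {
		c.active = term
	}
	return c
}

func main() {
	r1 := &ring{keys: map[uint32][]byte{}}
	r2 := r1.addKey(1, []byte("k1"))
	fmt.Println(len(r1.keys), len(r2.keys), r2.active) // 0 1 1
}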
-
-// RemoveKey removes a key from the keyring
-func (k *Keyring) RemoveKey(term uint32) (*Keyring, error) {
- // Ensure this is not the active key
- if term == k.activeTerm {
- return nil, fmt.Errorf("cannot remove active key")
- }
-
- // Check if this term does not exist
- if _, ok := k.keys[term]; !ok {
- return k, nil
- }
-
- // Delete the key
- clone := k.Clone()
- delete(clone.keys, term)
- return clone, nil
-}
-
-// ActiveTerm returns the currently active term
-func (k *Keyring) ActiveTerm() uint32 {
- return k.activeTerm
-}
-
-// ActiveKey returns the active encryption key, or nil
-func (k *Keyring) ActiveKey() *Key {
- return k.keys[k.activeTerm]
-}
-
-// TermKey returns the key for the given term, or nil
-func (k *Keyring) TermKey(term uint32) *Key {
- return k.keys[term]
-}
-
-// SetMasterKey is used to update the master key
-func (k *Keyring) SetMasterKey(val []byte) *Keyring {
- valCopy := make([]byte, len(val))
- copy(valCopy, val)
- clone := k.Clone()
- clone.masterKey = valCopy
- return clone
-}
-
-// MasterKey returns the master key
-func (k *Keyring) MasterKey() []byte {
- return k.masterKey
-}
-
-// Serialize is used to create a byte encoded keyring
-func (k *Keyring) Serialize() ([]byte, error) {
- // Create the encoded entry
- enc := EncodedKeyring{
- MasterKey: k.masterKey,
- }
- for _, key := range k.keys {
- enc.Keys = append(enc.Keys, key)
- }
-
- // JSON encode the keyring
- buf, err := json.Marshal(enc)
- return buf, err
-}
-
-// DeserializeKeyring is used to deserialize and return a new keyring
-func DeserializeKeyring(buf []byte) (*Keyring, error) {
- // Deserialize the keyring
- var enc EncodedKeyring
- if err := jsonutil.DecodeJSON(buf, &enc); err != nil {
- return nil, errwrap.Wrapf("deserialization failed: {{err}}", err)
- }
-
- // Create a new keyring
- k := NewKeyring()
- k.masterKey = enc.MasterKey
- for _, key := range enc.Keys {
- k.keys[key.Term] = key
- if key.Term > k.activeTerm {
- k.activeTerm = key.Term
- }
- }
- return k, nil
-}
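Note that EncodedKeyring does not persist the active term; DeserializeKeyring re-derives it as the highest term present. A round-trip sketch of that property, using stand-in types with the same JSON shape:

package main

import (
	"encoding/json"
	"fmt"
)

type key struct {
	Term  uint32
	Value []byte
}

type encoded struct {
	MasterKey []byte
	Keys      []*key
}

func main() {
	buf, _ := json.Marshal(encoded{
		MasterKey: []byte("master"),
		Keys:      []*key{{Term: 1, Value: []byte("a")}, {Term: 2, Value: []byte("b")}},
	})

	var dec encoded
	if err := json.Unmarshal(buf, &dec); err != nil {
		panic(err)
	}

	var active uint32
	for _, k := range dec.Keys {
		if k.Term > active {
			active = k.Term // active term is the max term, as in DeserializeKeyring
		}
	}
	fmt.Println(active) // 2
}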
-
-// N.B.:
-// Since Go 1.5 these are not reliable; see the documentation around the memzero
-// function. These are best-effort.
-func (k *Keyring) Zeroize(keysToo bool) {
- if k == nil {
- return
- }
- if k.masterKey != nil {
- memzero(k.masterKey)
- }
- if !keysToo || k.keys == nil {
- return
- }
- for _, key := range k.keys {
- memzero(key.Value)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go b/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
deleted file mode 100644
index 5f45d9e5..00000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_cubbyhole.go
+++ /dev/null
@@ -1,240 +0,0 @@
-package vault
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// CubbyholeBackendFactory constructs a new cubbyhole backend
-func CubbyholeBackendFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
- b := &CubbyholeBackend{}
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(cubbyholeHelp),
- }
-
- b.Backend.Paths = append(b.Backend.Paths, b.paths()...)
-
- if conf == nil {
- return nil, fmt.Errorf("configuration passed into backend is nil")
- }
- b.Backend.Setup(ctx, conf)
-
- return b, nil
-}
-
-// CubbyholeBackend is used for storing secrets directly into the physical
-// backend. The secrets are encrypted in the durable storage.
-// This differs from kv in that every token has its own private
-// storage view. The view is removed when the token expires.
-type CubbyholeBackend struct {
- *framework.Backend
-
- saltUUID string
- storageView logical.Storage
-}
-
-func (b *CubbyholeBackend) paths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: framework.MatchAllRegex("path"),
-
- Fields: map[string]*framework.FieldSchema{
- "path": {
- Type: framework.TypeString,
- Description: "Specifies the path of the secret.",
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleRead,
- Summary: "Retrieve the secret at the specified location.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleWrite,
- Summary: "Store a secret at the specified location.",
- },
- logical.CreateOperation: &framework.PathOperation{
- Callback: b.handleWrite,
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleDelete,
- Summary: "Deletes the secret at the specified location.",
- },
- logical.ListOperation: &framework.PathOperation{
- Callback: b.handleList,
- Summary: "List secret entries at the specified location.",
- Description: "Folders are suffixed with /. The input must be a folder; list on a file will not return a value. The values themselves are not accessible via this command.",
- },
- },
-
- ExistenceCheck: b.handleExistenceCheck,
-
- HelpSynopsis: strings.TrimSpace(cubbyholeHelpSynopsis),
- HelpDescription: strings.TrimSpace(cubbyholeHelpDescription),
- },
- }
-}
-
-func (b *CubbyholeBackend) revoke(ctx context.Context, saltedToken string) error {
- if saltedToken == "" {
- return fmt.Errorf("client token empty during revocation")
- }
-
- if err := logical.ClearView(ctx, b.storageView.(*BarrierView).SubView(saltedToken+"/")); err != nil {
- return err
- }
-
- return nil
-}
-
-func (b *CubbyholeBackend) handleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
- out, err := req.Storage.Get(ctx, req.ClientToken+"/"+req.Path)
- if err != nil {
- return false, errwrap.Wrapf("existence check failed: {{err}}", err)
- }
-
- return out != nil, nil
-}
-
-func (b *CubbyholeBackend) handleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- return nil, fmt.Errorf("client token empty")
- }
-
- path := data.Get("path").(string)
-
- // Read the path
- out, err := req.Storage.Get(ctx, req.ClientToken+"/"+path)
- if err != nil {
- return nil, errwrap.Wrapf("read failed: {{err}}", err)
- }
-
- // Fast-path the no data case
- if out == nil {
- return nil, nil
- }
-
- // Decode the data
- var rawData map[string]interface{}
- if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil {
- return nil, errwrap.Wrapf("json decoding failed: {{err}}", err)
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: rawData,
- }
-
- return resp, nil
-}
-
-func (b *CubbyholeBackend) handleWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- return nil, fmt.Errorf("client token empty")
- }
- // Check that some fields are given
- if len(req.Data) == 0 {
- return nil, fmt.Errorf("missing data fields")
- }
-
- path := data.Get("path").(string)
-
- // JSON encode the data
- buf, err := json.Marshal(req.Data)
- if err != nil {
- return nil, errwrap.Wrapf("json encoding failed: {{err}}", err)
- }
-
- // Write out a new key
- entry := &logical.StorageEntry{
- Key: req.ClientToken + "/" + path,
- Value: buf,
- }
- if req.WrapInfo != nil && req.WrapInfo.SealWrap {
- entry.SealWrap = true
- }
- if err := req.Storage.Put(ctx, entry); err != nil {
- return nil, errwrap.Wrapf("failed to write: {{err}}", err)
- }
-
- return nil, nil
-}
-
-func (b *CubbyholeBackend) handleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- return nil, fmt.Errorf("client token empty")
- }
-
- path := data.Get("path").(string)
-
- // Delete the key at the request path
- if err := req.Storage.Delete(ctx, req.ClientToken+"/"+path); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *CubbyholeBackend) handleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- return nil, fmt.Errorf("client token empty")
- }
-
- // Right now we only handle directories, so ensure the path ends with "/". We
- // also check whether it's empty so we don't end up doing a listing on "//".
- path := data.Get("path").(string)
- if path != "" && !strings.HasSuffix(path, "/") {
- path = path + "/"
- }
-
- // List the keys at the prefix given by the request
- keys, err := req.Storage.List(ctx, req.ClientToken+"/"+path)
- if err != nil {
- return nil, err
- }
-
- // Strip the token
- strippedKeys := make([]string, len(keys))
- for i, key := range keys {
- strippedKeys[i] = strings.TrimPrefix(key, req.ClientToken+"/")
- }
-
- // Generate the response
- return logical.ListResponse(strippedKeys), nil
-}
-
-const cubbyholeHelp = `
-The cubbyhole backend reads and writes arbitrary secrets to the backend.
-The secrets are encrypted/decrypted by Vault: they are never stored
-unencrypted in the backend and the backend never has an opportunity to
-see the unencrypted value.
-
-This backend differs from the 'kv' backend in that it is namespaced
-per-token. Tokens can only read and write their own values, with no
-sharing possible (per-token cubbyholes). This can be useful for implementing
-certain authentication workflows, as well as "scratch" areas for individual
-clients. When the token is revoked, the entire set of stored values for that
-token is also removed.
-`
-
-const cubbyholeHelpSynopsis = `
-Pass-through secret storage to a token-specific cubbyhole in the storage
-backend, allowing you to read/write arbitrary data into secret storage.
-`
-
-const cubbyholeHelpDescription = `
-The cubbyhole backend reads and writes arbitrary data into secret storage,
-encrypting it along the way.
-
-The view into the cubbyhole storage space is different for each token; it is
-a per-token cubbyhole. When the token is revoked all values are removed.
-`
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go b/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
deleted file mode 100644
index 6c10cc7b..00000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_passthrough.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package vault
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-// PassthroughBackendFactory returns a PassthroughBackend
-// with leases switched off
-func PassthroughBackendFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
- return LeaseSwitchedPassthroughBackend(ctx, conf, false)
-}
-
-// LeasedPassthroughBackendFactory returns a PassthroughBackend
-// with leases switched on
-func LeasedPassthroughBackendFactory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
- return LeaseSwitchedPassthroughBackend(ctx, conf, true)
-}
-
-// LeaseSwitchedPassthroughBackend returns a PassthroughBackend
-// with leases switched on or off
-func LeaseSwitchedPassthroughBackend(ctx context.Context, conf *logical.BackendConfig, leases bool) (logical.Backend, error) {
- var b PassthroughBackend
- b.generateLeases = leases
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(passthroughHelp),
-
- PathsSpecial: &logical.Paths{
- SealWrapStorage: []string{
- "*",
- },
- },
-
- Paths: []*framework.Path{
- {
- Pattern: ".*",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleRead,
- logical.CreateOperation: b.handleWrite,
- logical.UpdateOperation: b.handleWrite,
- logical.DeleteOperation: b.handleDelete,
- logical.ListOperation: b.handleList,
- },
-
- ExistenceCheck: b.handleExistenceCheck,
-
- HelpSynopsis: strings.TrimSpace(passthroughHelpSynopsis),
- HelpDescription: strings.TrimSpace(passthroughHelpDescription),
- },
- },
- BackendType: logical.TypeLogical,
- }
-
- b.Backend.Secrets = []*framework.Secret{
- &framework.Secret{
- Type: "kv",
-
- Renew: b.handleRead,
- Revoke: b.handleRevoke,
- },
- }
-
- if conf == nil {
- return nil, fmt.Errorf("configuration passed into backend is nil")
- }
- b.Backend.Setup(ctx, conf)
-
- return &b, nil
-}
-
-// PassthroughBackend is used for storing secrets directly into the physical
-// backend. The secrets are encrypted in the durable storage and custom TTL
-// information can be specified, but otherwise this backend doesn't do anything
-// fancy.
-type PassthroughBackend struct {
- *framework.Backend
- generateLeases bool
-}
-
-func (b *PassthroughBackend) handleRevoke(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // This is a no-op
- return nil, nil
-}
-
-func (b *PassthroughBackend) handleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
- out, err := req.Storage.Get(ctx, req.Path)
- if err != nil {
- return false, errwrap.Wrapf("existence check failed: {{err}}", err)
- }
-
- return out != nil, nil
-}
-
-func (b *PassthroughBackend) handleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Read the path
- out, err := req.Storage.Get(ctx, req.Path)
- if err != nil {
- return nil, errwrap.Wrapf("read failed: {{err}}", err)
- }
-
- // Fast-path the no data case
- if out == nil {
- return nil, nil
- }
-
- // Decode the data
- var rawData map[string]interface{}
-
- if err := jsonutil.DecodeJSON(out.Value, &rawData); err != nil {
- return nil, errwrap.Wrapf("json decoding failed: {{err}}", err)
- }
-
- var resp *logical.Response
- if b.generateLeases {
- // Generate the response
- resp = b.Secret("kv").Response(rawData, nil)
- resp.Secret.Renewable = false
- } else {
- resp = &logical.Response{
- Secret: &logical.Secret{},
- Data: rawData,
- }
- }
-
- // Ensure seal wrapping is carried through if the response is
- // response-wrapped
- if out.SealWrap {
- if resp.WrapInfo == nil {
- resp.WrapInfo = &wrapping.ResponseWrapInfo{}
- }
- resp.WrapInfo.SealWrap = out.SealWrap
- }
-
- // Check if there is a ttl key
- ttlDuration := b.System().DefaultLeaseTTL()
- ttlRaw, ok := rawData["ttl"]
- if !ok {
- ttlRaw, ok = rawData["lease"]
- }
- if ok {
- dur, err := parseutil.ParseDurationSecond(ttlRaw)
- if err == nil {
- ttlDuration = dur
- }
-
- if b.generateLeases {
- resp.Secret.Renewable = true
- }
- }
-
- resp.Secret.TTL = ttlDuration
-
- return resp, nil
-}
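The TTL lookup above prefers the "ttl" key and falls back to the legacy "lease" key, keeping the backend default when neither is present or parseable. A sketch of that precedence, assuming the vendored helper/parseutil package:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/helper/parseutil"
)

func secretTTL(data map[string]interface{}, def time.Duration) time.Duration {
	raw, ok := data["ttl"]
	if !ok {
		raw, ok = data["lease"] // legacy field name
	}
	if ok {
		if dur, err := parseutil.ParseDurationSecond(raw); err == nil {
			return dur
		}
	}
	return def
}

func main() {
	data := map[string]interface{}{"lease": "30m"}
	fmt.Println(secretTTL(data, time.Hour)) // 30m0s
}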
-
-func (b *PassthroughBackend) GeneratesLeases() bool {
- return b.generateLeases
-}
-
-func (b *PassthroughBackend) handleWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.Path == "" {
- return logical.ErrorResponse("missing path"), nil
- }
-
- // Check that some fields are given
- if len(req.Data) == 0 {
- return logical.ErrorResponse("missing data fields"), nil
- }
-
- // JSON encode the data
- buf, err := json.Marshal(req.Data)
- if err != nil {
- return nil, errwrap.Wrapf("json encoding failed: {{err}}", err)
- }
-
- // Write out a new key
- entry := &logical.StorageEntry{
- Key: req.Path,
- Value: buf,
- }
- if err := req.Storage.Put(ctx, entry); err != nil {
- return nil, errwrap.Wrapf("failed to write: {{err}}", err)
- }
-
- return nil, nil
-}
-
-func (b *PassthroughBackend) handleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Delete the key at the request path
- if err := req.Storage.Delete(ctx, req.Path); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (b *PassthroughBackend) handleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Right now we only handle directories, so ensure it ends with /; however,
- // some physical backends may not handle the "/" case properly, so only add
- // it if we're not listing the root
- path := req.Path
- if path != "" && !strings.HasSuffix(path, "/") {
- path = path + "/"
- }
-
- // List the keys at the prefix given by the request
- keys, err := req.Storage.List(ctx, path)
- if err != nil {
- return nil, err
- }
-
- // Generate the response
- return logical.ListResponse(keys), nil
-}
-
-const passthroughHelp = `
-The kv backend reads and writes arbitrary secrets to the backend.
-The secrets are encrypted/decrypted by Vault: they are never stored
-unencrypted in the backend and the backend never has an opportunity to
-see the unencrypted value.
-
-TTLs can be set on a per-secret basis. These TTLs will be sent down
-when that secret is read, and it is assumed that some outside process will
-revoke and/or replace the secret at that path.
-`
-
-const passthroughHelpSynopsis = `
-Pass-through secret storage to the storage backend, allowing you to
-read/write arbitrary data into secret storage.
-`
-
-const passthroughHelpDescription = `
-The pass-through backend reads and writes arbitrary data into secret storage,
-encrypting it along the way.
-
-A TTL can be specified when writing with the "ttl" field. If given, the
-duration of leases returned by this backend will be set to this value. This
-can be used as a hint from the writer of a secret to the consumer of a secret
-that the consumer should re-read the value before the TTL has expired.
-However, any revocation must be handled by the user of this backend; the lease
-duration does not affect the provided data in any way.
-`
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system.go b/vendor/github.com/hashicorp/vault/vault/logical_system.go
deleted file mode 100644
index 20942bb8..00000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_system.go
+++ /dev/null
@@ -1,3845 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/sha256"
- "crypto/sha512"
- "encoding/base64"
- "encoding/hex"
- "encoding/json"
- "errors"
- "fmt"
- "hash"
- "net/http"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/hashicorp/errwrap"
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-memdb"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/compressutil"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/mitchellh/mapstructure"
-)
-
-var (
- // protectedPaths cannot be accessed via the raw APIs.
- // This is both for security and to prevent disrupting Vault.
- protectedPaths = []string{
- keyringPath,
- // Changing the cluster info path can change the cluster ID, which can be disruptive
- coreLocalClusterInfoPath,
- }
-)
-
-func systemBackendMemDBSchema() *memdb.DBSchema {
- systemSchema := &memdb.DBSchema{
- Tables: make(map[string]*memdb.TableSchema),
- }
-
- schemas := getSystemSchemas()
-
- for _, schemaFunc := range schemas {
- schema := schemaFunc()
- if _, ok := systemSchema.Tables[schema.Name]; ok {
- panic(fmt.Sprintf("duplicate table name: %s", schema.Name))
- }
- systemSchema.Tables[schema.Name] = schema
- }
-
- return systemSchema
-}
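The assembly loop panics on duplicate table names so that a schema wiring bug fails loudly at startup rather than silently shadowing a table. A generalized sketch of the same pattern (the `accounts` table is illustrative):

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

// mergeTables assembles per-table schema constructors into one DBSchema,
// panicking on duplicates so a wiring bug surfaces at startup.
func mergeTables(fns ...func() *memdb.TableSchema) *memdb.DBSchema {
	out := &memdb.DBSchema{Tables: make(map[string]*memdb.TableSchema)}
	for _, fn := range fns {
		table := fn()
		if _, ok := out.Tables[table.Name]; ok {
			panic(fmt.Sprintf("duplicate table name: %s", table.Name))
		}
		out.Tables[table.Name] = table
	}
	return out
}

func accountsTable() *memdb.TableSchema {
	return &memdb.TableSchema{
		Name: "accounts",
		Indexes: map[string]*memdb.IndexSchema{
			"id": {Name: "id", Unique: true, Indexer: &memdb.StringFieldIndex{Field: "ID"}},
		},
	}
}

func main() {
	schema := mergeTables(accountsTable)
	fmt.Println(len(schema.Tables)) // 1
}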
-
-func NewSystemBackend(core *Core, logger log.Logger) *SystemBackend {
- db, _ := memdb.NewMemDB(systemBackendMemDBSchema())
-
- b := &SystemBackend{
- Core: core,
- db: db,
- logger: logger,
- mfaLogger: core.baseLogger.Named("mfa"),
- mfaLock: &sync.RWMutex{},
- }
-
- core.AddLogger(b.mfaLogger)
-
- b.Backend = &framework.Backend{
- Help: strings.TrimSpace(sysHelpRoot),
-
- PathsSpecial: &logical.Paths{
- Root: []string{
- "auth/*",
- "remount",
- "audit",
- "audit/*",
- "raw",
- "raw/*",
- "replication/primary/secondary-token",
- "replication/performance/primary/secondary-token",
- "replication/dr/primary/secondary-token",
- "replication/reindex",
- "replication/dr/reindex",
- "replication/performance/reindex",
- "rotate",
- "config/cors",
- "config/auditing/*",
- "config/ui/headers/*",
- "plugins/catalog/*",
- "revoke-prefix/*",
- "revoke-force/*",
- "leases/revoke-prefix/*",
- "leases/revoke-force/*",
- "leases/lookup/*",
- },
-
- Unauthenticated: []string{
- "wrapping/lookup",
- "wrapping/pubkey",
- "replication/status",
- "internal/specs/openapi",
- "internal/ui/mounts",
- "internal/ui/mounts/*",
- "internal/ui/namespaces",
- "replication/performance/status",
- "replication/dr/status",
- "replication/dr/secondary/promote",
- "replication/dr/secondary/update-primary",
- "replication/dr/secondary/operation-token/delete",
- "replication/dr/secondary/license",
- "replication/dr/secondary/reindex",
- },
-
- LocalStorage: []string{
- expirationSubPath,
- },
- },
- }
-
- b.Backend.Paths = append(b.Backend.Paths, entPaths(b)...)
- b.Backend.Paths = append(b.Backend.Paths, b.configPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.rekeyPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.sealPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.pluginsCatalogListPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.pluginsCatalogCRUDPath())
- b.Backend.Paths = append(b.Backend.Paths, b.pluginsReloadPath())
- b.Backend.Paths = append(b.Backend.Paths, b.auditPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.mountPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.authPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.leasePaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.policyPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.wrappingPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.toolsPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.capabilitiesPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.internalPaths()...)
- b.Backend.Paths = append(b.Backend.Paths, b.remountPath())
-
- if core.rawEnabled {
- b.Backend.Paths = append(b.Backend.Paths, &framework.Path{
- Pattern: "(raw/?$|raw/(?P.+))",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- "value": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleRawRead,
- Summary: "Read the value of the key at the given path.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleRawWrite,
- Summary: "Update the value of the key at the given path.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleRawDelete,
- Summary: "Delete the key with given path.",
- },
- logical.ListOperation: &framework.PathOperation{
- Callback: b.handleRawList,
- Summary: "Return a list keys for a given path prefix.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["raw"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["raw"][1]),
- })
- }
-
- b.Backend.Invalidate = sysInvalidate(b)
- return b
-}
-
-// SystemBackend implements logical.Backend and is used to interact with
-// the core of the system. This backend is hardcoded to exist at the "sys"
-// prefix. Conceptually it is similar to procfs on Linux.
-type SystemBackend struct {
- *framework.Backend
- Core *Core
- db *memdb.MemDB
- mfaLock *sync.RWMutex
- mfaLogger log.Logger
- logger log.Logger
-}
-
-// handleCORSRead returns the current CORS configuration
-func (b *SystemBackend) handleCORSRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- corsConf := b.Core.corsConfig
-
- enabled := corsConf.IsEnabled()
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "enabled": enabled,
- },
- }
-
- if enabled {
- corsConf.RLock()
- resp.Data["allowed_origins"] = corsConf.AllowedOrigins
- resp.Data["allowed_headers"] = corsConf.AllowedHeaders
- corsConf.RUnlock()
- }
-
- return resp, nil
-}
-
-// handleCORSUpdate sets the list of origins that are allowed to make
-// cross-origin requests and sets the CORS enabled flag to true
-func (b *SystemBackend) handleCORSUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- origins := d.Get("allowed_origins").([]string)
- headers := d.Get("allowed_headers").([]string)
-
- return nil, b.Core.corsConfig.Enable(ctx, origins, headers)
-}
-
-// handleCORSDelete sets the CORS enabled flag to false and clears the list of
-// allowed origins & headers.
-func (b *SystemBackend) handleCORSDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return nil, b.Core.corsConfig.Disable(ctx)
-}
-
-func (b *SystemBackend) handleTidyLeases(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- go func() {
- tidyCtx := namespace.ContextWithNamespace(b.Core.activeContext, ns)
- err := b.Core.expiration.Tidy(tidyCtx)
- if err != nil {
- b.Backend.Logger().Error("failed to tidy leases", "error", err)
- return
- }
- }()
-
- resp := &logical.Response{}
- resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.")
- return logical.RespondWithStatusCode(resp, req, http.StatusAccepted)
-}
-
-func (b *SystemBackend) handlePluginCatalogTypedList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginType, err := consts.ParsePluginType(d.Get("type").(string))
- if err != nil {
- return nil, err
- }
-
- plugins, err := b.Core.pluginCatalog.List(ctx, pluginType)
- if err != nil {
- return nil, err
- }
- return logical.ListResponse(plugins), nil
-}
-
-func (b *SystemBackend) handlePluginCatalogUntypedList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginsByType := make(map[string]interface{})
- for _, pluginType := range consts.PluginTypes {
- plugins, err := b.Core.pluginCatalog.List(ctx, pluginType)
- if err != nil {
- return nil, err
- }
- if len(plugins) > 0 {
- sort.Strings(plugins)
- pluginsByType[pluginType.String()] = plugins
- }
- }
- return &logical.Response{
- Data: pluginsByType,
- }, nil
-}
-
-func (b *SystemBackend) handlePluginCatalogUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginName := d.Get("name").(string)
- if pluginName == "" {
- return logical.ErrorResponse("missing plugin name"), nil
- }
-
- pluginTypeStr := d.Get("type").(string)
- if pluginTypeStr == "" {
- // If the plugin type is not provided, list it as unknown so that it is
- // added to the catalog and UpdatePlugins can sort it out later.
- pluginTypeStr = "unknown"
- }
- pluginType, err := consts.ParsePluginType(pluginTypeStr)
- if err != nil {
- return nil, err
- }
-
- sha256 := d.Get("sha256").(string)
- if sha256 == "" {
- sha256 = d.Get("sha_256").(string)
- if sha256 == "" {
- return logical.ErrorResponse("missing SHA-256 value"), nil
- }
- }
-
- command := d.Get("command").(string)
- if command == "" {
- return logical.ErrorResponse("missing command value"), nil
- }
-
- // For backwards compatibility, also accept args as part of command. Don't
- // accept args in both command and args.
- args := d.Get("args").([]string)
- parts := strings.Split(command, " ")
- if len(parts) <= 0 {
- return logical.ErrorResponse("missing command value"), nil
- } else if len(parts) > 1 && len(args) > 0 {
- return logical.ErrorResponse("must not specify args in command and args field"), nil
- } else if len(parts) > 1 {
- args = parts[1:]
- }
-
- env := d.Get("env").([]string)
-
- sha256Bytes, err := hex.DecodeString(sha256)
- if err != nil {
- return logical.ErrorResponse("Could not decode SHA-256 value from Hex"), err
- }
-
- err = b.Core.pluginCatalog.Set(ctx, pluginName, pluginType, parts[0], args, env, sha256Bytes)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
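One detail worth noting: since strings.Split with a non-empty separator always returns at least one element, the `len(parts) <= 0` branch above can never fire; an empty command is already rejected earlier. A sketch of the compatibility rule itself, where arguments may ride along in `command` for older clients but must not be given in both places:

package main

import (
	"errors"
	"fmt"
	"strings"
)

func splitCommand(command string, args []string) (string, []string, error) {
	parts := strings.Split(command, " ")
	switch {
	case parts[0] == "":
		return "", nil, errors.New("missing command value")
	case len(parts) > 1 && len(args) > 0:
		return "", nil, errors.New("must not specify args in command and args field")
	case len(parts) > 1:
		args = parts[1:]
	}
	return parts[0], args, nil
}

func main() {
	cmd, args, _ := splitCommand("vault-plugin-foo --tls-skip-verify", nil)
	fmt.Println(cmd, args) // vault-plugin-foo [--tls-skip-verify]
}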
-
-func (b *SystemBackend) handlePluginCatalogRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginName := d.Get("name").(string)
- if pluginName == "" {
- return logical.ErrorResponse("missing plugin name"), nil
- }
-
- pluginTypeStr := d.Get("type").(string)
- if pluginTypeStr == "" {
- // If the plugin type is not provided (i.e. the old
- // sys/plugins/catalog/:name endpoint is being requested), short-circuit
- // here and return a warning.
- resp := &logical.Response{}
- resp.AddWarning(fmt.Sprintf("Deprecated API endpoint, cannot read plugin information from catalog for %q", pluginName))
- return resp, nil
- }
-
- pluginType, err := consts.ParsePluginType(pluginTypeStr)
- if err != nil {
- return nil, err
- }
-
- plugin, err := b.Core.pluginCatalog.Get(ctx, pluginName, pluginType)
- if err != nil {
- return nil, err
- }
- if plugin == nil {
- return nil, nil
- }
-
- command := ""
- if !plugin.Builtin {
- command, err = filepath.Rel(b.Core.pluginCatalog.directory, plugin.Command)
- if err != nil {
- return nil, err
- }
- }
-
- data := map[string]interface{}{
- "name": plugin.Name,
- "args": plugin.Args,
- "command": command,
- "sha256": hex.EncodeToString(plugin.Sha256),
- "builtin": plugin.Builtin,
- }
-
- return &logical.Response{
- Data: data,
- }, nil
-}
-
-func (b *SystemBackend) handlePluginCatalogDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginName := d.Get("name").(string)
- if pluginName == "" {
- return logical.ErrorResponse("missing plugin name"), nil
- }
-
- var resp *logical.Response
- pluginTypeStr := d.Get("type").(string)
- if pluginTypeStr == "" {
- // If the plugin type is not provided (i.e. the old
- // sys/plugins/catalog/:name endpoint is being requested), set type to
- // unknown and let pluginCatalog.Delete proceed. It should handle
- // deregistering out of the old storage path (root of core/plugin-catalog)
- resp = new(logical.Response)
- resp.AddWarning(fmt.Sprintf("Deprecated API endpoint, cannot deregister plugin from catalog for %q", pluginName))
- pluginTypeStr = "unknown"
- }
-
- pluginType, err := consts.ParsePluginType(pluginTypeStr)
- if err != nil {
- return nil, err
- }
- if err := b.Core.pluginCatalog.Delete(ctx, pluginName, pluginType); err != nil {
- return nil, err
- }
-
- return resp, nil
-}
-
-func (b *SystemBackend) handlePluginReloadUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- pluginName := d.Get("plugin").(string)
- pluginMounts := d.Get("mounts").([]string)
-
- if pluginName != "" && len(pluginMounts) > 0 {
- return logical.ErrorResponse("plugin and mounts cannot be set at the same time"), nil
- }
- if pluginName == "" && len(pluginMounts) == 0 {
- return logical.ErrorResponse("plugin or mounts must be provided"), nil
- }
-
- if pluginName != "" {
- err := b.Core.reloadMatchingPlugin(ctx, pluginName)
- if err != nil {
- return nil, err
- }
- } else if len(pluginMounts) > 0 {
- err := b.Core.reloadMatchingPluginMounts(ctx, pluginMounts)
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
-}
-
-// handleAuditedHeaderUpdate creates or overwrites a header entry
-func (b *SystemBackend) handleAuditedHeaderUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- header := d.Get("header").(string)
- hmac := d.Get("hmac").(bool)
- if header == "" {
- return logical.ErrorResponse("missing header name"), nil
- }
-
- headerConfig := b.Core.AuditedHeadersConfig()
- err := headerConfig.add(ctx, header, hmac)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// handleAuditedHeaderDelete deletes the header with the given name
-func (b *SystemBackend) handleAuditedHeaderDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- header := d.Get("header").(string)
- if header == "" {
- return logical.ErrorResponse("missing header name"), nil
- }
-
- headerConfig := b.Core.AuditedHeadersConfig()
- err := headerConfig.remove(ctx, header)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// handleAuditedHeaderRead returns the header configuration for the given header name
-func (b *SystemBackend) handleAuditedHeaderRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- header := d.Get("header").(string)
- if header == "" {
- return logical.ErrorResponse("missing header name"), nil
- }
-
- headerConfig := b.Core.AuditedHeadersConfig()
- settings, ok := headerConfig.Headers[strings.ToLower(header)]
- if !ok {
- return logical.ErrorResponse("Could not find header in config"), nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- header: settings,
- },
- }, nil
-}
-
-// handleAuditedHeadersRead returns the whole audited headers config
-func (b *SystemBackend) handleAuditedHeadersRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- headerConfig := b.Core.AuditedHeadersConfig()
-
- return &logical.Response{
- Data: map[string]interface{}{
- "headers": headerConfig.Headers,
- },
- }, nil
-}
-
-// handleCapabilitiesAccessor returns the ACL capabilities of the
-// token associated with the given accessor for a given path.
-func (b *SystemBackend) handleCapabilitiesAccessor(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- accessor := d.Get("accessor").(string)
- if accessor == "" {
- return logical.ErrorResponse("missing accessor"), nil
- }
-
- aEntry, err := b.Core.tokenStore.lookupByAccessor(ctx, accessor, false, false)
- if err != nil {
- return nil, err
- }
-
- d.Raw["token"] = aEntry.TokenID
- return b.handleCapabilities(ctx, req, d)
-}
-
-// handleCapabilities returns the ACL capabilities of the token for a given path
-func (b *SystemBackend) handleCapabilities(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- var token string
- if strings.HasSuffix(req.Path, "capabilities-self") {
- token = req.ClientToken
- } else {
- tokenRaw, ok := d.Raw["token"]
- if ok {
- token, _ = tokenRaw.(string)
- }
- }
- if token == "" {
- return nil, fmt.Errorf("no token found")
- }
-
- ret := &logical.Response{
- Data: map[string]interface{}{},
- }
-
- paths := d.Get("paths").([]string)
- if len(paths) == 0 {
- // Read from the deprecated field
- paths = d.Get("path").([]string)
- }
-
- if len(paths) == 0 {
- return logical.ErrorResponse("paths must be supplied"), nil
- }
-
- for _, path := range paths {
- pathCap, err := b.Core.Capabilities(ctx, token, path)
- if err != nil {
- if !strings.HasSuffix(req.Path, "capabilities-self") && errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
- return nil, &logical.StatusBadRequest{Err: "invalid token"}
- }
- return nil, err
- }
- ret.Data[path] = pathCap
- }
-
- // This is only here for backwards compatibility
- if len(paths) == 1 {
- ret.Data["capabilities"] = ret.Data[paths[0]]
- }
-
- return ret, nil
-}
-
-// handleRekeyRetrieve returns backed-up, PGP-encrypted unseal keys from a
-// rekey operation
-func (b *SystemBackend) handleRekeyRetrieve(
- ctx context.Context,
- req *logical.Request,
- data *framework.FieldData,
- recovery bool) (*logical.Response, error) {
- backup, err := b.Core.RekeyRetrieveBackup(ctx, recovery)
- if err != nil {
- return nil, errwrap.Wrapf("unable to look up backed-up keys: {{err}}", err)
- }
- if backup == nil {
- return logical.ErrorResponse("no backed-up keys found"), nil
- }
-
- keysB64 := map[string][]string{}
- for k, v := range backup.Keys {
- for _, j := range v {
- currB64Keys := keysB64[k]
- if currB64Keys == nil {
- currB64Keys = []string{}
- }
- key, err := hex.DecodeString(j)
- if err != nil {
- return nil, errwrap.Wrapf("error decoding hex-encoded backup key: {{err}}", err)
- }
- currB64Keys = append(currB64Keys, base64.StdEncoding.EncodeToString(key))
- keysB64[k] = currB64Keys
- }
- }
-
- // Format the status
- resp := &logical.Response{
- Data: map[string]interface{}{
- "nonce": backup.Nonce,
- "keys": backup.Keys,
- "keys_base64": keysB64,
- },
- }
-
- return resp, nil
-}
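The inner loop converts each backed-up key from its stored hex encoding to base64 for the `keys_base64` field. A minimal sketch of just that re-encoding:

package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func hexToBase64(hexKeys []string) ([]string, error) {
	out := make([]string, 0, len(hexKeys))
	for _, h := range hexKeys {
		raw, err := hex.DecodeString(h)
		if err != nil {
			return nil, fmt.Errorf("error decoding hex-encoded backup key: %v", err)
		}
		out = append(out, base64.StdEncoding.EncodeToString(raw))
	}
	return out, nil
}

func main() {
	b64, _ := hexToBase64([]string{"deadbeef"})
	fmt.Println(b64) // [3q2+7w==]
}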
-
-func (b *SystemBackend) handleRekeyRetrieveBarrier(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRekeyRetrieve(ctx, req, data, false)
-}
-
-func (b *SystemBackend) handleRekeyRetrieveRecovery(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRekeyRetrieve(ctx, req, data, true)
-}
-
-// handleRekeyDelete deletes backed-up, PGP-encrypted unseal keys from a rekey
-// operation
-func (b *SystemBackend) handleRekeyDelete(
- ctx context.Context,
- req *logical.Request,
- data *framework.FieldData,
- recovery bool) (*logical.Response, error) {
- err := b.Core.RekeyDeleteBackup(ctx, recovery)
- if err != nil {
- return nil, errwrap.Wrapf("error during deletion of backed-up keys: {{err}}", err)
- }
-
- return nil, nil
-}
-
-func (b *SystemBackend) handleRekeyDeleteBarrier(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRekeyDelete(ctx, req, data, false)
-}
-
-func (b *SystemBackend) handleRekeyDeleteRecovery(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRekeyDelete(ctx, req, data, true)
-}
-
-func mountInfo(entry *MountEntry) map[string]interface{} {
- info := map[string]interface{}{
- "type": entry.Type,
- "description": entry.Description,
- "accessor": entry.Accessor,
- "local": entry.Local,
- "seal_wrap": entry.SealWrap,
- "options": entry.Options,
- }
- entryConfig := map[string]interface{}{
- "default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
- "max_lease_ttl": int64(entry.Config.MaxLeaseTTL.Seconds()),
- "force_no_cache": entry.Config.ForceNoCache,
- }
- if rawVal, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok {
- entryConfig["audit_non_hmac_request_keys"] = rawVal.([]string)
- }
- if rawVal, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_response_keys"); ok {
- entryConfig["audit_non_hmac_response_keys"] = rawVal.([]string)
- }
- // Even though an empty value is valid for ListingVisibility, we can ignore
- // this case during mount since there's nothing to unset/hide.
- if len(entry.Config.ListingVisibility) > 0 {
- entryConfig["listing_visibility"] = entry.Config.ListingVisibility
- }
- if rawVal, ok := entry.synthesizedConfigCache.Load("passthrough_request_headers"); ok {
- entryConfig["passthrough_request_headers"] = rawVal.([]string)
- }
- if entry.Table == credentialTableType {
- entryConfig["token_type"] = entry.Config.TokenType.String()
- }
-
- info["config"] = entryConfig
-
- return info
-}
-
-// handleMountTable handles the "mounts" endpoint to provide the mount table
-func (b *SystemBackend) handleMountTable(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- b.Core.mountsLock.RLock()
- defer b.Core.mountsLock.RUnlock()
-
- resp := &logical.Response{
- Data: make(map[string]interface{}),
- }
-
- for _, entry := range b.Core.mounts.Entries {
- // Only show entries for current namespace
- if entry.Namespace().Path != ns.Path {
- continue
- }
-
- cont, err := b.Core.checkReplicatedFiltering(ctx, entry, "")
- if err != nil {
- return nil, err
- }
- if cont {
- continue
- }
-
- // Populate mount info
- info := mountInfo(entry)
- resp.Data[entry.Path] = info
- }
-
- return resp, nil
-}
-
-// handleMount is used to mount a new path
-func (b *SystemBackend) handleMount(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.ReplicationState()
-
- local := data.Get("local").(bool)
- if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
- }
-
- // Get all the options
- path := data.Get("path").(string)
- path = sanitizeMountPath(path)
-
- logicalType := data.Get("type").(string)
- description := data.Get("description").(string)
- pluginName := data.Get("plugin_name").(string)
- sealWrap := data.Get("seal_wrap").(bool)
- options := data.Get("options").(map[string]string)
-
- var config MountConfig
- var apiConfig APIMountConfig
-
- configMap := data.Get("config").(map[string]interface{})
- if len(configMap) != 0 {
- err := mapstructure.Decode(configMap, &apiConfig)
- if err != nil {
- return logical.ErrorResponse(
- "unable to convert given mount config information"),
- logical.ErrInvalidRequest
- }
- }
-
- switch apiConfig.DefaultLeaseTTL {
- case "":
- case "system":
- default:
- tmpDef, err := parseutil.ParseDurationSecond(apiConfig.DefaultLeaseTTL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "unable to parse default TTL of %s: %s", apiConfig.DefaultLeaseTTL, err)),
- logical.ErrInvalidRequest
- }
- config.DefaultLeaseTTL = tmpDef
- }
-
- switch apiConfig.MaxLeaseTTL {
- case "":
- case "system":
- default:
- tmpMax, err := parseutil.ParseDurationSecond(apiConfig.MaxLeaseTTL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "unable to parse max TTL of %s: %s", apiConfig.MaxLeaseTTL, err)),
- logical.ErrInvalidRequest
- }
- config.MaxLeaseTTL = tmpMax
- }
-
- if config.MaxLeaseTTL != 0 && config.DefaultLeaseTTL > config.MaxLeaseTTL {
- return logical.ErrorResponse(
- "given default lease TTL greater than given max lease TTL"),
- logical.ErrInvalidRequest
- }
-
- if config.DefaultLeaseTTL > b.Core.maxLeaseTTL && config.MaxLeaseTTL == 0 {
- return logical.ErrorResponse(fmt.Sprintf(
- "given default lease TTL greater than system max lease TTL of %d", int(b.Core.maxLeaseTTL.Seconds()))),
- logical.ErrInvalidRequest
- }
-
- switch logicalType {
- case "":
- return logical.ErrorResponse(
- "backend type must be specified as a string"),
- logical.ErrInvalidRequest
- case "plugin":
- // Only set plugin name if the mount is of type plugin, with apiConfig.PluginName
- // option taking precedence.
- switch {
- case apiConfig.PluginName != "":
- logicalType = apiConfig.PluginName
- case pluginName != "":
- logicalType = pluginName
- default:
- return logical.ErrorResponse(
- "plugin_name must be provided for plugin backend"),
- logical.ErrInvalidRequest
- }
- }
-
- switch logicalType {
- case "kv":
- case "kv-v1":
- // Alias KV v1
- logicalType = "kv"
- if options == nil {
- options = map[string]string{}
- }
- options["version"] = "1"
-
- case "kv-v2":
- // Alias KV v2
- logicalType = "kv"
- if options == nil {
- options = map[string]string{}
- }
- options["version"] = "2"
-
- default:
- if options != nil && options["version"] != "" {
- return logical.ErrorResponse(fmt.Sprintf(
- "secrets engine %q does not allow setting a version", logicalType)),
- logical.ErrInvalidRequest
- }
- }
-
- // Copy over the force no cache if set
- if apiConfig.ForceNoCache {
- config.ForceNoCache = true
- }
-
- if err := checkListingVisibility(apiConfig.ListingVisibility); err != nil {
- return logical.ErrorResponse(fmt.Sprintf("invalid listing_visibility %s", apiConfig.ListingVisibility)), nil
- }
- config.ListingVisibility = apiConfig.ListingVisibility
-
- if len(apiConfig.AuditNonHMACRequestKeys) > 0 {
- config.AuditNonHMACRequestKeys = apiConfig.AuditNonHMACRequestKeys
- }
- if len(apiConfig.AuditNonHMACResponseKeys) > 0 {
- config.AuditNonHMACResponseKeys = apiConfig.AuditNonHMACResponseKeys
- }
- if len(apiConfig.PassthroughRequestHeaders) > 0 {
- config.PassthroughRequestHeaders = apiConfig.PassthroughRequestHeaders
- }
-
- // Create the mount entry
- me := &MountEntry{
- Table: mountTableType,
- Path: path,
- Type: logicalType,
- Description: description,
- Config: config,
- Local: local,
- SealWrap: sealWrap,
- Options: options,
- }
-
- // Attempt mount
- if err := b.Core.mount(ctx, me); err != nil {
- b.Backend.Logger().Error("mount failed", "path", me.Path, "error", err)
- return handleError(err)
- }
-
- return nil, nil
-}
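-
-// Illustrative sketch (not part of the original file) of the KV aliasing
-// performed above: a mount of type "kv-v2" is rewritten to type "kv" plus a
-// version option, so these two inputs are handled identically:
-//
-//	logicalType, options := "kv-v2", map[string]string(nil)
-//	// is rewritten to
-//	logicalType, options = "kv", map[string]string{"version": "2"}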
-
-// used to intercept an HTTPCodedError so it goes back to the caller
-func handleError(
- err error) (*logical.Response, error) {
- if strings.Contains(err.Error(), logical.ErrReadOnly.Error()) {
- return logical.ErrorResponse(err.Error()), err
- }
- switch err.(type) {
- case logical.HTTPCodedError:
- return logical.ErrorResponse(err.Error()), err
- default:
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-}
-
-// Performs a similar function to handleError, but upon seeing a ReadOnlyError
-// will actually strip it out to prevent forwarding
-func handleErrorNoReadOnlyForward(
- err error) (*logical.Response, error) {
- if strings.Contains(err.Error(), logical.ErrReadOnly.Error()) {
- return nil, fmt.Errorf("operation could not be completed as storage is read-only")
- }
- switch err.(type) {
- case logical.HTTPCodedError:
- return logical.ErrorResponse(err.Error()), err
- default:
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-}
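-
-// Illustrative contrast of the two helpers above (a sketch, not from the
-// original file): handleError keeps a read-only error so the request can
-// still be forwarded, while handleErrorNoReadOnlyForward strips it:
-//
-//	_, err := handleError(logical.ErrReadOnly)                 // err is logical.ErrReadOnly
-//	_, err = handleErrorNoReadOnlyForward(logical.ErrReadOnly) // generic, non-forwardable error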
-
-// handleUnmount is used to unmount a path
-func (b *SystemBackend) handleUnmount(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- path = sanitizeMountPath(path)
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- repState := b.Core.ReplicationState()
- entry := b.Core.router.MatchingMountEntry(ctx, path)
- if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil
- }
-
- // We return success when the mount does not exist so as not to reveal
- // whether the mount existed
- match := b.Core.router.MatchingMount(ctx, path)
- if match == "" || ns.Path+path != match {
- return nil, nil
- }
-
- prefix, found := b.Core.router.MatchingStoragePrefixByAPIPath(ctx, path)
- if !found {
- b.Backend.Logger().Error("unable to find storage for path", "path", path)
- return handleError(fmt.Errorf("unable to find storage for path: %q", path))
- }
-
- // Attempt unmount
- if err := b.Core.unmount(ctx, path); err != nil {
- b.Backend.Logger().Error("unmount failed", "path", path, "error", err)
- return handleError(err)
- }
-
- // Remove from filtered mounts
- if err := b.Core.removePrefixFromFilteredPaths(ctx, prefix); err != nil {
- b.Backend.Logger().Error("filtered path removal failed", path, "error", err)
- return handleError(err)
- }
-
- return nil, nil
-}
-
-// handleRemount is used to remount a path
-func (b *SystemBackend) handleRemount(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.ReplicationState()
-
- // Get the paths
- fromPath := data.Get("from").(string)
- toPath := data.Get("to").(string)
- if fromPath == "" || toPath == "" {
- return logical.ErrorResponse(
- "both 'from' and 'to' path must be specified as a string"),
- logical.ErrInvalidRequest
- }
-
- entry := b.Core.router.MatchingMountEntry(ctx, fromPath)
- if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot remount a non-local mount on a replication secondary"), nil
- }
-
- // Attempt remount
- if err := b.Core.remount(ctx, fromPath, toPath); err != nil {
- b.Backend.Logger().Error("remount failed", "from_path", fromPath, "to_path", toPath, "error", err)
- return handleError(err)
- }
-
- return nil, nil
-}
-
-// handleAuthTuneRead is used to get config settings on an auth path
-func (b *SystemBackend) handleAuthTuneRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse(
- "path must be specified as a string"),
- logical.ErrInvalidRequest
- }
- return b.handleTuneReadCommon(ctx, "auth/"+path)
-}
-
-// handleMountTuneRead is used to get config settings on a backend
-func (b *SystemBackend) handleMountTuneRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse(
- "path must be specified as a string"),
- logical.ErrInvalidRequest
- }
-
- // This call will read the configuration of both logical backends and auth methods.
- // This behavior is retained for backward compatibility. If it is not desired,
- // an error can be returned if the path has a prefix of "auth/".
- return b.handleTuneReadCommon(ctx, path)
-}
-
-// handleTuneReadCommon returns the config settings of a path
-func (b *SystemBackend) handleTuneReadCommon(ctx context.Context, path string) (*logical.Response, error) {
- path = sanitizeMountPath(path)
-
- sysView := b.Core.router.MatchingSystemView(ctx, path)
- if sysView == nil {
- b.Backend.Logger().Error("cannot fetch sysview", "path", path)
- return handleError(fmt.Errorf("cannot fetch sysview for path %q", path))
- }
-
- mountEntry := b.Core.router.MatchingMountEntry(ctx, path)
- if mountEntry == nil {
- b.Backend.Logger().Error("cannot fetch mount entry", "path", path)
- return handleError(fmt.Errorf("cannot fetch mount entry for path %q", path))
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "default_lease_ttl": int(sysView.DefaultLeaseTTL().Seconds()),
- "max_lease_ttl": int(sysView.MaxLeaseTTL().Seconds()),
- "force_no_cache": mountEntry.Config.ForceNoCache,
- },
- }
-
- if mountEntry.Table == credentialTableType {
- resp.Data["token_type"] = mountEntry.Config.TokenType.String()
- }
-
- if rawVal, ok := mountEntry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok {
- resp.Data["audit_non_hmac_request_keys"] = rawVal.([]string)
- }
-
- if rawVal, ok := mountEntry.synthesizedConfigCache.Load("audit_non_hmac_response_keys"); ok {
- resp.Data["audit_non_hmac_response_keys"] = rawVal.([]string)
- }
-
- if len(mountEntry.Config.ListingVisibility) > 0 {
- resp.Data["listing_visibility"] = mountEntry.Config.ListingVisibility
- }
-
- if rawVal, ok := mountEntry.synthesizedConfigCache.Load("passthrough_request_headers"); ok {
- resp.Data["passthrough_request_headers"] = rawVal.([]string)
- }
-
- if len(mountEntry.Options) > 0 {
- resp.Data["options"] = mountEntry.Options
- }
-
- return resp, nil
-}
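-
-// Illustrative sketch (not part of the original file): the always-present
-// fields above come from the mount's resolved SystemView, so callers see
-// effective TTLs rather than unresolved zero values:
-//
-//	resp.Data["default_lease_ttl"] // int, seconds, resolved via sysView
-//	resp.Data["max_lease_ttl"]     // int, seconds, resolved via sysView
-//	resp.Data["force_no_cache"]    // bool, straight from the mount config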
-
-// handleAuthTuneWrite is used to set config settings on an auth path
-func (b *SystemBackend) handleAuthTuneWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse("missing path"), nil
- }
-
- return b.handleTuneWriteCommon(ctx, "auth/"+path, data)
-}
-
-// handleMountTuneWrite is used to set config settings on a backend
-func (b *SystemBackend) handleMountTuneWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse("missing path"), nil
- }
-
- // This call will write the configuration of both logical backends and auth methods.
- // This behavior is retained for backward compatibility. If it is not desired,
- // an error can be returned if the path has a prefix of "auth/".
- return b.handleTuneWriteCommon(ctx, path, data)
-}
-
-// handleTuneWriteCommon is used to set config settings on a path
-func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.ReplicationState()
-
- path = sanitizeMountPath(path)
-
- // Prevent protected paths from being changed
- for _, p := range untunableMounts {
- if strings.HasPrefix(path, p) {
- b.Backend.Logger().Error("cannot tune this mount", "path", path)
- return handleError(fmt.Errorf("cannot tune %q", path))
- }
- }
-
- mountEntry := b.Core.router.MatchingMountEntry(ctx, path)
- if mountEntry == nil {
- b.Backend.Logger().Error("tune failed", "error", "no mount entry found", "path", path)
- return handleError(fmt.Errorf("tune of path %q failed: no mount entry found", path))
- }
- if !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil
- }
-
- var lock *sync.RWMutex
- switch {
- case strings.HasPrefix(path, credentialRoutePrefix):
- lock = &b.Core.authLock
- default:
- lock = &b.Core.mountsLock
- }
-
- lock.Lock()
- defer lock.Unlock()
-
- // Check again after grabbing the lock
- mountEntry = b.Core.router.MatchingMountEntry(ctx, path)
- if mountEntry == nil {
- b.Backend.Logger().Error("tune failed", "error", "no mount entry found", "path", path)
- return handleError(fmt.Errorf("tune of path %q failed: no mount entry found", path))
- }
- if !mountEntry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil
- }
-
- // Timing configuration parameters
- {
- var newDefault, newMax time.Duration
- defTTL := data.Get("default_lease_ttl").(string)
- switch defTTL {
- case "":
- newDefault = mountEntry.Config.DefaultLeaseTTL
- case "system":
- newDefault = time.Duration(0)
- default:
- tmpDef, err := parseutil.ParseDurationSecond(defTTL)
- if err != nil {
- return handleError(err)
- }
- newDefault = tmpDef
- }
-
- maxTTL := data.Get("max_lease_ttl").(string)
- switch maxTTL {
- case "":
- newMax = mountEntry.Config.MaxLeaseTTL
- case "system":
- newMax = time.Duration(0)
- default:
- tmpMax, err := parseutil.ParseDurationSecond(maxTTL)
- if err != nil {
- return handleError(err)
- }
- newMax = tmpMax
- }
-
- if newDefault != mountEntry.Config.DefaultLeaseTTL ||
- newMax != mountEntry.Config.MaxLeaseTTL {
-
- if err := b.tuneMountTTLs(ctx, path, mountEntry, newDefault, newMax); err != nil {
- b.Backend.Logger().Error("tuning failed", "path", path, "error", err)
- return handleError(err)
- }
- }
- }
-
- if rawVal, ok := data.GetOk("description"); ok {
- description := rawVal.(string)
-
- oldDesc := mountEntry.Description
- mountEntry.Description = description
-
- // Update the mount table
- var err error
- switch {
- case strings.HasPrefix(path, "auth/"):
- err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local)
- default:
- err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local)
- }
- if err != nil {
- mountEntry.Description = oldDesc
- return handleError(err)
- }
- if b.Core.logger.IsInfo() {
- b.Core.logger.Info("mount tuning of description successful", "path", path)
- }
- }
-
- if rawVal, ok := data.GetOk("audit_non_hmac_request_keys"); ok {
- auditNonHMACRequestKeys := rawVal.([]string)
-
- oldVal := mountEntry.Config.AuditNonHMACRequestKeys
- mountEntry.Config.AuditNonHMACRequestKeys = auditNonHMACRequestKeys
-
- // Update the mount table
- var err error
- switch {
- case strings.HasPrefix(path, "auth/"):
- err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local)
- default:
- err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local)
- }
- if err != nil {
- mountEntry.Config.AuditNonHMACRequestKeys = oldVal
- return handleError(err)
- }
-
- mountEntry.SyncCache()
-
- if b.Core.logger.IsInfo() {
- b.Core.logger.Info("mount tuning of audit_non_hmac_request_keys successful", "path", path)
- }
- }
-
- if rawVal, ok := data.GetOk("audit_non_hmac_response_keys"); ok {
- auditNonHMACResponseKeys := rawVal.([]string)
-
- oldVal := mountEntry.Config.AuditNonHMACResponseKeys
- mountEntry.Config.AuditNonHMACResponseKeys = auditNonHMACResponseKeys
-
- // Update the mount table
- var err error
- switch {
- case strings.HasPrefix(path, "auth/"):
- err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local)
- default:
- err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local)
- }
- if err != nil {
- mountEntry.Config.AuditNonHMACResponseKeys = oldVal
- return handleError(err)
- }
-
- mountEntry.SyncCache()
-
- if b.Core.logger.IsInfo() {
- b.Core.logger.Info("mount tuning of audit_non_hmac_response_keys successful", "path", path)
- }
- }
-
- if rawVal, ok := data.GetOk("listing_visibility"); ok {
- lvString := rawVal.(string)
- listingVisibility := ListingVisibilityType(lvString)
-
- if err := checkListingVisibility(listingVisibility); err != nil {
- return logical.ErrorResponse(fmt.Sprintf("invalid listing_visibility %s", listingVisibility)), nil
- }
-
- oldVal := mountEntry.Config.ListingVisibility
- mountEntry.Config.ListingVisibility = listingVisibility
-
- // Update the mount table
- var err error
- switch {
- case strings.HasPrefix(path, "auth/"):
- err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local)
- default:
- err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local)
- }
- if err != nil {
- mountEntry.Config.ListingVisibility = oldVal
- return handleError(err)
- }
-
- if b.Core.logger.IsInfo() {
- b.Core.logger.Info("mount tuning of listing_visibility successful", "path", path)
- }
- }
-
- if rawVal, ok := data.GetOk("token_type"); ok {
- if !strings.HasPrefix(path, "auth/") {
- return logical.ErrorResponse("'token_type' can only be modified on auth mounts"), logical.ErrInvalidRequest
- }
- if mountEntry.Type == "token" || mountEntry.Type == "ns_token" {
- return logical.ErrorResponse("'token_type' cannot be set for 'token' or 'ns_token' auth mounts"), logical.ErrInvalidRequest
- }
-
- tokenType := logical.TokenTypeDefaultService
- ttString := rawVal.(string)
-
- switch ttString {
- case "", "default-service":
- case "default-batch":
- tokenType = logical.TokenTypeDefaultBatch
- case "service":
- tokenType = logical.TokenTypeService
- case "batch":
- tokenType = logical.TokenTypeBatch
- default:
- return logical.ErrorResponse("invalid value for 'token_type'"), logical.ErrInvalidRequest
- }
-
- oldVal := mountEntry.Config.TokenType
- mountEntry.Config.TokenType = tokenType
-
- // Update the mount table
- if err := b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local); err != nil {
- mountEntry.Config.TokenType = oldVal
- return handleError(err)
- }
-
- if b.Core.logger.IsInfo() {
- b.Core.logger.Info("mount tuning of token_type successful", "path", path, "token_type", ttString)
- }
- }
-
- if rawVal, ok := data.GetOk("passthrough_request_headers"); ok {
- headers := rawVal.([]string)
-
- oldVal := mountEntry.Config.PassthroughRequestHeaders
- mountEntry.Config.PassthroughRequestHeaders = headers
-
- // Update the mount table
- var err error
- switch {
- case strings.HasPrefix(path, "auth/"):
- err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local)
- default:
- err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local)
- }
- if err != nil {
- mountEntry.Config.PassthroughRequestHeaders = oldVal
- return handleError(err)
- }
-
- mountEntry.SyncCache()
-
- if b.Core.logger.IsInfo() {
- b.Core.logger.Info("mount tuning of passthrough_request_headers successful", "path", path)
- }
- }
-
- var err error
- var resp *logical.Response
- var options map[string]string
- if optionsRaw, ok := data.GetOk("options"); ok {
- options = optionsRaw.(map[string]string)
- }
-
- if len(options) > 0 {
- b.Core.logger.Info("mount tuning of options", "path", path, "options", options)
- newOptions := make(map[string]string)
- var kvUpgraded bool
-
- // The version options should only apply to the KV mount, check that first
- if v, ok := options["version"]; ok {
- // Special case to make sure we cannot disable versioning once it's
- // enabled. If the vkv backend supports downgrading, this can be removed.
- meVersion, err := parseutil.ParseInt(mountEntry.Options["version"])
- if err != nil {
- return nil, errwrap.Wrapf("unable to parse mount entry: {{err}}", err)
- }
- optVersion, err := parseutil.ParseInt(v)
- if err != nil {
- return handleError(errwrap.Wrapf("unable to parse options: {{err}}", err))
- }
-
- // Only accept valid versions
- switch optVersion {
- case 1:
- case 2:
- default:
- return logical.ErrorResponse(fmt.Sprintf("invalid version provided: %d", optVersion)), logical.ErrInvalidRequest
- }
-
- if meVersion > optVersion {
- // Return early if version option asks for a downgrade
- return logical.ErrorResponse(fmt.Sprintf("cannot downgrade mount from version %d", meVersion)), logical.ErrInvalidRequest
- }
- if meVersion < optVersion {
- kvUpgraded = true
- resp = &logical.Response{}
- resp.AddWarning(fmt.Sprintf("Upgrading mount from version %d to version %d. This mount will be unavailable for a brief period and will resume service shortly.", meVersion, optVersion))
- }
- }
-
- // Upsert options value to a copy of the existing mountEntry's options
- for k, v := range mountEntry.Options {
- newOptions[k] = v
- }
- for k, v := range options {
- // If the value of the provided option is empty, delete the key. We
- // special-case the version value here to guard against KV downgrades, but
- // this piece could potentially be refactored in the future to be non-KV
- // specific.
- if len(v) == 0 && k != "version" {
- delete(newOptions, k)
- } else {
- newOptions[k] = v
- }
- }
-
- // Update the mount table
- oldVal := mountEntry.Options
- mountEntry.Options = newOptions
- switch {
- case strings.HasPrefix(path, "auth/"):
- err = b.Core.persistAuth(ctx, b.Core.auth, &mountEntry.Local)
- default:
- err = b.Core.persistMounts(ctx, b.Core.mounts, &mountEntry.Local)
- }
- if err != nil {
- mountEntry.Options = oldVal
- return handleError(err)
- }
-
- // Reload the backend to kick off the upgrade process. This should only apply to the KV
- // backend, so we trigger based on the version logic above.
- if kvUpgraded {
- b.Core.reloadBackendCommon(ctx, mountEntry, strings.HasPrefix(path, credentialRoutePrefix))
- }
- }
-
- return resp, nil
-}
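-
-// Illustrative sketch (not part of the original file) of the option upsert
-// semantics above: existing options are copied, an empty value deletes its
-// key (except "version"), and non-empty values overwrite:
-//
-//	existing := map[string]string{"version": "1", "foo": "bar"}
-//	tuned := map[string]string{"foo": "", "baz": "qux"}
-//	// resulting newOptions: {"version": "1", "baz": "qux"}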
-
-// handleLeaseLookup is used to view the metadata for a given LeaseID
-func (b *SystemBackend) handleLeaseLookup(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- leaseID := data.Get("lease_id").(string)
- if leaseID == "" {
- return logical.ErrorResponse("lease_id must be specified"),
- logical.ErrInvalidRequest
- }
-
- leaseTimes, err := b.Core.expiration.FetchLeaseTimes(ctx, leaseID)
- if err != nil {
- b.Backend.Logger().Error("error retrieving lease", "lease_id", leaseID, "error", err)
- return handleError(err)
- }
- if leaseTimes == nil {
- return logical.ErrorResponse("invalid lease"), logical.ErrInvalidRequest
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "id": leaseID,
- "issue_time": leaseTimes.IssueTime,
- "expire_time": nil,
- "last_renewal": nil,
- "ttl": int64(0),
- },
- }
- renewable, _ := leaseTimes.renewable()
- resp.Data["renewable"] = renewable
-
- if !leaseTimes.LastRenewalTime.IsZero() {
- resp.Data["last_renewal"] = leaseTimes.LastRenewalTime
- }
- if !leaseTimes.ExpireTime.IsZero() {
- resp.Data["expire_time"] = leaseTimes.ExpireTime
- resp.Data["ttl"] = leaseTimes.ttl()
- }
- return resp, nil
-}
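-
-// Illustrative sketch (not part of the original file): for a lease that has
-// never been renewed and has no recorded expiry, the handler above leaves
-// the zero-valued placeholders in place:
-//
-//	resp.Data["last_renewal"] // nil until LastRenewalTime is non-zero
-//	resp.Data["expire_time"]  // nil until ExpireTime is non-zero
-//	resp.Data["ttl"]          // int64(0) until an expire time exists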
-
-func (b *SystemBackend) handleLeaseLookupList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- prefix := data.Get("prefix").(string)
- if prefix != "" && !strings.HasSuffix(prefix, "/") {
- prefix = prefix + "/"
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- view := b.Core.expiration.leaseView(ns)
- keys, err := view.List(ctx, prefix)
- if err != nil {
- b.Backend.Logger().Error("error listing leases", "prefix", prefix, "error", err)
- return handleErrorNoReadOnlyForward(err)
- }
- return logical.ListResponse(keys), nil
-}
-
-// handleRenew is used to renew a lease with a given LeaseID
-func (b *SystemBackend) handleRenew(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get all the options
- leaseID := data.Get("lease_id").(string)
- if leaseID == "" {
- leaseID = data.Get("url_lease_id").(string)
- }
- if leaseID == "" {
- return logical.ErrorResponse("lease_id must be specified"),
- logical.ErrInvalidRequest
- }
- incrementRaw := data.Get("increment").(int)
-
- // Convert the increment
- increment := time.Duration(incrementRaw) * time.Second
-
- // Invoke the expiration manager directly
- resp, err := b.Core.expiration.Renew(ctx, leaseID, increment)
- if err != nil {
- b.Backend.Logger().Error("lease renewal failed", "lease_id", leaseID, "error", err)
- return handleErrorNoReadOnlyForward(err)
- }
- return resp, err
-}
-
-// handleRevoke is used to revoke a given LeaseID
-func (b *SystemBackend) handleRevoke(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get all the options
- leaseID := data.Get("lease_id").(string)
- if leaseID == "" {
- leaseID = data.Get("url_lease_id").(string)
- }
- if leaseID == "" {
- return logical.ErrorResponse("lease_id must be specified"),
- logical.ErrInvalidRequest
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- revokeCtx := namespace.ContextWithNamespace(b.Core.activeContext, ns)
- if data.Get("sync").(bool) {
- // Invoke the expiration manager directly
- if err := b.Core.expiration.Revoke(revokeCtx, leaseID); err != nil {
- b.Backend.Logger().Error("lease revocation failed", "lease_id", leaseID, "error", err)
- return handleErrorNoReadOnlyForward(err)
- }
-
- return nil, nil
- }
-
- if err := b.Core.expiration.LazyRevoke(revokeCtx, leaseID); err != nil {
- b.Backend.Logger().Error("lease revocation failed", "lease_id", leaseID, "error", err)
- return handleErrorNoReadOnlyForward(err)
- }
-
- return logical.RespondWithStatusCode(nil, nil, http.StatusAccepted)
-}
-
-// handleRevokePrefix is used to revoke a prefix with many LeaseIDs
-func (b *SystemBackend) handleRevokePrefix(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRevokePrefixCommon(ctx, req, data, false, data.Get("sync").(bool))
-}
-
-// handleRevokeForce is used to revoke a prefix with many LeaseIDs, ignoring errors
-func (b *SystemBackend) handleRevokeForce(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return b.handleRevokePrefixCommon(ctx, req, data, true, true)
-}
-
-// handleRevokePrefixCommon is used to revoke a prefix with many LeaseIDs
-func (b *SystemBackend) handleRevokePrefixCommon(ctx context.Context,
- req *logical.Request, data *framework.FieldData, force, sync bool) (*logical.Response, error) {
- // Get all the options
- prefix := data.Get("prefix").(string)
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- // Invoke the expiration manager directly
- revokeCtx := namespace.ContextWithNamespace(b.Core.activeContext, ns)
- if force {
- err = b.Core.expiration.RevokeForce(revokeCtx, prefix)
- } else {
- err = b.Core.expiration.RevokePrefix(revokeCtx, prefix, sync)
- }
- if err != nil {
- b.Backend.Logger().Error("revoke prefix failed", "prefix", prefix, "error", err)
- return handleErrorNoReadOnlyForward(err)
- }
-
- if sync {
- return nil, nil
- }
-
- return logical.RespondWithStatusCode(nil, nil, http.StatusAccepted)
-}
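-
-// Illustrative mapping (a sketch, not from the original file) of how the two
-// entry points above parameterize handleRevokePrefixCommon:
-//
-//	handleRevokePrefix -> RevokePrefix(revokeCtx, prefix, sync) // stops on errors
-//	handleRevokeForce  -> RevokeForce(revokeCtx, prefix)        // ignores backend errors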
-
-// handleAuthTable handles the "auth" endpoint to provide the auth table
-func (b *SystemBackend) handleAuthTable(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- b.Core.authLock.RLock()
- defer b.Core.authLock.RUnlock()
-
- resp := &logical.Response{
- Data: make(map[string]interface{}),
- }
-
- for _, entry := range b.Core.auth.Entries {
- // Only show entries for current namespace
- if entry.Namespace().Path != ns.Path {
- continue
- }
-
- cont, err := b.Core.checkReplicatedFiltering(ctx, entry, credentialRoutePrefix)
- if err != nil {
- return nil, err
- }
- if cont {
- continue
- }
-
- info := mountInfo(entry)
- resp.Data[entry.Path] = info
- }
-
- return resp, nil
-}
-
-// handleEnableAuth is used to enable a new credential backend
-func (b *SystemBackend) handleEnableAuth(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.ReplicationState()
- local := data.Get("local").(bool)
- if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
- }
-
- // Get all the options
- path := data.Get("path").(string)
- path = sanitizeMountPath(path)
- logicalType := data.Get("type").(string)
- description := data.Get("description").(string)
- pluginName := data.Get("plugin_name").(string)
- sealWrap := data.Get("seal_wrap").(bool)
- options := data.Get("options").(map[string]string)
-
- var config MountConfig
- var apiConfig APIMountConfig
-
- configMap := data.Get("config").(map[string]interface{})
- if len(configMap) != 0 {
- err := mapstructure.Decode(configMap, &apiConfig)
- if err != nil {
- return logical.ErrorResponse(
- "unable to convert given auth config information"),
- logical.ErrInvalidRequest
- }
- }
-
- switch apiConfig.DefaultLeaseTTL {
- case "":
- case "system":
- default:
- tmpDef, err := parseutil.ParseDurationSecond(apiConfig.DefaultLeaseTTL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "unable to parse default TTL of %s: %s", apiConfig.DefaultLeaseTTL, err)),
- logical.ErrInvalidRequest
- }
- config.DefaultLeaseTTL = tmpDef
- }
-
- switch apiConfig.MaxLeaseTTL {
- case "":
- case "system":
- default:
- tmpMax, err := parseutil.ParseDurationSecond(apiConfig.MaxLeaseTTL)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "unable to parse max TTL of %s: %s", apiConfig.MaxLeaseTTL, err)),
- logical.ErrInvalidRequest
- }
- config.MaxLeaseTTL = tmpMax
- }
-
- if config.MaxLeaseTTL != 0 && config.DefaultLeaseTTL > config.MaxLeaseTTL {
- return logical.ErrorResponse(
- "given default lease TTL greater than given max lease TTL"),
- logical.ErrInvalidRequest
- }
-
- if config.DefaultLeaseTTL > b.Core.maxLeaseTTL && config.MaxLeaseTTL == 0 {
- return logical.ErrorResponse(fmt.Sprintf(
- "given default lease TTL greater than system max lease TTL of %d", int(b.Core.maxLeaseTTL.Seconds()))),
- logical.ErrInvalidRequest
- }
-
- switch apiConfig.TokenType {
- case "", "default-service":
- config.TokenType = logical.TokenTypeDefaultService
- case "default-batch":
- config.TokenType = logical.TokenTypeDefaultBatch
- case "service":
- config.TokenType = logical.TokenTypeService
- case "batch":
- config.TokenType = logical.TokenTypeBatch
- default:
- return logical.ErrorResponse("invalid value for 'token_type'"), logical.ErrInvalidRequest
- }
-
- switch logicalType {
- case "":
- return logical.ErrorResponse(
- "backend type must be specified as a string"),
- logical.ErrInvalidRequest
- case "plugin":
- // Only set plugin name if mount is of type plugin, with apiConfig.PluginName
- // option taking precedence.
- switch {
- case apiConfig.PluginName != "":
- logicalType = apiConfig.PluginName
- case pluginName != "":
- logicalType = pluginName
- default:
- return logical.ErrorResponse(
- "plugin_name must be provided for plugin backend"),
- logical.ErrInvalidRequest
- }
- }
-
- if options != nil && options["version"] != "" {
- return logical.ErrorResponse(fmt.Sprintf(
- "auth method %q does not allow setting a version", logicalType)),
- logical.ErrInvalidRequest
- }
-
- if err := checkListingVisibility(apiConfig.ListingVisibility); err != nil {
- return logical.ErrorResponse(fmt.Sprintf("invalid listing_visibility %s", apiConfig.ListingVisibility)), nil
- }
- config.ListingVisibility = apiConfig.ListingVisibility
-
- if len(apiConfig.AuditNonHMACRequestKeys) > 0 {
- config.AuditNonHMACRequestKeys = apiConfig.AuditNonHMACRequestKeys
- }
- if len(apiConfig.AuditNonHMACResponseKeys) > 0 {
- config.AuditNonHMACResponseKeys = apiConfig.AuditNonHMACResponseKeys
- }
- if len(apiConfig.PassthroughRequestHeaders) > 0 {
- config.PassthroughRequestHeaders = apiConfig.PassthroughRequestHeaders
- }
-
- // Create the mount entry
- me := &MountEntry{
- Table: credentialTableType,
- Path: path,
- Type: logicalType,
- Description: description,
- Config: config,
- Local: local,
- SealWrap: sealWrap,
- Options: options,
- }
-
- // Attempt enabling
- if err := b.Core.enableCredential(ctx, me); err != nil {
- b.Backend.Logger().Error("enable auth mount failed", "path", me.Path, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleDisableAuth is used to disable a credential backend
-func (b *SystemBackend) handleDisableAuth(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- path = sanitizeMountPath(path)
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- fullPath := credentialRoutePrefix + path
-
- repState := b.Core.ReplicationState()
- entry := b.Core.router.MatchingMountEntry(ctx, fullPath)
- if entry != nil && !entry.Local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil
- }
-
- // We return success when the mount does not exist so as not to reveal
- // whether the mount existed
- match := b.Core.router.MatchingMount(ctx, fullPath)
- if match == "" || ns.Path+fullPath != match {
- return nil, nil
- }
-
- prefix, found := b.Core.router.MatchingStoragePrefixByAPIPath(ctx, fullPath)
- if !found {
- b.Backend.Logger().Error("unable to find storage for path", "path", fullPath)
- return handleError(fmt.Errorf("unable to find storage for path: %q", fullPath))
- }
-
- // Attempt disable
- if err := b.Core.disableCredential(ctx, path); err != nil {
- b.Backend.Logger().Error("disable auth mount failed", "path", path, "error", err)
- return handleError(err)
- }
-
- // Remove from filtered mounts
- if err := b.Core.removePrefixFromFilteredPaths(ctx, prefix); err != nil {
- b.Backend.Logger().Error("filtered path removal failed", path, "error", err)
- return handleError(err)
- }
-
- return nil, nil
-}
-
-// handlePoliciesList handles /sys/policy/ and /sys/policies/ endpoints to provide the enabled policies
-func (b *SystemBackend) handlePoliciesList(policyType PolicyType) framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- policies, err := b.Core.policyStore.ListPolicies(ctx, policyType)
- if err != nil {
- return nil, err
- }
-
- switch policyType {
- case PolicyTypeACL:
- // Add the special "root" policy if we are at the root namespace
- if ns.ID == namespace.RootNamespaceID {
- policies = append(policies, "root")
- }
- resp := logical.ListResponse(policies)
-
- // If the request is from sys/policy/ we handle backwards compatibility
- if strings.HasPrefix(req.Path, "policy") {
- resp.Data["policies"] = resp.Data["keys"]
- }
- return resp, nil
-
- case PolicyTypeRGP:
- return logical.ListResponse(policies), nil
-
- case PolicyTypeEGP:
- nsScopedKeyInfo := getEGPListResponseKeyInfo(b, ns)
- return &logical.Response{
- Data: map[string]interface{}{
- "keys": policies,
- "key_info": nsScopedKeyInfo,
- },
- }, nil
- }
-
- return logical.ErrorResponse("unknown policy type"), nil
- }
-}
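-
-// Illustrative sketch (not part of the original file): in the root namespace
-// the built-in "root" policy is appended to the stored ACL policies, so a
-// store containing only "default" lists as:
-//
-//	policies := []string{"default"}     // from ListPolicies
-//	policies = append(policies, "root") // injected above
-//	// resp.Data["keys"] == []string{"default", "root"}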
-
-// handlePoliciesRead handles the "/sys/policy/" and "/sys/policies/<type>/" endpoints to read a policy
-func (b *SystemBackend) handlePoliciesRead(policyType PolicyType) framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- policy, err := b.Core.policyStore.GetPolicy(ctx, name, policyType)
- if err != nil {
- return handleError(err)
- }
-
- if policy == nil {
- return nil, nil
- }
-
- // If the request is from sys/policy/ we handle backwards compatibility
- var respDataPolicyName string
- if policyType == PolicyTypeACL && strings.HasPrefix(req.Path, "policy") {
- respDataPolicyName = "rules"
- } else {
- respDataPolicyName = "policy"
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "name": policy.Name,
- respDataPolicyName: policy.Raw,
- },
- }
-
- switch policy.Type {
- case PolicyTypeRGP, PolicyTypeEGP:
- addSentinelPolicyData(resp.Data, policy)
- }
-
- return resp, nil
- }
-}
-
-// handlePoliciesSet handles the "/sys/policy/" and "/sys/policies/<type>/" endpoints to set a policy
-func (b *SystemBackend) handlePoliciesSet(policyType PolicyType) framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- var resp *logical.Response
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- policy := &Policy{
- Name: strings.ToLower(data.Get("name").(string)),
- Type: policyType,
- namespace: ns,
- }
- if policy.Name == "" {
- return logical.ErrorResponse("policy name must be provided in the URL"), nil
- }
-
- policy.Raw = data.Get("policy").(string)
- if policy.Raw == "" && policyType == PolicyTypeACL && strings.HasPrefix(req.Path, "policy") {
- policy.Raw = data.Get("rules").(string)
- if resp == nil {
- resp = &logical.Response{}
- }
- resp.AddWarning("'rules' is deprecated, please use 'policy' instead")
- }
- if policy.Raw == "" {
- return logical.ErrorResponse("'policy' parameter not supplied or empty"), nil
- }
-
- if polBytes, err := base64.StdEncoding.DecodeString(policy.Raw); err == nil {
- policy.Raw = string(polBytes)
- }
-
- switch policyType {
- case PolicyTypeACL:
- p, err := ParseACLPolicy(ns, policy.Raw)
- if err != nil {
- return handleError(err)
- }
- policy.Paths = p.Paths
- policy.Templated = p.Templated
-
- case PolicyTypeRGP, PolicyTypeEGP:
-
- default:
- return logical.ErrorResponse("unknown policy type"), nil
- }
-
- if policy.Type == PolicyTypeRGP || policy.Type == PolicyTypeEGP {
- if errResp := inputSentinelPolicyData(data, policy); errResp != nil {
- return errResp, nil
- }
- }
-
- // Update the policy
- if err := b.Core.policyStore.SetPolicy(ctx, policy); err != nil {
- return handleError(err)
- }
- return resp, nil
- }
-}
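-
-// Illustrative sketch (not part of the original file): because base64
-// decoding is attempted and kept only when it succeeds, the handler above
-// accepts the same policy either raw or base64-encoded:
-//
-//	raw := `path "secret/*" { capabilities = ["read"] }`
-//	b64 := base64.StdEncoding.EncodeToString([]byte(raw))
-//	// submitting raw or b64 yields the same policy.Raw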
-
-func (b *SystemBackend) handlePoliciesDelete(policyType PolicyType) framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("name").(string)
-
- if err := b.Core.policyStore.DeletePolicy(ctx, name, policyType); err != nil {
- return handleError(err)
- }
- return nil, nil
- }
-}
-
-// handleAuditTable handles the "audit" endpoint to provide the audit table
-func (b *SystemBackend) handleAuditTable(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.Core.auditLock.RLock()
- defer b.Core.auditLock.RUnlock()
-
- resp := &logical.Response{
- Data: make(map[string]interface{}),
- }
- for _, entry := range b.Core.audit.Entries {
- info := map[string]interface{}{
- "path": entry.Path,
- "type": entry.Type,
- "description": entry.Description,
- "options": entry.Options,
- "local": entry.Local,
- }
- resp.Data[entry.Path] = info
- }
- return resp, nil
-}
-
-// handleAuditHash is used to fetch the hash of the given input data with the
-// specified audit backend's salt
-func (b *SystemBackend) handleAuditHash(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- input := data.Get("input").(string)
- if input == "" {
- return logical.ErrorResponse("the \"input\" parameter is empty"), nil
- }
-
- path = sanitizeMountPath(path)
-
- hash, err := b.Core.auditBroker.GetHash(ctx, path, input)
- if err != nil {
- return logical.ErrorResponse(err.Error()), nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "hash": hash,
- },
- }, nil
-}
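-
-// Illustrative usage sketch (not part of the original file; the audit mount
-// path "file/" is assumed):
-//
-//	hash, err := b.Core.auditBroker.GetHash(ctx, "file/", "some-secret")
-//	// on success, hash is the salted HMAC of the input, matching what
-//	// appears in that audit device's logs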
-
-// handleEnableAudit is used to enable a new audit backend
-func (b *SystemBackend) handleEnableAudit(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.ReplicationState()
-
- local := data.Get("local").(bool)
- if !local && repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
- }
-
- // Get all the options
- path := data.Get("path").(string)
- backendType := data.Get("type").(string)
- description := data.Get("description").(string)
- options := data.Get("options").(map[string]string)
-
- // Create the mount entry
- me := &MountEntry{
- Table: auditTableType,
- Path: path,
- Type: backendType,
- Description: description,
- Options: options,
- Local: local,
- }
-
- // Attempt enabling
- if err := b.Core.enableAudit(ctx, me, true); err != nil {
- b.Backend.Logger().Error("enable audit mount failed", "path", me.Path, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-// handleDisableAudit is used to disable an audit backend
-func (b *SystemBackend) handleDisableAudit(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
-
- // Attempt disable
- if existed, err := b.Core.disableAudit(ctx, path, true); existed && err != nil {
- b.Backend.Logger().Error("disable audit mount failed", "path", path, "error", err)
- return handleError(err)
- }
- return nil, nil
-}
-
-func (b *SystemBackend) handleConfigUIHeadersRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- header := data.Get("header").(string)
-
- value, err := b.Core.uiConfig.GetHeader(ctx, header)
- if err != nil {
- return nil, err
- }
- if value == "" {
- return nil, nil
- }
-
- return &logical.Response{
- Data: map[string]interface{}{
- "value": value,
- },
- }, nil
-}
-
-func (b *SystemBackend) handleConfigUIHeadersList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- headers, err := b.Core.uiConfig.HeaderKeys(ctx)
- if err != nil {
- return nil, err
- }
- if len(headers) == 0 {
- return nil, nil
- }
-
- return logical.ListResponse(headers), nil
-}
-
-func (b *SystemBackend) handleConfigUIHeadersUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- header := data.Get("header").(string)
- values := data.Get("values").([]string)
- if header == "" || len(values) == 0 {
- return logical.ErrorResponse("header and values must be specified"), logical.ErrInvalidRequest
- }
-
- lowerHeader := strings.ToLower(header)
- if strings.HasPrefix(lowerHeader, "x-vault-") {
- return logical.ErrorResponse("X-Vault headers cannot be set"), logical.ErrInvalidRequest
- }
-
- // Translate the list of values to the valid header string
- value := http.Header{}
- for _, v := range values {
- value.Add(header, v)
- }
- err := b.Core.uiConfig.SetHeader(ctx, header, value.Get(header))
- if err != nil {
- return nil, err
- }
-
- // Warn when overriding the CSP
- resp := &logical.Response{}
- if lowerHeader == "content-security-policy" {
- resp.AddWarning("overriding default Content-Security-Policy which is secure by default, proceed with caution")
- }
-
- return resp, nil
-}
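-
-// Illustrative sketch (not part of the original file): as written, only the
-// first of several submitted values survives, since http.Header.Get returns
-// the first value set for a key:
-//
-//	h := http.Header{}
-//	h.Add("X-Custom", "a")
-//	h.Add("X-Custom", "b")
-//	_ = h.Get("X-Custom") // "a"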
-
-func (b *SystemBackend) handleConfigUIHeadersDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- header := data.Get("header").(string)
- err := b.Core.uiConfig.DeleteHeader(ctx, header)
- if err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-// handleRawRead is used to read directly from the barrier
-func (b *SystemBackend) handleRawRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
-
- // Prevent access of protected paths
- for _, p := range protectedPaths {
- if strings.HasPrefix(path, p) {
- err := fmt.Sprintf("cannot read '%s'", path)
- return logical.ErrorResponse(err), logical.ErrInvalidRequest
- }
- }
-
- entry, err := b.Core.barrier.Get(ctx, path)
- if err != nil {
- return handleErrorNoReadOnlyForward(err)
- }
- if entry == nil {
- return nil, nil
- }
-
- // Run this through the decompression helper to see if it's been compressed.
- // If the input contained the compression canary, `outputBytes` will hold
- // the decompressed data. If the input was not compressed, then `outputBytes`
- // will be nil.
- outputBytes, _, err := compressutil.Decompress(entry.Value)
- if err != nil {
- return handleErrorNoReadOnlyForward(err)
- }
-
- // `outputBytes` is nil if the input is uncompressed. In that case set it to the original input.
- if outputBytes == nil {
- outputBytes = entry.Value
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "value": string(outputBytes),
- },
- }
- return resp, nil
-}
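-
-// Illustrative sketch (not part of the original file) of the decompression
-// contract relied on above:
-//
-//	out, _, err := compressutil.Decompress(entry.Value)
-//	if err == nil && out == nil {
-//		out = entry.Value // no compression canary: input was stored raw
-//	}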
-
-// handleRawWrite is used to write directly to the barrier
-func (b *SystemBackend) handleRawWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
-
- // Prevent access of protected paths
- for _, p := range protectedPaths {
- if strings.HasPrefix(path, p) {
- err := fmt.Sprintf("cannot write '%s'", path)
- return logical.ErrorResponse(err), logical.ErrInvalidRequest
- }
- }
-
- value := data.Get("value").(string)
- entry := &Entry{
- Key: path,
- Value: []byte(value),
- }
- if err := b.Core.barrier.Put(ctx, entry); err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- return nil, nil
-}
-
-// handleRawDelete is used to delete directly from the barrier
-func (b *SystemBackend) handleRawDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
-
- // Prevent access of protected paths
- for _, p := range protectedPaths {
- if strings.HasPrefix(path, p) {
- err := fmt.Sprintf("cannot delete '%s'", path)
- return logical.ErrorResponse(err), logical.ErrInvalidRequest
- }
- }
-
- if err := b.Core.barrier.Delete(ctx, path); err != nil {
- return handleErrorNoReadOnlyForward(err)
- }
- return nil, nil
-}
-
-// handleRawList is used to list directly from the barrier
-func (b *SystemBackend) handleRawList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- path := data.Get("path").(string)
- if path != "" && !strings.HasSuffix(path, "/") {
- path = path + "/"
- }
-
- // Prevent access of protected paths
- for _, p := range protectedPaths {
- if strings.HasPrefix(path, p) {
- err := fmt.Sprintf("cannot list '%s'", path)
- return logical.ErrorResponse(err), logical.ErrInvalidRequest
- }
- }
-
- keys, err := b.Core.barrier.List(ctx, path)
- if err != nil {
- return handleErrorNoReadOnlyForward(err)
- }
- return logical.ListResponse(keys), nil
-}
-
-// handleKeyStatus returns status information about the backend key
-func (b *SystemBackend) handleKeyStatus(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Get the key info
- info, err := b.Core.barrier.ActiveKeyInfo()
- if err != nil {
- return nil, err
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "term": info.Term,
- "install_time": info.InstallTime.Format(time.RFC3339Nano),
- },
- }
- return resp, nil
-}
-
-// handleRotate is used to trigger a key rotation
-func (b *SystemBackend) handleRotate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- repState := b.Core.ReplicationState()
- if repState.HasState(consts.ReplicationPerformanceSecondary) {
- return logical.ErrorResponse("cannot rotate on a replication secondary"), nil
- }
-
- // Rotate to the new term
- newTerm, err := b.Core.barrier.Rotate(ctx)
- if err != nil {
- b.Backend.Logger().Error("failed to create new encryption key", "error", err)
- return handleError(err)
- }
- b.Backend.Logger().Info("installed new encryption key")
-
- // In HA mode, we need an upgrade path for the standby instances
- if b.Core.ha != nil {
- // Create the upgrade path to the new term
- if err := b.Core.barrier.CreateUpgrade(ctx, newTerm); err != nil {
- b.Backend.Logger().Error("failed to create new upgrade", "term", newTerm, "error", err)
- }
-
- // Schedule the destroy of the upgrade path
- time.AfterFunc(keyRotateGracePeriod, func() {
- if err := b.Core.barrier.DestroyUpgrade(ctx, newTerm); err != nil {
- b.Backend.Logger().Error("failed to destroy upgrade", "term", newTerm, "error", err)
- }
- })
- }
-
- // Write to the canary path, which will force a synchronous truing during
- // replication
- if err := b.Core.barrier.Put(ctx, &Entry{
- Key: coreKeyringCanaryPath,
- Value: []byte(fmt.Sprintf("new-rotation-term-%d", newTerm)),
- }); err != nil {
- b.Core.logger.Error("error saving keyring canary", "error", err)
- return nil, errwrap.Wrapf("failed to save keyring canary: {{err}}", err)
- }
-
- return nil, nil
-}
-
-func (b *SystemBackend) handleWrappingPubkey(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- x, _ := b.Core.wrappingJWTKey.X.MarshalText()
- y, _ := b.Core.wrappingJWTKey.Y.MarshalText()
- return &logical.Response{
- Data: map[string]interface{}{
- "jwt_x": string(x),
- "jwt_y": string(y),
- "jwt_curve": corePrivateKeyTypeP521,
- },
- }, nil
-}
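-
-// Illustrative sketch (not part of the original file): big.Int.MarshalText
-// renders the value in base 10, so jwt_x and jwt_y are decimal strings:
-//
-//	n := big.NewInt(42)
-//	txt, _ := n.MarshalText() // []byte("42")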
-
-func (b *SystemBackend) handleWrappingWrap(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if req.WrapInfo == nil || req.WrapInfo.TTL == 0 {
- return logical.ErrorResponse("endpoint requires response wrapping to be used"), logical.ErrInvalidRequest
- }
-
- // N.B.: Do *NOT* allow JWT wrapping tokens to be created through this
- // endpoint. JWTs are signed so if we don't allow users to create wrapping
- // tokens using them we can ensure that an operator can't spoof a legit JWT
- // wrapped token, which makes certain init/rekey/generate-root cases have
- // better properties.
- req.WrapInfo.Format = "uuid"
-
- return &logical.Response{
- Data: data.Raw,
- }, nil
-}
-
-// handleWrappingUnwrap will unwrap a response wrapping token or complete a
-// request that required a control group.
-func (b *SystemBackend) handleWrappingUnwrap(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // If a third party is unwrapping (rather than the calling token being the
- // wrapping token) we detect this so that we can revoke the original
- // wrapping token after reading it
- var thirdParty bool
-
- token := data.Get("token").(string)
- if token != "" {
- thirdParty = true
- } else {
- token = req.ClientToken
- }
-
- // Get the policies so we can determine if this is a normal response
- // wrapping request or a control group token.
- //
- // We use lookupTainted here because the token might have already been used
- // by handleRequest(), this happens when it's a normal response wrapping
- // request and the token was provided "first party". We want to inspect the
- // token policies but will not use this token entry for anything else.
- te, err := b.Core.tokenStore.lookupTainted(ctx, token)
- if err != nil {
- return nil, err
- }
- if te == nil {
- return nil, nil
- }
- if len(te.Policies) != 1 {
- return nil, errors.New("token is not a valid unwrap token")
- }
-
- unwrapNS, err := NamespaceByID(ctx, te.NamespaceID, b.Core)
- if err != nil {
- return nil, err
- }
- unwrapCtx := namespace.ContextWithNamespace(ctx, unwrapNS)
-
- var response string
- switch te.Policies[0] {
- case controlGroupPolicyName:
- response, err = controlGroupUnwrap(unwrapCtx, b, token, thirdParty)
- case responseWrappingPolicyName:
- response, err = b.responseWrappingUnwrap(unwrapCtx, te, thirdParty)
- }
- if err != nil {
- var respErr *logical.Response
- if len(response) > 0 {
- respErr = logical.ErrorResponse(response)
- }
-
- return respErr, err
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{},
- }
-
- // Most of the time we want to just send over the marshalled HTTP bytes.
- // However, there is a separate case: if the original response used bare
- // values, we need to use those or else what comes back is garbled.
- httpResp := &logical.HTTPResponse{}
- err = jsonutil.DecodeJSON([]byte(response), httpResp)
- if err != nil {
- return nil, errwrap.Wrapf("error decoding wrapped response: {{err}}", err)
- }
- if httpResp.Data != nil &&
- (httpResp.Data[logical.HTTPStatusCode] != nil ||
- httpResp.Data[logical.HTTPRawBody] != nil ||
- httpResp.Data[logical.HTTPContentType] != nil) {
- if httpResp.Data[logical.HTTPStatusCode] != nil {
- resp.Data[logical.HTTPStatusCode] = httpResp.Data[logical.HTTPStatusCode]
- }
- if httpResp.Data[logical.HTTPContentType] != nil {
- resp.Data[logical.HTTPContentType] = httpResp.Data[logical.HTTPContentType]
- }
-
- rawBody := httpResp.Data[logical.HTTPRawBody]
- if rawBody != nil {
- // Decode here so that we can audit properly
- switch rawBody.(type) {
- case string:
- // Best effort decoding; if this works, the original value was
- // probably a []byte instead of a string, but was marshaled
- // when the value was saved, so this restores it as it was
- decBytes, err := base64.StdEncoding.DecodeString(rawBody.(string))
- if err == nil {
- // We end up with []byte, will not be HMAC'd
- resp.Data[logical.HTTPRawBody] = decBytes
- } else {
- // We end up with string, will be HMAC'd
- resp.Data[logical.HTTPRawBody] = rawBody
- }
- default:
- b.Core.Logger().Error("unexpected type of raw body when decoding wrapped token", "type", fmt.Sprintf("%T", rawBody))
- }
-
- resp.Data[logical.HTTPRawBodyAlreadyJSONDecoded] = true
- }
-
- return resp, nil
- }
-
- if len(response) == 0 {
- resp.Data[logical.HTTPStatusCode] = 204
- } else {
- resp.Data[logical.HTTPStatusCode] = 200
- resp.Data[logical.HTTPRawBody] = []byte(response)
- resp.Data[logical.HTTPContentType] = "application/json"
- }
-
- return resp, nil
-}
-
-// responseWrappingUnwrap will read the stored response in the cubbyhole and
-// return the raw HTTP response.
-func (b *SystemBackend) responseWrappingUnwrap(ctx context.Context, te *logical.TokenEntry, thirdParty bool) (string, error) {
- tokenID := te.ID
- if thirdParty {
- // Use the token to decrement the use count to avoid a second operation on the token.
- _, err := b.Core.tokenStore.UseTokenByID(ctx, tokenID)
- if err != nil {
- return "", errwrap.Wrapf("error decrementing wrapping token's use-count: {{err}}", err)
- }
-
- defer b.Core.tokenStore.revokeOrphan(ctx, tokenID)
- }
-
- cubbyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "cubbyhole/response",
- ClientToken: tokenID,
- }
- cubbyReq.SetTokenEntry(te)
- cubbyResp, err := b.Core.router.Route(ctx, cubbyReq)
- if err != nil {
- return "", errwrap.Wrapf("error looking up wrapping information: {{err}}", err)
- }
- if cubbyResp == nil {
- return "no information found; wrapping token may be from a previous Vault version", ErrInternalError
- }
- if cubbyResp.IsError() {
- return cubbyResp.Error().Error(), nil
- }
- if cubbyResp.Data == nil {
- return "wrapping information was nil; wrapping token may be from a previous Vault version", ErrInternalError
- }
-
- responseRaw := cubbyResp.Data["response"]
- if responseRaw == nil {
- return "", fmt.Errorf("no response found inside the cubbyhole")
- }
- response, ok := responseRaw.(string)
- if !ok {
- return "", fmt.Errorf("could not decode response inside the cubbyhole")
- }
-
- return response, nil
-}
-
-func (b *SystemBackend) handleWrappingLookup(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // This ordering of lookups has already been validated in the wrapping
- // validation func; we're just doing this as a safety check
- token := data.Get("token").(string)
- if token == "" {
- token = req.ClientToken
- if token == "" {
- return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest
- }
- }
-
- te, err := b.Core.tokenStore.lookupTainted(ctx, token)
- if err != nil {
- return nil, err
- }
- if te == nil {
- return nil, nil
- }
- if len(te.Policies) != 1 {
- return nil, errors.New("token is not a valid unwrap token")
- }
-
- cubbyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "cubbyhole/wrapinfo",
- ClientToken: token,
- }
- cubbyReq.SetTokenEntry(te)
- cubbyResp, err := b.Core.router.Route(ctx, cubbyReq)
- if err != nil {
- return nil, errwrap.Wrapf("error looking up wrapping information: {{err}}", err)
- }
- if cubbyResp == nil {
- return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- return cubbyResp, nil
- }
- if cubbyResp.Data == nil {
- return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
- }
-
- creationTTLRaw := cubbyResp.Data["creation_ttl"]
- creationTime := cubbyResp.Data["creation_time"]
- creationPath := cubbyResp.Data["creation_path"]
-
- resp := &logical.Response{
- Data: map[string]interface{}{},
- }
- if creationTTLRaw != nil {
- creationTTL, err := creationTTLRaw.(json.Number).Int64()
- if err != nil {
- return nil, errwrap.Wrapf("error reading creation_ttl value from wrapping information: {{err}}", err)
- }
- resp.Data["creation_ttl"] = time.Duration(creationTTL).Seconds()
- }
- if creationTime != nil {
- // This was JSON marshaled so it's already a string in RFC3339 format
- resp.Data["creation_time"] = cubbyResp.Data["creation_time"]
- }
- if creationPath != nil {
- resp.Data["creation_path"] = cubbyResp.Data["creation_path"]
- }
-
- return resp, nil
-}
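-
-// Editor's sketch of the lookup response assembled above; the concrete values
-// are hypothetical:
-//
-//	{
-//	  "creation_ttl": 300,
-//	  "creation_time": "2019-01-01T00:00:00Z",
-//	  "creation_path": "sys/wrapping/wrap"
-//	}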
-
-func (b *SystemBackend) handleWrappingRewrap(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // If a third party is rewrapping (rather than the calling token being the
- // wrapping token) we detect this so that we can revoke the original
- // wrapping token after reading it. Right now wrapped tokens can't unwrap
- // themselves, but in case we change it, this will be ready to do the right
- // thing.
- var thirdParty bool
-
- token := data.Get("token").(string)
- if token != "" {
- thirdParty = true
- } else {
- token = req.ClientToken
- }
-
- te, err := b.Core.tokenStore.lookupTainted(ctx, token)
- if err != nil {
- return nil, err
- }
- if te == nil {
- return nil, nil
- }
- if len(te.Policies) != 1 {
- return nil, errors.New("token is not a valid unwrap token")
- }
-
- if thirdParty {
- // Use the token to decrement the use count to avoid a second operation on the token.
- _, err := b.Core.tokenStore.UseTokenByID(ctx, token)
- if err != nil {
- return nil, errwrap.Wrapf("error decrementing wrapping token's use-count: {{err}}", err)
- }
- defer b.Core.tokenStore.revokeOrphan(ctx, token)
- }
-
- // Fetch the original TTL
- cubbyReq := &logical.Request{
- Operation: logical.ReadOperation,
- Path: "cubbyhole/wrapinfo",
- ClientToken: token,
- }
- cubbyReq.SetTokenEntry(te)
- cubbyResp, err := b.Core.router.Route(ctx, cubbyReq)
- if err != nil {
- return nil, errwrap.Wrapf("error looking up wrapping information: {{err}}", err)
- }
- if cubbyResp == nil {
- return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- return cubbyResp, nil
- }
- if cubbyResp.Data == nil {
- return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
- }
-
- // Set the creation TTL on the request
- creationTTLRaw := cubbyResp.Data["creation_ttl"]
- if creationTTLRaw == nil {
- return nil, fmt.Errorf("creation_ttl value in wrapping information was nil")
- }
- creationTTL, err := cubbyResp.Data["creation_ttl"].(json.Number).Int64()
- if err != nil {
- return nil, errwrap.Wrapf("error reading creation_ttl value from wrapping information: {{err}}", err)
- }
-
- // Get creation_path to return as the response later
- creationPathRaw := cubbyResp.Data["creation_path"]
- if creationPathRaw == nil {
- return nil, fmt.Errorf("creation_path value in wrapping information was nil")
- }
- creationPath := creationPathRaw.(string)
-
- // Fetch the original response and return it as the data for the new response
- cubbyReq = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "cubbyhole/response",
- ClientToken: token,
- }
- cubbyReq.SetTokenEntry(te)
- cubbyResp, err = b.Core.router.Route(ctx, cubbyReq)
- if err != nil {
- return nil, errwrap.Wrapf("error looking up response: {{err}}", err)
- }
- if cubbyResp == nil {
- return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- return cubbyResp, nil
- }
- if cubbyResp.Data == nil {
- return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
- }
-
- response := cubbyResp.Data["response"]
- if response == nil {
- return nil, fmt.Errorf("no response found inside the cubbyhole")
- }
-
- // Return response in "response"; wrapping code will detect the rewrap and
- // slot in instead of nesting
- return &logical.Response{
- Data: map[string]interface{}{
- "response": response,
- },
- WrapInfo: &wrapping.ResponseWrapInfo{
- TTL: time.Duration(creationTTL),
- CreationPath: creationPath,
- },
- }, nil
-}
-
-func (b *SystemBackend) pathHashWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- inputB64 := d.Get("input").(string)
- format := d.Get("format").(string)
- algorithm := d.Get("urlalgorithm").(string)
- if algorithm == "" {
- algorithm = d.Get("algorithm").(string)
- }
-
- input, err := base64.StdEncoding.DecodeString(inputB64)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest
- }
-
- switch format {
- case "hex":
- case "base64":
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
- }
-
- var hf hash.Hash
- switch algorithm {
- case "sha2-224":
- hf = sha256.New224()
- case "sha2-256":
- hf = sha256.New()
- case "sha2-384":
- hf = sha512.New384()
- case "sha2-512":
- hf = sha512.New()
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil
- }
- hf.Write(input)
- retBytes := hf.Sum(nil)
-
- var retStr string
- switch format {
- case "hex":
- retStr = hex.EncodeToString(retBytes)
- case "base64":
- retStr = base64.StdEncoding.EncodeToString(retBytes)
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "sum": retStr,
- },
- }
- return resp, nil
-}
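-
-// Editor's sketch (not from the original source; the helper name is
-// hypothetical): for the sha2-256/hex case the handler above reduces to
-//
-//	func hashHex256(input []byte) string {
-//		hf := sha256.New()
-//		hf.Write(input)
-//		return hex.EncodeToString(hf.Sum(nil))
-//	}
-//
-// so input "foo" (sent base64-encoded as "Zm9v") yields the sum
-// "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae".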
-
-func (b *SystemBackend) pathRandomWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- bytes := 0
- var err error
- strBytes := d.Get("urlbytes").(string)
- if strBytes != "" {
- bytes, err = strconv.Atoi(strBytes)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("error parsing url-set byte count: %s", err)), nil
- }
- } else {
- bytes = d.Get("bytes").(int)
- }
- format := d.Get("format").(string)
-
- if bytes < 1 {
- return logical.ErrorResponse(`"bytes" cannot be less than 1`), nil
- }
-
- switch format {
- case "hex":
- case "base64":
- default:
- return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil
- }
-
- randBytes, err := uuid.GenerateRandomBytes(bytes)
- if err != nil {
- return nil, err
- }
-
- var retStr string
- switch format {
- case "hex":
- retStr = hex.EncodeToString(randBytes)
- case "base64":
- retStr = base64.StdEncoding.EncodeToString(randBytes)
- }
-
- // Generate the response
- resp := &logical.Response{
- Data: map[string]interface{}{
- "random_bytes": retStr,
- },
- }
- return resp, nil
-}
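-
-// Editor's note: the output length follows from the encoding; for example,
-// bytes=16 with format="hex" returns a 32-character string, while
-// format="base64" returns 24 characters (22 plus "==" padding).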
-
-func hasMountAccess(ctx context.Context, acl *ACL, path string) bool {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return false
- }
-
- // If an earlier policy is giving us access to the mount path then we can do
- // a fast return.
- capabilities := acl.Capabilities(ctx, ns.TrimmedPath(path))
- if !strutil.StrListContains(capabilities, DenyCapability) {
- return true
- }
-
- var aclCapabilitiesGiven bool
- walkFn := func(s string, v interface{}) bool {
- if v == nil {
- return false
- }
-
- perms := v.(*ACLPermissions)
-
- switch {
- case perms.CapabilitiesBitmap&DenyCapabilityInt > 0:
- return false
-
- case perms.CapabilitiesBitmap&CreateCapabilityInt > 0,
- perms.CapabilitiesBitmap&DeleteCapabilityInt > 0,
- perms.CapabilitiesBitmap&ListCapabilityInt > 0,
- perms.CapabilitiesBitmap&ReadCapabilityInt > 0,
- perms.CapabilitiesBitmap&SudoCapabilityInt > 0,
- perms.CapabilitiesBitmap&UpdateCapabilityInt > 0:
-
- aclCapabilitiesGiven = true
- return true
- }
-
- return false
- }
-
- acl.exactRules.WalkPrefix(path, walkFn)
- if !aclCapabilitiesGiven {
- acl.globRules.WalkPrefix(path, walkFn)
- }
-
- return aclCapabilitiesGiven
-}
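-
-// Editor's sketch (hypothetical policy, not from the original source): with a
-// policy granting read on "secret/foo" only, the prefix walk above finds a
-// non-deny capability under the mount and reports access:
-//
-//	// acl: path "secret/foo" { capabilities = ["read"] }
-//	hasMountAccess(ctx, acl, "secret/") // true: WalkPrefix hits "secret/foo"
-//	hasMountAccess(ctx, acl, "other/")  // false: nothing under the prefix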
-
-func (b *SystemBackend) pathInternalUIMountsRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- resp := &logical.Response{
- Data: make(map[string]interface{}),
- }
-
- secretMounts := make(map[string]interface{})
- authMounts := make(map[string]interface{})
- resp.Data["secret"] = secretMounts
- resp.Data["auth"] = authMounts
-
- var acl *ACL
- var isAuthed bool
- if req.ClientToken != "" {
- isAuthed = true
-
- var entity *identity.Entity
- var te *logical.TokenEntry
- // Load the ACL policies so we can walk the prefix for this mount
- acl, te, entity, _, err = b.Core.fetchACLTokenEntryAndEntity(ctx, req)
- if err != nil {
- if errwrap.ContainsType(err, new(TemplateError)) {
- b.Core.logger.Warn("permission denied due to a templated policy being invalid or containing directives not satisfied by the requestor", "error", err)
- err = logical.ErrPermissionDenied
- }
- return nil, err
- }
- if entity != nil && entity.Disabled {
- b.logger.Warn("permission denied as the entity on the token is disabled")
- return nil, logical.ErrPermissionDenied
- }
- if te != nil && te.EntityID != "" && entity == nil {
- b.logger.Warn("permission denied as the entity on the token is invalid")
- return nil, logical.ErrPermissionDenied
- }
- }
-
- hasAccess := func(ctx context.Context, me *MountEntry) bool {
- if me.Config.ListingVisibility == ListingVisibilityUnauth {
- return true
- }
-
- if isAuthed {
- return hasMountAccess(ctx, acl, ns.Path+me.Path)
- }
-
- return false
- }
-
- b.Core.mountsLock.RLock()
- for _, entry := range b.Core.mounts.Entries {
- if hasAccess(ctx, entry) && ns.ID == entry.NamespaceID {
- if isAuthed {
- // If this is an authed request return all the mount info
- secretMounts[entry.Path] = mountInfo(entry)
- } else {
- secretMounts[entry.Path] = map[string]interface{}{
- "type": entry.Type,
- "description": entry.Description,
- "options": entry.Options,
- }
- }
- }
- }
- b.Core.mountsLock.RUnlock()
-
- b.Core.authLock.RLock()
- for _, entry := range b.Core.auth.Entries {
- if hasAccess(ctx, entry) && ns.ID == entry.NamespaceID {
- if isAuthed {
- // If this is an authed request return all the mount info
- authMounts[entry.Path] = mountInfo(entry)
- } else {
- authMounts[entry.Path] = map[string]interface{}{
- "type": entry.Type,
- "description": entry.Description,
- "options": entry.Options,
- }
- }
- }
- }
- b.Core.authLock.RUnlock()
-
- return resp, nil
-}
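-
-// Editor's note: for an unauthenticated caller, each visible mount is reduced
-// to the three keys set above; an illustrative (hypothetical) entry:
-//
-//	"secret": {
-//	  "kv/": {"type": "kv", "description": "key/value store", "options": {"version": "2"}},
-//	}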
-
-func (b *SystemBackend) pathInternalUIMountRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- path := d.Get("path").(string)
- if path == "" {
- return logical.ErrorResponse("path not set"), logical.ErrInvalidRequest
- }
- path = sanitizeMountPath(path)
-
- errResp := logical.ErrorResponse(fmt.Sprintf("preflight capability check returned 403, please ensure client's policies grant access to path %q", path))
-
- me := b.Core.router.MatchingMountEntry(ctx, path)
- if me == nil {
- // Return a permission denied error here so this path cannot be used to
- // brute force a list of mounts.
- return errResp, logical.ErrPermissionDenied
- }
-
- resp := &logical.Response{
- Data: mountInfo(me),
- }
- resp.Data["path"] = me.Path
-
- // Load the ACL policies so we can walk the prefix for this mount
- acl, te, entity, _, err := b.Core.fetchACLTokenEntryAndEntity(ctx, req)
- if err != nil {
- if errwrap.ContainsType(err, new(TemplateError)) {
- b.Core.logger.Warn("permission denied due to a templated policy being invalid or containing directives not satisfied by the requestor", "error", err)
- err = logical.ErrPermissionDenied
- }
- return nil, err
- }
- if entity != nil && entity.Disabled {
- b.logger.Warn("permission denied as the entity on the token is disabled")
- return errResp, logical.ErrPermissionDenied
- }
- if te != nil && te.EntityID != "" && entity == nil {
- b.logger.Warn("permission denied as the entity on the token is invalid")
- return nil, logical.ErrPermissionDenied
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- if !hasMountAccess(ctx, acl, ns.Path+me.Path) {
- return errResp, logical.ErrPermissionDenied
- }
-
- return resp, nil
-}
-
-func (b *SystemBackend) pathInternalUIResultantACL(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- if req.ClientToken == "" {
- // 204 -- no ACL
- return nil, nil
- }
-
- acl, te, entity, _, err := b.Core.fetchACLTokenEntryAndEntity(ctx, req)
- if err != nil {
- if errwrap.ContainsType(err, new(TemplateError)) {
- b.Core.logger.Warn("permission denied due to a templated policy being invalid or containing directives not satisfied by the requestor", "error", err)
- err = logical.ErrPermissionDenied
- }
- return nil, err
- }
-
- if entity != nil && entity.Disabled {
- b.logger.Warn("permission denied as the entity on the token is disabled")
- return logical.ErrorResponse(logical.ErrPermissionDenied.Error()), nil
- }
- if te != nil && te.EntityID != "" && entity == nil {
- b.logger.Warn("permission denied as the entity on the token is invalid")
- return logical.ErrorResponse(logical.ErrPermissionDenied.Error()), nil
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "root": false,
- },
- }
-
- if acl.root {
- resp.Data["root"] = true
- return resp, nil
- }
-
- exact := map[string]interface{}{}
- glob := map[string]interface{}{}
-
- walkFn := func(pt map[string]interface{}, s string, v interface{}) {
- if v == nil {
- return
- }
-
- perms := v.(*ACLPermissions)
- capabilities := []string{}
-
- if perms.CapabilitiesBitmap&CreateCapabilityInt > 0 {
- capabilities = append(capabilities, CreateCapability)
- }
- if perms.CapabilitiesBitmap&DeleteCapabilityInt > 0 {
- capabilities = append(capabilities, DeleteCapability)
- }
- if perms.CapabilitiesBitmap&ListCapabilityInt > 0 {
- capabilities = append(capabilities, ListCapability)
- }
- if perms.CapabilitiesBitmap&ReadCapabilityInt > 0 {
- capabilities = append(capabilities, ReadCapability)
- }
- if perms.CapabilitiesBitmap&SudoCapabilityInt > 0 {
- capabilities = append(capabilities, SudoCapability)
- }
- if perms.CapabilitiesBitmap&UpdateCapabilityInt > 0 {
- capabilities = append(capabilities, UpdateCapability)
- }
-
- // If "deny" is explicitly set or if the path has no capabilities at all,
- // set the path capabilities to "deny"
- if perms.CapabilitiesBitmap&DenyCapabilityInt > 0 || len(capabilities) == 0 {
- capabilities = []string{DenyCapability}
- }
-
- res := map[string]interface{}{}
- if len(capabilities) > 0 {
- res["capabilities"] = capabilities
- }
- if perms.MinWrappingTTL != 0 {
- res["min_wrapping_ttl"] = int64(perms.MinWrappingTTL.Seconds())
- }
- if perms.MaxWrappingTTL != 0 {
- res["max_wrapping_ttl"] = int64(perms.MaxWrappingTTL.Seconds())
- }
- if len(perms.AllowedParameters) > 0 {
- res["allowed_parameters"] = perms.AllowedParameters
- }
- if len(perms.DeniedParameters) > 0 {
- res["denied_parameters"] = perms.DeniedParameters
- }
- if len(perms.RequiredParameters) > 0 {
- res["required_parameters"] = perms.RequiredParameters
- }
-
- pt[s] = res
- }
-
- exactWalkFn := func(s string, v interface{}) bool {
- walkFn(exact, s, v)
- return false
- }
-
- globWalkFn := func(s string, v interface{}) bool {
- walkFn(glob, s, v)
- return false
- }
-
- acl.exactRules.Walk(exactWalkFn)
- acl.globRules.Walk(globWalkFn)
-
- resp.Data["exact_paths"] = exact
- resp.Data["glob_paths"] = glob
-
- return resp, nil
-}
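-
-// Editor's sketch of the resultant-ACL response shape produced above; paths
-// and values are hypothetical:
-//
-//	{
-//	  "root": false,
-//	  "exact_paths": {"secret/foo": {"capabilities": ["read", "update"]}},
-//	  "glob_paths":  {"secret/":    {"capabilities": ["list"], "min_wrapping_ttl": 60}}
-//	}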
-
-func (b *SystemBackend) pathInternalOpenAPI(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
-
- // Limit output to authorized paths
- resp, err := b.pathInternalUIMountsRead(ctx, req, d)
- if err != nil {
- return nil, err
- }
-
- // Set up target document and convert to map[string]interface{} which is what will
- // be received from plugin backends.
- doc := framework.NewOASDocument()
-
- procMountGroup := func(group, mountPrefix string) error {
- for mount := range resp.Data[group].(map[string]interface{}) {
- backend := b.Core.router.MatchingBackend(ctx, mountPrefix+mount)
-
- if backend == nil {
- continue
- }
-
- req := &logical.Request{
- Operation: logical.HelpOperation,
- }
-
- resp, err := backend.HandleRequest(ctx, req)
- if err != nil {
- return err
- }
-
- var backendDoc *framework.OASDocument
-
- // Normalize response type, which will be different if received
- // from an external plugin.
- switch v := resp.Data["openapi"].(type) {
- case *framework.OASDocument:
- backendDoc = v
- case map[string]interface{}:
- backendDoc, err = framework.NewOASDocumentFromMap(v)
- if err != nil {
- return err
- }
- default:
- continue
- }
-
- // Prepare to add tags to default builtins that are
- // type "unknown" and won't already be tagged.
- var tag string
- switch mountPrefix + mount {
- case "cubbyhole/", "secret/":
- tag = "secrets"
- case "sys/":
- tag = "system"
- case "auth/token/":
- tag = "auth"
- case "identity/":
- tag = "identity"
- }
-
- // Merge backend paths with existing document
- for path, obj := range backendDoc.Paths {
- path := strings.TrimPrefix(path, "/")
-
- // Add tags to all of the operations if necessary
- if tag != "" {
- for _, op := range []*framework.OASOperation{obj.Get, obj.Post, obj.Delete} {
- // TODO: a special override for identity is used here because the backend
- // is currently categorized as "secret", which will likely change. Also of interest
- // is removing all tag handling here and providing the mount information to OpenAPI.
- if op != nil && (len(op.Tags) == 0 || tag == "identity") {
- op.Tags = []string{tag}
- }
- }
- }
-
- doc.Paths["/"+mountPrefix+mount+path] = obj
- }
- }
- return nil
- }
-
- if err := procMountGroup("secret", ""); err != nil {
- return nil, err
- }
- if err := procMountGroup("auth", "auth/"); err != nil {
- return nil, err
- }
-
- buf, err := json.Marshal(doc)
- if err != nil {
- return nil, err
- }
-
- resp = &logical.Response{
- Data: map[string]interface{}{
- logical.HTTPStatusCode: 200,
- logical.HTTPRawBody: buf,
- logical.HTTPContentType: "application/json",
- },
- }
-
- return resp, nil
-}
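-
-// Editor's note: the merge above prefixes each backend path with its mount;
-// e.g. a backend path "/lookup" under the "token/" mount in the "auth" group
-// would be published as "/auth/token/lookup" and tagged "auth" (example path
-// chosen for illustration).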
-
-func sanitizeMountPath(path string) string {
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
-
- if strings.HasPrefix(path, "/") {
- path = path[1:]
- }
-
- return path
-}
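-
-// For example (editor's note): sanitizeMountPath("/secret"),
-// sanitizeMountPath("secret"), and sanitizeMountPath("secret/") all return
-// "secret/".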
-
-func checkListingVisibility(visibility ListingVisibilityType) error {
- switch visibility {
- case ListingVisibilityDefault:
- case ListingVisibilityHidden:
- case ListingVisibilityUnauth:
- default:
- return fmt.Errorf("invalid listing visilibity type")
- }
-
- return nil
-}
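-
-// Editor's note: only the three listed constants pass; assuming the usual
-// string values, checkListingVisibility("unauth") returns nil while
-// checkListingVisibility("public") returns the error above.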
-
-const sysHelpRoot = `
-The system backend is built-in to Vault and cannot be remounted or
-unmounted. It contains the paths that are used to configure Vault itself
-as well as perform core operations.
-`
-
-// sysHelp is all the help text for the sys backend.
-var sysHelp = map[string][2]string{
- "license": {
- "Sets the license of the server.",
- `
-The path responds to the following HTTP methods.
-
- GET /
- Returns information on the installed license
-
- POST
- Sets the license for the server
- `,
- },
- "config/cors": {
- "Configures or returns the current configuration of CORS settings.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Returns the configuration of the CORS setting.
-
- POST /
- Sets the comma-separated list of origins that can make cross-origin requests.
-
- DELETE /
- Clears the CORS configuration and disables acceptance of CORS requests.
- `,
- },
- "config/ui/headers": {
- "Configures response headers that should be returned from the UI.",
- `
-This path responds to the following HTTP methods.
- GET /<header>
- Returns the header value.
- POST /<header>
- Sets the header value for the UI.
- DELETE /<header>
- Clears the header value for the UI.
-
- LIST /
- List the headers configured for the UI.
- `,
- },
- "init": {
- "Initializes or returns the initialization status of the Vault.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Returns the initialization status of the Vault.
-
- POST /
- Initializes a new vault.
- `,
- },
- "generate-root": {
- "Reads, generates, or deletes a root token regeneration process.",
- `
-This path responds to multiple HTTP methods which change the behavior. Those
-HTTP methods are listed below.
-
- GET /attempt
- Reads the configuration and progress of the current root generation
- attempt.
-
- POST /attempt
- Initializes a new root generation attempt. Only a single root generation
- attempt can take place at a time. One (and only one) of otp or pgp_key
- are required.
-
- DELETE /attempt
- Cancels any in-progress root generation attempt. This clears any
- progress made. This must be called to change the OTP or PGP key being
- used.
- `,
- },
- "seal-status": {
- "Returns the seal status of the Vault.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Returns the seal status of the Vault. This is an unauthenticated
- endpoint.
- `,
- },
- "seal": {
- "Seals the Vault.",
- `
-This path responds to the following HTTP methods.
-
- PUT /
- Seals the Vault.
- `,
- },
- "unseal": {
- "Unseals the Vault.",
- `
-This path responds to the following HTTP methods.
-
- PUT /
- Unseals the Vault.
- `,
- },
- "mounts": {
- "List the currently mounted backends.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- Lists all the mounted secret backends.
-
- GET /<mount point>
- Get information about the mount at the specified path.
-
- POST /<mount point>
- Mount a new secret backend to the mount point in the URL.
-
- POST /<mount point>/tune
- Tune configuration parameters for the given mount point.
-
- DELETE /<mount point>
- Unmount the specified mount point.
- `,
- },
-
- "mount": {
- `Mount a new backend at a new path.`,
- `
-Mount a backend at a new path. A backend can be mounted multiple times at
-multiple paths in order to configure multiple separately configured backends.
-Example: you might have an AWS backend for the east coast, and one for the
-west coast.
- `,
- },
-
- "mount_path": {
- `The path to mount to. Example: "aws/east"`,
- "",
- },
-
- "mount_type": {
- `The type of the backend. Example: "passthrough"`,
- "",
- },
-
- "mount_desc": {
- `User-friendly description for this mount.`,
- "",
- },
-
- "mount_config": {
- `Configuration for this mount, such as default_lease_ttl
-and max_lease_ttl.`,
- },
-
- "mount_local": {
- `Mark the mount as a local mount, which is not replicated
-and is unaffected by replication.`,
- },
-
- "mount_plugin_name": {
- `Name of the plugin to mount, based on the name registered
-in the plugin catalog.`,
- },
-
- "mount_options": {
- `The options to pass into the backend. Should be a json object with string keys and values.`,
- },
-
- "seal_wrap": {
- `Whether to turn on seal wrapping for the mount.`,
- },
-
- "tune_default_lease_ttl": {
- `The default lease TTL for this mount.`,
- },
-
- "tune_max_lease_ttl": {
- `The max lease TTL for this mount.`,
- },
-
- "tune_audit_non_hmac_request_keys": {
- `The list of keys in the request data object that will not be HMAC'ed by audit devices.`,
- },
-
- "tune_audit_non_hmac_response_keys": {
- `The list of keys in the response data object that will not be HMAC'ed by audit devices.`,
- },
-
- "tune_mount_options": {
- `The options to pass into the backend. Should be a json object with string keys and values.`,
- },
-
- "remount": {
- "Move the mount point of an already-mounted backend.",
- `
-This path responds to the following HTTP methods.
-
- POST /sys/remount
- Changes the mount point of an already-mounted backend.
- `,
- },
-
- "auth_tune": {
- "Tune the configuration parameters for an auth path.",
- `Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of
-the auth path.`,
- },
-
- "mount_tune": {
- "Tune backend configuration parameters for this mount.",
- `Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of
-the mount.`,
- },
-
- "renew": {
- "Renew a lease on a secret",
- `
-When a secret is read, it may optionally include a lease interval
-and a boolean indicating if renew is possible. For secrets that support
-lease renewal, this endpoint is used to extend the validity of the
-lease and to prevent an automatic revocation.
- `,
- },
-
- "lease_id": {
- "The lease identifier to renew. This is included with a lease.",
- "",
- },
-
- "increment": {
- "The desired increment in seconds to the lease",
- "",
- },
-
- "revoke": {
- "Revoke a leased secret immediately",
- `
-When a secret is generated with a lease, it is automatically revoked
-at the end of the lease period if not renewed. However, in some cases
-you may want to force an immediate revocation. This endpoint can be
-used to revoke the secret with the given Lease ID.
- `,
- },
-
- "revoke-sync": {
- "Whether or not to perform the revocation synchronously",
- `
-If false, the call will return immediately and revocation will be queued; if it
-fails, Vault will keep trying. If true, if the revocation fails, Vault will not
-automatically try again and will return an error. For revoke-prefix, this
-setting will apply to all leases being revoked. For revoke-force, since errors
-are ignored, this setting is not supported.
-`,
- },
-
- "revoke-prefix": {
- "Revoke all secrets generated in a given prefix",
- `
-Revokes all the secrets generated under a given mount prefix. As
-an example, "prod/aws/" might be the AWS logical backend, and due to
-a change in the "ops" policy, we may want to invalidate all the secrets
-generated. We can do a revoke prefix at "prod/aws/ops" to revoke all
-the ops secrets. This does a prefix match on the Lease IDs and revokes
-all matching leases.
- `,
- },
-
- "revoke-prefix-path": {
- `The path to revoke keys under. Example: "prod/aws/ops"`,
- "",
- },
-
- "revoke-force": {
- "Revoke all secrets generated in a given prefix, ignoring errors.",
- `
-See the path help for 'revoke-prefix'; this behaves the same, except that it
-ignores errors encountered during revocation. This can be used in certain
-recovery situations; for instance, when you want to unmount a backend, but it
-is impossible to fix revocation errors and these errors prevent the unmount
-from proceeding. This is a DANGEROUS operation as it removes Vault's oversight
-of external secrets. Access to this prefix should be tightly controlled.
- `,
- },
-
- "revoke-force-path": {
- `The path to revoke keys under. Example: "prod/aws/ops"`,
- "",
- },
-
- "auth-table": {
- "List the currently enabled credential backends.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- List the currently enabled credential backends: the name, the type of
- the backend, and a user friendly description of the purpose for the
- credential backend.
-
- POST /<mount point>
- Enable a new auth method.
-
- DELETE /<mount point>
- Disable the auth method at the given mount point.
- `,
- },
-
- "auth": {
- `Enable a new credential backend with a name.`,
- `
-Enable a credential mechanism at a new path. A backend can be mounted multiple times at
-multiple paths in order to configure multiple separately configured backends.
-Example: you might have an OAuth backend for GitHub, and one for Google Apps.
- `,
- },
-
- "auth_path": {
- `The path to mount to. Cannot be delimited. Example: "user"`,
- "",
- },
-
- "auth_type": {
- `The type of the backend. Example: "userpass"`,
- "",
- },
-
- "auth_desc": {
- `User-friendly description for this credential backend.`,
- "",
- },
-
- "auth_config": {
- `Configuration for this mount, such as plugin_name.`,
- },
-
- "auth_plugin": {
- `Name of the auth plugin to use, based on the name in the plugin catalog.`,
- "",
- },
-
- "auth_options": {
- `The options to pass into the backend. Should be a json object with string keys and values.`,
- },
-
- "policy-list": {
- `List the configured access control policies.`,
- `
-This path responds to the following HTTP methods.
-
- GET /
- List the names of the configured access control policies.
-
- GET /<name>
- Retrieve the rules for the named policy.
-
- PUT /<name>
- Add or update a policy.
-
- DELETE /<name>
- Delete the policy with the given name.
- `,
- },
-
- "policy": {
- `Read, Modify, or Delete an access control policy.`,
- `
-Read the rules of an existing policy, create or update the rules of a policy,
-or delete a policy.
- `,
- },
-
- "policy-name": {
- `The name of the policy. Example: "ops"`,
- "",
- },
-
- "policy-rules": {
- `The rules of the policy.`,
- "",
- },
-
- "policy-paths": {
- `The paths on which the policy should be applied.`,
- "",
- },
-
- "policy-enforcement-level": {
- `The enforcement level to apply to the policy.`,
- "",
- },
-
- "audit-hash": {
- "The hash of the given string via the given audit backend",
- "",
- },
-
- "audit-table": {
- "List the currently enabled audit backends.",
- `
-This path responds to the following HTTP methods.
-
- GET /
- List the currently enabled audit backends.
-
- PUT /<path>
- Enable an audit backend at the given path.
-
- DELETE /<path>
- Disable the given audit backend.
- `,
- },
-
- "audit_path": {
- `The name of the backend. Cannot be delimited. Example: "mysql"`,
- "",
- },
-
- "audit_type": {
- `The type of the backend. Example: "mysql"`,
- "",
- },
-
- "audit_desc": {
- `User-friendly description for this audit backend.`,
- "",
- },
-
- "audit_opts": {
- `Configuration options for the audit backend.`,
- "",
- },
-
- "audit": {
- `Enable or disable audit backends.`,
- `
-Enable a new audit backend or disable an existing backend.
- `,
- },
-
- "key-status": {
- "Provides information about the backend encryption key.",
- `
- Provides the current backend encryption key term and installation time.
- `,
- },
-
- "rotate": {
- "Rotates the backend encryption key used to persist data.",
- `
- Rotate generates a new encryption key which is used to encrypt all
- data going to the storage backend. The old encryption keys are kept so
- that data encrypted using those keys can still be decrypted.
- `,
- },
-
- "rekey_backup": {
- "Allows fetching or deleting the backup of the rotated unseal keys.",
- "",
- },
-
- "capabilities": {
- "Fetches the capabilities of the given token on the given path.",
- `Returns the capabilities of the given token on the path.
- The path will be searched for a path match in all the policies associated with the token.`,
- },
-
- "capabilities_self": {
- "Fetches the capabilities of the given token on the given path.",
- `Returns the capabilities of the client token on the path.
- The path will be searched for a path match in all the policies associated with the client token.`,
- },
-
- "capabilities_accessor": {
- "Fetches the capabilities of the token associated with the given token, on the given path.",
- `When there is no access to the token, the token's accessor can be used to fetch the token's capabilities
- on a given path.`,
- },
-
- "tidy_leases": {
- `This endpoint performs cleanup tasks that can be run if certain error
-conditions have occurred.`,
- `This endpoint performs cleanup tasks that can be run to clean up the
-lease entries after certain error conditions. Usually running this is not
-necessary, and is only required if upgrade notes or support personnel suggest
-it.`,
- },
-
- "wrap": {
- "Response-wraps an arbitrary JSON object.",
- `Round trips the given input data into a response-wrapped token.`,
- },
-
- "wrappubkey": {
- "Returns pubkeys used in some wrapping formats.",
- "Returns pubkeys used in some wrapping formats.",
- },
-
- "unwrap": {
- "Unwraps a response-wrapped token.",
- `Unwraps a response-wrapped token. Unlike simply reading from cubbyhole/response,
- this provides additional validation on the token, and rather than a JSON-escaped
- string, the returned response is the exact same as the contained wrapped response.`,
- },
-
- "wraplookup": {
- "Looks up the properties of a response-wrapped token.",
- `Returns the creation TTL and creation time of a response-wrapped token.`,
- },
-
- "rewrap": {
- "Rotates a response-wrapped token.",
- `Rotates a response-wrapped token; the output is a new token with the same
- response wrapped inside and the same creation TTL. The original token is revoked.`,
- },
- "audited-headers-name": {
- "Configures the headers sent to the audit logs.",
- `
-This path responds to the following HTTP methods.
-
- GET /<name>
- Returns the setting for the header with the given name.
-
- POST /<name>
- Enable auditing of the given header.
-
- DELETE /<name>
- Disable auditing of the given header.
- `,
- },
- "audited-headers": {
- "Lists the headers configured to be audited.",
- `Returns a list of headers that have been configured to be audited.`,
- },
- "plugin-catalog-list-all": {
- "Lists all the plugins known to Vault",
- `
-This path responds to the following HTTP methods.
- LIST /
- Returns a list of names of configured plugins.
- `,
- },
- "plugin-catalog": {
- "Configures the plugins known to Vault",
- `
-This path responds to the following HTTP methods.
- LIST /
- Returns a list of names of configured plugins.
-
- GET /<name>
- Retrieve the metadata for the named plugin.
-
- PUT /<name>
- Add or update a plugin.
-
- DELETE /<name>
- Delete the plugin with the given name.
- `,
- },
- "plugin-catalog_name": {
- "The name of the plugin",
- "",
- },
- "plugin-catalog_type": {
- "The type of the plugin, may be auth, secret, or database",
- "",
- },
- "plugin-catalog_sha-256": {
- `The SHA256 sum of the executable used in the
-command field. This should be HEX encoded.`,
- "",
- },
- "plugin-catalog_command": {
- `The command used to start the plugin. The
-executable defined in this command must exist in Vault's
-plugin directory.`,
- "",
- },
- "plugin-catalog_args": {
- `The args passed to plugin command.`,
- "",
- },
- "plugin-catalog_env": {
- `The environment variables passed to plugin command.
-Each entry is of the form "key=value".`,
- "",
- },
- "leases": {
- `View or list lease metadata.`,
- `
-This path responds to the following HTTP methods.
-
- PUT /
- Retrieve the metadata for the provided lease id.
-
- LIST /<prefix>
- Lists the leases for the named prefix.
- `,
- },
-
- "leases-list-prefix": {
- `The path to list leases under. Example: "aws/creds/deploy"`,
- "",
- },
- "plugin-reload": {
- "Reload mounts that use a particular backend plugin.",
- `Reload mounts that use a particular backend plugin. Either the plugin name
- or the desired plugin backend mounts must be provided, but not both. In the
- case that the plugin name is provided, all mounted paths that use that plugin
- backend will be reloaded.`,
- },
- "plugin-backend-reload-plugin": {
- `The name of the plugin to reload, as registered in the plugin catalog.`,
- "",
- },
- "plugin-backend-reload-mounts": {
- `The mount paths of the plugin backends to reload.`,
- "",
- },
- "hash": {
- "Generate a hash sum for input data",
- "Generates a hash sum of the given algorithm against the given input data.",
- },
- "random": {
- "Generate random bytes",
- "This function can be used to generate high-entropy random bytes.",
- },
- "listing_visibility": {
- "Determines the visibility of the mount in the UI-specific listing endpoint. Accepted value are 'unauth' and ''.",
- "",
- },
- "passthrough_request_headers": {
- "A list of headers to whitelist and pass from the request to the backend.",
- "",
- },
- "token_type": {
- "The type of token to issue (service or batch).",
- "",
- },
- "raw": {
- "Write, Read, and Delete data directly in the Storage backend.",
- "",
- },
- "internal-ui-mounts": {
- "Information about mounts returned according to their tuned visibility. Internal API; its location, inputs, and outputs may change.",
- "",
- },
- "internal-ui-namespaces": {
- "Information about visible child namespaces. Internal API; its location, inputs, and outputs may change.",
- `Information about visible child namespaces returned starting from the request's
- context namespace and filtered based on access from the client token. Internal API;
- its location, inputs, and outputs may change.`,
- },
- "internal-ui-resultant-acl": {
- "Information about a token's resultant ACL. Internal API; its location, inputs, and outputs may change.",
- "",
- },
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go b/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
deleted file mode 100644
index 28fae412..00000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_system_helpers.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "time"
-
- memdb "github.com/hashicorp/go-memdb"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-var (
- invalidateMFAConfig = func(context.Context, *SystemBackend, string) {}
-
- sysInvalidate = func(b *SystemBackend) func(context.Context, string) {
- return nil
- }
-
- getSystemSchemas = func() []func() *memdb.TableSchema { return nil }
-
- getEGPListResponseKeyInfo = func(*SystemBackend, *namespace.Namespace) map[string]interface{} { return nil }
- addSentinelPolicyData = func(map[string]interface{}, *Policy) {}
- inputSentinelPolicyData = func(*framework.FieldData, *Policy) *logical.Response { return nil }
-
- controlGroupUnwrap = func(context.Context, *SystemBackend, string, bool) (string, error) {
- return "", errors.New("control groups unavailable")
- }
-
- pathInternalUINamespacesRead = func(b *SystemBackend) framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
- // Short-circuit here if there's no client token provided
- if req.ClientToken == "" {
- return nil, fmt.Errorf("client token empty")
- }
-
- // Load the ACL policies so we can check for access and filter namespaces
- _, te, entity, _, err := b.Core.fetchACLTokenEntryAndEntity(ctx, req)
- if err != nil {
- return nil, err
- }
- if entity != nil && entity.Disabled {
- b.logger.Warn("permission denied as the entity on the token is disabled")
- return nil, logical.ErrPermissionDenied
- }
- if te != nil && te.EntityID != "" && entity == nil {
- b.logger.Warn("permission denied as the entity on the token is invalid")
- return nil, logical.ErrPermissionDenied
- }
-
- return logical.ListResponse([]string{""}), nil
- }
- }
-
- pathLicenseRead = func(b *SystemBackend) framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return nil, nil
- }
- }
-
- pathLicenseUpdate = func(b *SystemBackend) framework.OperationFunc {
- return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return nil, nil
- }
- }
-
- entPaths = func(b *SystemBackend) []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "replication/status",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- resp := &logical.Response{
- Data: map[string]interface{}{
- "mode": "disabled",
- },
- }
- return resp, nil
- },
- },
- },
- }
- }
-)
-
-// tuneMountTTLs is used to set TTL config on a mount point
-func (b *SystemBackend) tuneMountTTLs(ctx context.Context, path string, me *MountEntry, newDefault, newMax time.Duration) error {
- zero := time.Duration(0)
-
- switch {
- case newDefault == zero && newMax == zero:
- // No checks needed
-
- case newDefault == zero && newMax != zero:
- // No default/max conflict, no checks needed
-
- case newDefault != zero && newMax == zero:
- // No default/max conflict, no checks needed
-
- case newDefault != zero && newMax != zero:
- if newMax < newDefault {
- return fmt.Errorf("backend max lease TTL of %d would be less than backend default lease TTL of %d", int(newMax.Seconds()), int(newDefault.Seconds()))
- }
- }
-
- origMax := me.Config.MaxLeaseTTL
- origDefault := me.Config.DefaultLeaseTTL
-
- me.Config.MaxLeaseTTL = newMax
- me.Config.DefaultLeaseTTL = newDefault
-
- // Update the mount table
- var err error
- switch {
- case strings.HasPrefix(path, credentialRoutePrefix):
- err = b.Core.persistAuth(ctx, b.Core.auth, &me.Local)
- default:
- err = b.Core.persistMounts(ctx, b.Core.mounts, &me.Local)
- }
- if err != nil {
- me.Config.MaxLeaseTTL = origMax
- me.Config.DefaultLeaseTTL = origDefault
- return fmt.Errorf("failed to update mount table, rolling back TTL changes")
- }
- if b.Core.logger.IsInfo() {
- b.Core.logger.Info("mount tuning of leases successful", "path", path)
- }
-
- return nil
-}
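-
-// Editor's note: the conflict check above only applies when both values are
-// nonzero; e.g. newDefault=2h with newMax=1h fails ("backend max lease TTL of
-// 3600 would be less than backend default lease TTL of 7200"), while a zero
-// newMax skips the comparison entirely.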
diff --git a/vendor/github.com/hashicorp/vault/vault/logical_system_paths.go b/vendor/github.com/hashicorp/vault/vault/logical_system_paths.go
deleted file mode 100644
index 366f8353..00000000
--- a/vendor/github.com/hashicorp/vault/vault/logical_system_paths.go
+++ /dev/null
@@ -1,1515 +0,0 @@
-package vault
-
-import (
- "strings"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-func (b *SystemBackend) configPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "config/cors$",
-
- Fields: map[string]*framework.FieldSchema{
- "enable": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Enables or disables CORS headers on requests.",
- },
- "allowed_origins": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "A comma-separated string or array of strings indicating origins that may make cross-origin requests.",
- },
- "allowed_headers": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "A comma-separated string or array of strings indicating headers that are allowed on cross-origin requests.",
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleCORSRead,
- Summary: "Return the current CORS settings.",
- Description: "",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleCORSUpdate,
- Summary: "Configure the CORS settings.",
- Description: "",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleCORSDelete,
- Summary: "Remove any CORS settings.",
- },
- },
-
- HelpDescription: strings.TrimSpace(sysHelp["config/cors"][0]),
- HelpSynopsis: strings.TrimSpace(sysHelp["config/cors"][1]),
- },
-
- {
- Pattern: "config/ui/headers/" + framework.GenericNameRegex("header"),
-
- Fields: map[string]*framework.FieldSchema{
- "header": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The name of the header.",
- },
- "values": &framework.FieldSchema{
- Type: framework.TypeStringSlice,
- Description: "The values to set the header.",
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleConfigUIHeadersRead,
- Summary: "Return the given UI header's configuration",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleConfigUIHeadersUpdate,
- Summary: "Configure the values to be returned for the UI header.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleConfigUIHeadersDelete,
- Summary: "Remove a UI header.",
- },
- },
-
- HelpDescription: strings.TrimSpace(sysHelp["config/ui/headers"][0]),
- HelpSynopsis: strings.TrimSpace(sysHelp["config/ui/headers"][1]),
- },
-
- {
- Pattern: "config/ui/headers/$",
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ListOperation: &framework.PathOperation{
- Callback: b.handleConfigUIHeadersList,
- Summary: "Return a list of configured UI headers.",
- },
- },
-
- HelpDescription: strings.TrimSpace(sysHelp["config/ui/headers"][0]),
- HelpSynopsis: strings.TrimSpace(sysHelp["config/ui/headers"][1]),
- },
-
- {
- Pattern: "generate-root(/attempt)?$",
- Fields: map[string]*framework.FieldSchema{
- "pgp_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies a base64-encoded PGP public key.",
- },
- },
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Summary: "Read the configuration and progress of the current root generation attempt.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Initializes a new root generation attempt.",
- Description: "Only a single root generation attempt can take place at a time. One (and only one) of otp or pgp_key are required.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Summary: "Cancels any in-progress root generation attempt.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["generate-root"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["generate-root"][1]),
- },
- {
- Pattern: "generate-root/update$",
- Fields: map[string]*framework.FieldSchema{
- "key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies a single master key share.",
- },
- "nonce": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies the nonce of the attempt.",
- },
- },
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Enter a single master key share to progress the root generation attempt.",
- Description: "If the threshold number of master key shares is reached, Vault will complete the root generation and issue the new token. Otherwise, this API must be called multiple times until that threshold is met. The attempt nonce must be provided with each call.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["generate-root"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["generate-root"][1]),
- },
- {
- Pattern: "health$",
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Summary: "Returns the health status of Vault.",
- Responses: map[int][]framework.Response{
- 200: {{Description: "initialized, unsealed, and active"}},
- 429: {{Description: "unsealed and standby"}},
- 472: {{Description: "data recovery mode replication secondary and active"}},
- 501: {{Description: "not initialized"}},
- 503: {{Description: "sealed"}},
- },
- },
- },
- },
-
- {
- Pattern: "init$",
- Fields: map[string]*framework.FieldSchema{
- "pgp_keys": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Specifies an array of PGP public keys used to encrypt the output unseal keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. The size of this array must be the same as `secret_shares`.",
- },
- "root_token_pgp_key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies a PGP public key used to encrypt the initial root token. The key must be base64-encoded from its original binary representation.",
- },
- "secret_shares": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Specifies the number of shares to split the master key into.",
- },
- "secret_threshold": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Specifies the number of shares required to reconstruct the master key. This must be less than or equal secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as `secret_shares`.",
- },
- "stored_shares": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Specifies the number of shares that should be encrypted by the HSM and stored for auto-unsealing. Currently must be the same as `secret_shares`.",
- },
- "recovery_shares": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Specifies the number of shares to split the recovery key into.",
- },
- "recovery_threshold": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: " Specifies the number of shares required to reconstruct the recovery key. This must be less than or equal to `recovery_shares`.",
- },
- "recovery_pgp_keys": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Specifies an array of PGP public keys used to encrypt the output recovery keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. The size of this array must be the same as `recovery_shares`.",
- },
- },
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Summary: "Returns the initialization status of Vault.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Initialize a new Vault.",
- Description: "The Vault must not have been previously initialized. The recovery options, as well as the stored shares option, are only available when using Vault HSM.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["init"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["init"][1]),
- },
- {
- Pattern: "leader$",
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Summary: "Returns the high availability status and current leader instance of Vault.",
- },
- },
-
- HelpSynopsis: "Check the high availability status and current leader of Vault",
- },
- {
- Pattern: "step-down$",
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Cause the node to give up active status.",
- Description: "This endpoint forces the node to give up active status. If the node does not have active status, this endpoint does nothing. Note that the node will sleep for ten seconds before attempting to grab the active lock again, but if no standby nodes grab the active lock in the interim, the same node may become the active node again.",
- Responses: map[int][]framework.Response{
- 204: {{Description: "empty body"}},
- },
- },
- },
- },
- }
-}
-
-func (b *SystemBackend) rekeyPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "rekey/init",
-
- Fields: map[string]*framework.FieldSchema{
- "secret_shares": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Specifies the number of shares to split the master key into.",
- },
- "secret_threshold": &framework.FieldSchema{
- Type: framework.TypeInt,
- Description: "Specifies the number of shares required to reconstruct the master key. This must be less than or equal secret_shares. If using Vault HSM with auto-unsealing, this value must be the same as secret_shares.",
- },
- "pgp_keys": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Specifies an array of PGP public keys used to encrypt the output unseal keys. Ordering is preserved. The keys must be base64-encoded from their original binary representation. The size of this array must be the same as secret_shares.",
- },
- "backup": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Specifies if using PGP-encrypted keys, whether Vault should also store a plaintext backup of the PGP-encrypted keys.",
- },
- "require_verification": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Turns on verification functionality",
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Summary: "Reads the configuration and progress of the current rekey attempt.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Initializes a new rekey attempt.",
- Description: "Only a single rekey attempt can take place at a time, and changing the parameters of a rekey requires canceling and starting a new rekey, which will also provide a new nonce.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Summary: "Cancels any in-progress rekey.",
- Description: "This clears the rekey settings as well as any progress made. This must be called to change the parameters of the rekey. Note: verification is still a part of a rekey. If rekeying is canceled during the verification flow, the current unseal keys remain valid.",
- },
- },
- },
- {
- Pattern: "rekey/backup$",
-
- Fields: map[string]*framework.FieldSchema{},
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleRekeyRetrieveBarrier,
- Summary: "Return the backup copy of PGP-encrypted unseal keys.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleRekeyDeleteBarrier,
- Summary: "Delete the backup copy of PGP-encrypted unseal keys.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["rekey_backup"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
- },
-
- {
- Pattern: "rekey/recovery-key-backup$",
-
- Fields: map[string]*framework.FieldSchema{},
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleRekeyRetrieveRecovery,
- logical.DeleteOperation: b.handleRekeyDeleteRecovery,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["rekey_backup"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
- },
- {
- Pattern: "rekey/update",
-
- Fields: map[string]*framework.FieldSchema{
- "key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies a single master key share.",
- },
- "nonce": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies the nonce of the rekey attempt.",
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Enter a single master key share to progress the rekey of the Vault.",
- },
- },
- },
- {
- Pattern: "rekey/verify",
-
- Fields: map[string]*framework.FieldSchema{
- "key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies a single master share key from the new set of shares.",
- },
- "nonce": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies the nonce of the rekey verification operation.",
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Summary: "Read the configuration and progress of the current rekey verification attempt.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Summary: "Cancel any in-progress rekey verification operation.",
- Description: "This clears any progress made and resets the nonce. Unlike a `DELETE` against `sys/rekey/init`, this only resets the current verification operation, not the entire rekey atttempt.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Enter a single new key share to progress the rekey verification operation.",
- },
- },
- },
-
- {
- Pattern: "seal-status$",
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Summary: "Check the seal status of a Vault.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["seal-status"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["seal-status"][1]),
- },
-
- {
- Pattern: "seal$",
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Seal the Vault.",
- },
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["seal"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["seal"][1]),
- },
-
- {
- Pattern: "unseal$",
- Fields: map[string]*framework.FieldSchema{
- "key": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Specifies a single master key share. This is required unless reset is true.",
- },
- "reset": &framework.FieldSchema{
- Type: framework.TypeBool,
- Description: "Specifies if previously-provided unseal keys are discarded and the unseal process is reset.",
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Summary: "Unseal the Vault.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["unseal"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["unseal"][1]),
- },
- }
-}
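-
-// Note: several entries above (seal-status, seal, unseal, rekey/update,
-// rekey/verify) declare Operations with summaries but no Callback. Requests to
-// these endpoints are handled by Vault's HTTP layer before they reach this
-// backend; the entries exist so the endpoints still appear in generated API
-// documentation.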
-
-func (b *SystemBackend) auditPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "audit-hash/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["audit_path"][0]),
- },
-
- "input": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleAuditHash,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audit-hash"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audit-hash"][1]),
- },
-
- {
- Pattern: "audit$",
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleAuditTable,
- Summary: "List the enabled audit devices.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audit-table"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audit-table"][1]),
- },
-
- {
- Pattern: "audit/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["audit_path"][0]),
- },
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["audit_type"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["audit_desc"][0]),
- },
- "options": &framework.FieldSchema{
- Type: framework.TypeKVPairs,
- Description: strings.TrimSpace(sysHelp["audit_opts"][0]),
- },
- "local": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: strings.TrimSpace(sysHelp["mount_local"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleEnableAudit,
- Summary: "Enable a new audit device at the supplied path.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleDisableAudit,
- Summary: "Disable the audit device at the given path.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audit"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audit"][1]),
- },
-
- {
- Pattern: "config/auditing/request-headers/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "header": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- "hmac": &framework.FieldSchema{
- Type: framework.TypeBool,
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleAuditedHeaderUpdate,
- Summary: "Enable auditing of a header.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleAuditedHeaderDelete,
- Summary: "Disable auditing of the given request header.",
- },
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleAuditedHeaderRead,
- Summary: "List the information for the given request header.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audited-headers-name"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audited-headers-name"][1]),
- },
-
- {
- Pattern: "config/auditing/request-headers$",
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleAuditedHeadersRead,
- Summary: "List the request headers that are configured to be audited.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["audited-headers"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["audited-headers"][1]),
- },
- }
-}
-
-func (b *SystemBackend) sealPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "key-status$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleKeyStatus,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["key-status"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["key-status"][1]),
- },
-
- {
- Pattern: "rotate$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleRotate,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["rotate"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["rotate"][1]),
- },
- }
-}
-
-func (b *SystemBackend) pluginsCatalogCRUDPath() *framework.Path {
- return &framework.Path{
- Pattern: "plugins/catalog(/(?Pauth|database|secret))?/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_name"][0]),
- },
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_type"][0]),
- },
- "sha256": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]),
- },
- "sha_256": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_sha-256"][0]),
- },
- "command": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_command"][0]),
- },
- "args": &framework.FieldSchema{
- Type: framework.TypeStringSlice,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_args"][0]),
- },
- "env": &framework.FieldSchema{
- Type: framework.TypeStringSlice,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_env"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handlePluginCatalogUpdate,
- Summary: "Register a new plugin, or updates an existing one with the supplied name.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handlePluginCatalogDelete,
- Summary: "Remove the plugin with the given name.",
- },
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handlePluginCatalogRead,
- Summary: "Return the configuration data for the plugin with the given name.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
- }
-}
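-
-// Illustrative sketch: the optional (/(?P<type>...))? group in the pattern above
-// lets one registration serve both the typed and untyped catalog forms. The
-// framework compiles and anchors Pattern itself, so this standalone regexp is
-// only an approximation of the routing behavior:
-//
-//	re := regexp.MustCompile(`^plugins/catalog(/(?P<type>auth|database|secret))?/(?P<name>.+)$`)
-//	re.MatchString("plugins/catalog/my-plugin")          // true; "type" is empty
-//	re.MatchString("plugins/catalog/database/my-plugin") // true; "type" is "database"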
-
-func (b *SystemBackend) pluginsCatalogListPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "plugins/catalog/(?Pauth|database|secret)/?$",
-
- Fields: map[string]*framework.FieldSchema{
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-catalog_type"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ListOperation: &framework.PathOperation{
- Callback: b.handlePluginCatalogTypedList,
- Summary: "List the plugins in the catalog.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
- },
- {
- Pattern: "plugins/catalog/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handlePluginCatalogUntypedList,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["plugin-catalog-list-all"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog-list-all"][1]),
- },
- }
-}
-
-func (b *SystemBackend) pluginsReloadPath() *framework.Path {
- return &framework.Path{
- Pattern: "plugins/reload/backend$",
-
- Fields: map[string]*framework.FieldSchema{
- "plugin": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["plugin-backend-reload-plugin"][0]),
- },
- "mounts": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: strings.TrimSpace(sysHelp["plugin-backend-reload-mounts"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handlePluginReloadUpdate,
- Summary: "Reload mounted plugin backends.",
- Description: "Either the plugin name (`plugin`) or the desired plugin backend mounts (`mounts`) must be provided, but not both. In the case that the plugin name is provided, all mounted paths that use that plugin backend will be reloaded.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["plugin-reload"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["plugin-reload"][1]),
- }
-}
-
-func (b *SystemBackend) toolsPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "tools/hash" + framework.OptionalParamRegex("urlalgorithm"),
- Fields: map[string]*framework.FieldSchema{
- "input": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The base64-encoded input data",
- },
-
- "algorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "sha2-256",
- Description: `Algorithm to use (POST body parameter). Valid values are:
-
- * sha2-224
- * sha2-256
- * sha2-384
- * sha2-512
-
- Defaults to "sha2-256".`,
- },
-
- "urlalgorithm": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: `Algorithm to use (POST URL parameter)`,
- },
-
- "format": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "hex",
- Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "hex".`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathHashWrite,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["hash"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["hash"][1]),
- },
-
- {
- Pattern: "tools/random" + framework.OptionalParamRegex("urlbytes"),
- Fields: map[string]*framework.FieldSchema{
- "urlbytes": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The number of bytes to generate (POST URL parameter)",
- },
-
- "bytes": &framework.FieldSchema{
- Type: framework.TypeInt,
- Default: 32,
- Description: "The number of bytes to generate (POST body parameter). Defaults to 32 (256 bits).",
- },
-
- "format": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "base64",
- Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.pathRandomWrite,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["random"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["random"][1]),
- },
- }
-}
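-
-// Illustrative sketch: framework.OptionalParamRegex is defined in the framework
-// package (not shown in this diff); it is assumed here to append an optional
-// trailing URL segment with a named capture group, roughly:
-//
-//	func OptionalParamRegex(name string) string {
-//		return fmt.Sprintf("(/(?P<%s>.+))?", name)
-//	}
-//
-// so "tools/hash" + framework.OptionalParamRegex("urlalgorithm") matches both
-// "tools/hash" and "tools/hash/sha2-512".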
-
-func (b *SystemBackend) internalPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "internal/specs/openapi",
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.pathInternalOpenAPI,
- Summary: "Generate an OpenAPI 3 document of all mounted paths.",
- },
- },
- },
- {
- Pattern: "internal/ui/mounts",
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.pathInternalUIMountsRead,
- Summary: "Lists all enabled and visible auth and secrets mounts.",
- },
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["internal-ui-mounts"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["internal-ui-mounts"][1]),
- },
- {
- Pattern: "internal/ui/mounts/(?P.+)",
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The path of the mount.",
- },
- },
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.pathInternalUIMountRead,
- Summary: "Return information about the given mount.",
- },
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["internal-ui-mounts"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["internal-ui-mounts"][1]),
- },
- {
- Pattern: "internal/ui/namespaces",
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: pathInternalUINamespacesRead(b),
- Unpublished: true,
- },
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["internal-ui-namespaces"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["internal-ui-namespaces"][1]),
- },
- {
- Pattern: "internal/ui/resultant-acl",
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.pathInternalUIResultantACL,
- Unpublished: true,
- },
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["internal-ui-resultant-acl"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["internal-ui-resultant-acl"][1]),
- },
- }
-}
-
-func (b *SystemBackend) capabilitiesPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "capabilities-accessor$",
-
- Fields: map[string]*framework.FieldSchema{
- "accessor": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Accessor of the token for which capabilities are being queried.",
- },
- "path": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "(DEPRECATED) Path on which capabilities are being queried. Use 'paths' instead.",
- Deprecated: true,
- },
- "paths": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Paths on which capabilities are being queried.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleCapabilitiesAccessor,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["capabilities_accessor"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["capabilities_accessor"][1]),
- },
-
- {
- Pattern: "capabilities$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token for which capabilities are being queried.",
- },
- "path": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "(DEPRECATED) Path on which capabilities are being queried. Use 'paths' instead.",
- Deprecated: true,
- },
- "paths": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Paths on which capabilities are being queried.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleCapabilities,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["capabilities"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["capabilities"][1]),
- },
-
- {
- Pattern: "capabilities-self$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token for which capabilities are being queried.",
- },
- "path": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "(DEPRECATED) Path on which capabilities are being queried. Use 'paths' instead.",
- Deprecated: true,
- },
- "paths": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: "Paths on which capabilities are being queried.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleCapabilities,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["capabilities_self"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["capabilities_self"][1]),
- },
- }
-}
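-
-// Illustrative request shapes, inferred from the field schemas above (the exact
-// wire format is defined by the HTTP handlers, not this file):
-//
-//	POST sys/capabilities          {"token": "<token>", "paths": ["secret/foo"]}
-//	POST sys/capabilities-self     {"paths": ["secret/foo"]}   // token taken from the request
-//	POST sys/capabilities-accessor {"accessor": "<accessor>", "paths": ["secret/foo"]}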
-
-func (b *SystemBackend) leasePaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "leases/lookup/(?P.+?)?",
-
- Fields: map[string]*framework.FieldSchema{
- "prefix": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["leases-list-prefix"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ListOperation: &framework.PathOperation{
- Callback: b.handleLeaseLookupList,
- Summary: "Returns a list of lease ids.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["leases"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["leases"][1]),
- },
-
- {
- Pattern: "leases/lookup",
-
- Fields: map[string]*framework.FieldSchema{
- "lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleLeaseLookup,
- Summary: "Retrieve lease metadata.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["leases"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["leases"][1]),
- },
-
- {
- Pattern: "(leases/)?renew" + framework.OptionalParamRegex("url_lease_id"),
-
- Fields: map[string]*framework.FieldSchema{
- "url_lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- "lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- "increment": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Description: strings.TrimSpace(sysHelp["increment"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleRenew,
- Summary: "Renews a lease, requesting to extend the lease.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["renew"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["renew"][1]),
- },
-
- {
- Pattern: "(leases/)?revoke" + framework.OptionalParamRegex("url_lease_id"),
-
- Fields: map[string]*framework.FieldSchema{
- "url_lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- "lease_id": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["lease_id"][0]),
- },
- "sync": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: strings.TrimSpace(sysHelp["revoke-sync"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleRevoke,
- Summary: "Revokes a lease immediately.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["revoke"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["revoke"][1]),
- },
-
- {
- Pattern: "(leases/)?revoke-force/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "prefix": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["revoke-force-path"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleRevokeForce,
- Summary: "Revokes all secrets or tokens generated under a given prefix immediately",
- Description: "Unlike `/sys/leases/revoke-prefix`, this path ignores backend errors encountered during revocation. This is potentially very dangerous and should only be used in specific emergency situations where errors in the backend or the connected backend service prevent normal revocation.\n\nBy ignoring these errors, Vault abdicates responsibility for ensuring that the issued credentials or secrets are properly revoked and/or cleaned up. Access to this endpoint should be tightly controlled.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["revoke-force"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["revoke-force"][1]),
- },
-
- {
- Pattern: "(leases/)?revoke-prefix/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "prefix": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["revoke-prefix-path"][0]),
- },
- "sync": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: strings.TrimSpace(sysHelp["revoke-sync"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleRevokePrefix,
- Summary: "Revokes all secrets (via a lease ID prefix) or tokens (via the tokens' path property) generated under a given prefix immediately.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["revoke-prefix"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["revoke-prefix"][1]),
- },
-
- {
- Pattern: "leases/tidy$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleTidyLeases,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["tidy_leases"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["tidy_leases"][1]),
- },
- }
-}
-
-func (b *SystemBackend) remountPath() *framework.Path {
- return &framework.Path{
- Pattern: "remount",
-
- Fields: map[string]*framework.FieldSchema{
- "from": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The previous mount point.",
- },
- "to": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "The new mount point.",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleRemount,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["remount"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["remount"][1]),
- }
-}
-
-func (b *SystemBackend) authPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "auth$",
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleAuthTable,
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["auth-table"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["auth-table"][1]),
- },
- {
- Pattern: "auth/(?P.+?)/tune$",
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_tune"][0]),
- },
- "default_lease_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]),
- },
- "max_lease_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
- },
- "audit_non_hmac_request_keys": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_request_keys"][0]),
- },
- "audit_non_hmac_response_keys": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_response_keys"][0]),
- },
- "options": &framework.FieldSchema{
- Type: framework.TypeKVPairs,
- Description: strings.TrimSpace(sysHelp["tune_mount_options"][0]),
- },
- "listing_visibility": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["listing_visibility"][0]),
- },
- "passthrough_request_headers": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: strings.TrimSpace(sysHelp["passthrough_request_headers"][0]),
- },
- "token_type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["token_type"][0]),
- },
- },
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleAuthTuneRead,
- Summary: "Reads the given auth path's configuration.",
- Description: "This endpoint requires sudo capability on the final path, but the same functionality can be achieved without sudo via `sys/mounts/auth/[auth-path]/tune`.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleAuthTuneWrite,
- Summary: "Tune configuration parameters for a given auth path.",
- Description: "This endpoint requires sudo capability on the final path, but the same functionality can be achieved without sudo via `sys/mounts/auth/[auth-path]/tune`.",
- },
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["auth_tune"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["auth_tune"][1]),
- },
- {
- Pattern: "auth/(?P.+)",
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_path"][0]),
- },
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_type"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
- },
- "config": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: strings.TrimSpace(sysHelp["auth_config"][0]),
- },
- "local": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: strings.TrimSpace(sysHelp["mount_local"][0]),
- },
- "seal_wrap": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: strings.TrimSpace(sysHelp["seal_wrap"][0]),
- },
- "plugin_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_plugin"][0]),
- },
- "options": &framework.FieldSchema{
- Type: framework.TypeKVPairs,
- Description: strings.TrimSpace(sysHelp["auth_options"][0]),
- },
- },
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleEnableAuth,
- Summary: "Enables a new auth method.",
- Description: `After enabling, the auth method can be accessed and configured via the auth path specified as part of the URL. This auth path will be nested under the auth prefix.
-
-For example, enabling the "foo" auth method will make it accessible at /auth/foo.`,
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleDisableAuth,
- Summary: "Disable the auth method at the given auth path",
- },
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["auth"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["auth"][1]),
- },
- }
-}
-
-func (b *SystemBackend) policyPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "policy/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handlePoliciesList(PolicyTypeACL),
- logical.ListOperation: b.handlePoliciesList(PolicyTypeACL),
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["policy-list"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["policy-list"][1]),
- },
-
- {
- Pattern: "policy/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["policy-name"][0]),
- },
- "rules": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["policy-rules"][0]),
- Deprecated: true,
- },
- "policy": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["policy-rules"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handlePoliciesRead(PolicyTypeACL),
- Summary: "Retrieve the policy body for the named policy.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handlePoliciesSet(PolicyTypeACL),
- Summary: "Add a new or update an existing policy.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handlePoliciesDelete(PolicyTypeACL),
- Summary: "Delete the policy with the given name.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["policy"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["policy"][1]),
- },
-
- {
- Pattern: "policies/acl/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: b.handlePoliciesList(PolicyTypeACL),
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["policy-list"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["policy-list"][1]),
- },
-
- {
- Pattern: "policies/acl/(?P.+)",
-
- Fields: map[string]*framework.FieldSchema{
- "name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["policy-name"][0]),
- },
- "policy": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["policy-rules"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handlePoliciesRead(PolicyTypeACL),
- Summary: "Retrieve information about the named ACL policy.",
- },
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handlePoliciesSet(PolicyTypeACL),
- Summary: "Add a new or update an existing ACL policy.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handlePoliciesDelete(PolicyTypeACL),
- Summary: "Delete the ACL policy with the given name.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["policy"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["policy"][1]),
- },
- }
-}
-
-func (b *SystemBackend) wrappingPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "wrapping/wrap$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleWrappingWrap,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["wrap"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["wrap"][1]),
- },
-
- {
- Pattern: "wrapping/unwrap$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleWrappingUnwrap,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["unwrap"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["unwrap"][1]),
- },
-
- {
- Pattern: "wrapping/lookup$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleWrappingLookup,
- Summary: "Look up wrapping properties for the given token.",
- },
- logical.ReadOperation: &framework.PathOperation{
- Callback: b.handleWrappingLookup,
- Summary: "Look up wrapping properties for the requester's token.",
- },
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["wraplookup"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["wraplookup"][1]),
- },
-
- {
- Pattern: "wrapping/rewrap$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: b.handleWrappingRewrap,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["rewrap"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["rewrap"][1]),
- },
- }
-}
-
-func (b *SystemBackend) mountPaths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "mounts/(?P.+?)/tune$",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_path"][0]),
- },
- "default_lease_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]),
- },
- "max_lease_ttl": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
- },
- "audit_non_hmac_request_keys": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_request_keys"][0]),
- },
- "audit_non_hmac_response_keys": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: strings.TrimSpace(sysHelp["tune_audit_non_hmac_response_keys"][0]),
- },
- "options": &framework.FieldSchema{
- Type: framework.TypeKVPairs,
- Description: strings.TrimSpace(sysHelp["tune_mount_options"][0]),
- },
- "listing_visibility": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["listing_visibility"][0]),
- },
- "passthrough_request_headers": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: strings.TrimSpace(sysHelp["passthrough_request_headers"][0]),
- },
- "token_type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["token_type"][0]),
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleMountTuneRead,
- logical.UpdateOperation: b.handleMountTuneWrite,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["mount_tune"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["mount_tune"][1]),
- },
-
- {
- Pattern: "mounts/(?P.+?)",
-
- Fields: map[string]*framework.FieldSchema{
- "path": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_path"][0]),
- },
- "type": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_type"][0]),
- },
- "description": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_desc"][0]),
- },
- "config": &framework.FieldSchema{
- Type: framework.TypeMap,
- Description: strings.TrimSpace(sysHelp["mount_config"][0]),
- },
- "local": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: strings.TrimSpace(sysHelp["mount_local"][0]),
- },
- "seal_wrap": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: strings.TrimSpace(sysHelp["seal_wrap"][0]),
- },
- "plugin_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: strings.TrimSpace(sysHelp["mount_plugin_name"][0]),
- },
- "options": &framework.FieldSchema{
- Type: framework.TypeKVPairs,
- Description: strings.TrimSpace(sysHelp["mount_options"][0]),
- },
- },
-
- Operations: map[logical.Operation]framework.OperationHandler{
- logical.UpdateOperation: &framework.PathOperation{
- Callback: b.handleMount,
- Summary: "Enable a new secrets engine at the given path.",
- },
- logical.DeleteOperation: &framework.PathOperation{
- Callback: b.handleUnmount,
- Summary: "Disable the mount point specified at the given path.",
- },
- },
- HelpSynopsis: strings.TrimSpace(sysHelp["mount"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["mount"][1]),
- },
-
- {
- Pattern: "mounts$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: b.handleMountTable,
- },
-
- HelpSynopsis: strings.TrimSpace(sysHelp["mounts"][0]),
- HelpDescription: strings.TrimSpace(sysHelp["mounts"][1]),
- },
- }
-}
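-
-// Note on ordering: the framework tries a backend's path patterns in
-// registration order and routes to the first match, so the more specific
-// "mounts/(?P<path>.+?)/tune$" entry above must come before the catch-all
-// "mounts/(?P<path>.+?)" entry.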
diff --git a/vendor/github.com/hashicorp/vault/vault/mount.go b/vendor/github.com/hashicorp/vault/vault/mount.go
deleted file mode 100644
index fd9905e3..00000000
--- a/vendor/github.com/hashicorp/vault/vault/mount.go
+++ /dev/null
@@ -1,1343 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "fmt"
- "os"
- "sort"
- "strings"
- "sync"
- "time"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/builtin/plugin"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/mitchellh/copystructure"
-)
-
-const (
- // coreMountConfigPath is used to store the mount configuration.
- // Mounts are protected within the Vault itself, which means they
- // can only be viewed or modified after an unseal.
- coreMountConfigPath = "core/mounts"
-
- // coreLocalMountConfigPath is used to store mount configuration for local
- // (non-replicated) mounts
- coreLocalMountConfigPath = "core/local-mounts"
-
- // backendBarrierPrefix is the prefix to the UUID used in the
- // barrier view for the backends.
- backendBarrierPrefix = "logical/"
-
- // systemBarrierPrefix is the prefix used for the
- // system logical backend.
- systemBarrierPrefix = "sys/"
-
- // mountTableType is the value we expect to find for the mount table and
- // corresponding entries
- mountTableType = "mounts"
-)
-
-// ListingVisibilityType represents the types for listing visibility
-type ListingVisibilityType string
-
-const (
- // ListingVisibilityDefault is the default value for listing visibility
- ListingVisibilityDefault ListingVisibilityType = ""
- // ListingVisibilityHidden is the hidden type for listing visibility
- ListingVisibilityHidden ListingVisibilityType = "hidden"
- // ListingVisibilityUnauth is the unauth type for listing visibility
- ListingVisibilityUnauth ListingVisibilityType = "unauth"
-
- systemMountPath = "sys/"
- identityMountPath = "identity/"
- cubbyholeMountPath = "cubbyhole/"
-
- systemMountType = "system"
- identityMountType = "identity"
- cubbyholeMountType = "cubbyhole"
- pluginMountType = "plugin"
-
- MountTableUpdateStorage = true
- MountTableNoUpdateStorage = false
-)
-
-var (
- // errLoadMountsFailed is returned if loadMounts encounters an error
- errLoadMountsFailed = errors.New("failed to setup mount table")
-
- // protectedMounts cannot be remounted
- protectedMounts = []string{
- "audit/",
- "auth/",
- systemMountPath,
- cubbyholeMountPath,
- identityMountPath,
- }
-
- untunableMounts = []string{
- cubbyholeMountPath,
- systemMountPath,
- "audit/",
- identityMountPath,
- }
-
- // singletonMounts can only exist in one location and are
- // loaded by default. These are types, not paths.
- singletonMounts = []string{
- cubbyholeMountType,
- systemMountType,
- "token",
- identityMountType,
- }
-
- // mountAliases maps old backend names to new backend names, allowing us
- // to move/rename backends but maintain backwards compatibility
- mountAliases = map[string]string{"generic": "kv"}
-)
-
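-// generateMountAccessor produces an accessor of the form "<type>_<8 hex chars>"
-// (e.g. "kv_1a2b3c4d"), retrying until it finds one that does not collide with
-// an accessor already known to the router.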
-func (c *Core) generateMountAccessor(entryType string) (string, error) {
- var accessor string
- for {
- randBytes, err := uuid.GenerateRandomBytes(4)
- if err != nil {
- return "", err
- }
- accessor = fmt.Sprintf("%s_%s", entryType, fmt.Sprintf("%08x", randBytes[0:4]))
- if entry := c.router.MatchingMountByAccessor(accessor); entry == nil {
- break
- }
- }
-
- return accessor, nil
-}
-
-// MountTable is used to represent the internal mount table
-type MountTable struct {
- Type string `json:"type"`
- Entries []*MountEntry `json:"entries"`
-}
-
-// shallowClone returns a copy of the mount table that
-// keeps the MountEntry locations, so as not to invalidate
-// other locations holding pointers. Care needs to be taken
-// if modifying entries rather than modifying the table itself
-func (t *MountTable) shallowClone() *MountTable {
- mt := &MountTable{
- Type: t.Type,
- Entries: make([]*MountEntry, len(t.Entries)),
- }
- for i, e := range t.Entries {
- mt.Entries[i] = e
- }
- return mt
-}
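-
-// Typical usage (see mountInternal and removeMountEntry below): shallowClone the
-// table, append to or remove from the clone's Entries, persist the clone, then
-// swap it in as c.mounts.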
-
- // setTaint is used to set the taint on the given entry. It accepts either the
- // mount entry's path or namespace + path, e.g. /secret/ or /token/.
-func (t *MountTable) setTaint(ctx context.Context, path string, value bool) (*MountEntry, error) {
- n := len(t.Entries)
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- for i := 0; i < n; i++ {
- if entry := t.Entries[i]; entry.Path == path && entry.Namespace().ID == ns.ID {
- t.Entries[i].Tainted = value
- return t.Entries[i], nil
- }
- }
- return nil, nil
-}
-
-// remove is used to remove a given path entry; returns the entry that was
-// removed
-func (t *MountTable) remove(ctx context.Context, path string) (*MountEntry, error) {
- n := len(t.Entries)
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- for i := 0; i < n; i++ {
- if entry := t.Entries[i]; entry.Path == path && entry.Namespace().ID == ns.ID {
- t.Entries[i], t.Entries[n-1] = t.Entries[n-1], nil
- t.Entries = t.Entries[:n-1]
- return entry, nil
- }
- }
- return nil, nil
-}
-
-// sortEntriesByPath sorts the entries in the table by path and returns the
-// table; this is useful for tests
-func (t *MountTable) sortEntriesByPath() *MountTable {
- sort.Slice(t.Entries, func(i, j int) bool {
- return t.Entries[i].Path < t.Entries[j].Path
- })
- return t
-}
-
- // sortEntriesByPathDepth sorts the entries in the table by path depth and
- // returns the table; this is useful for tests
-func (t *MountTable) sortEntriesByPathDepth() *MountTable {
- sort.Slice(t.Entries, func(i, j int) bool {
- return len(strings.Split(t.Entries[i].Namespace().Path+t.Entries[i].Path, "/")) < len(strings.Split(t.Entries[j].Namespace().Path+t.Entries[j].Path, "/"))
- })
- return t
-}
-
-// MountEntry is used to represent a mount table entry
-type MountEntry struct {
- Table string `json:"table"` // The table it belongs to
- Path string `json:"path"` // Mount Path
- Type string `json:"type"` // Logical backend Type
- Description string `json:"description"` // User-provided description
- UUID string `json:"uuid"` // Barrier view UUID
- BackendAwareUUID string `json:"backend_aware_uuid"` // UUID that can be used by the backend as a helper when a consistent value is needed outside of storage.
- Accessor string `json:"accessor"` // Unique but more human-friendly ID. Does not change, not used for any sensitive things (like as a salt, which the UUID sometimes is).
- Config MountConfig `json:"config"` // Configuration related to this mount (but not backend-derived)
- Options map[string]string `json:"options"` // Backend options
- Local bool `json:"local"` // Local mounts are not replicated or affected by replication
- SealWrap bool `json:"seal_wrap"` // Whether to wrap CSPs
- Tainted bool `json:"tainted,omitempty"` // Set as a Write-Ahead flag for unmount/remount
- NamespaceID string `json:"namespace_id"`
-
- // namespace contains the populated namespace
- namespace *namespace.Namespace
-
- // synthesizedConfigCache is used to cache configuration values. These
- // particular values are cached since we want to get them at a point-in-time
- // without separately managing their locks individually. See SyncCache() for
- // the specific values that are being cached.
- synthesizedConfigCache sync.Map
-}
-
-// MountConfig is used to hold settable options
-type MountConfig struct {
- DefaultLeaseTTL time.Duration `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` // Override for global default
- MaxLeaseTTL time.Duration `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` // Override for global default
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` // Override for global default
- AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"`
- AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"`
- ListingVisibility ListingVisibilityType `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"`
- PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"`
- TokenType logical.TokenType `json:"token_type" structs:"token_type" mapstructure:"token_type"`
-
- // PluginName is the name of the plugin registered in the catalog.
- //
- // Deprecated: MountEntry.Type should be used instead for Vault 1.0.0 and beyond.
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
-}
-
-// APIMountConfig is an embedded struct of api.MountConfigInput
-type APIMountConfig struct {
- DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
- AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"`
- AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"`
- ListingVisibility ListingVisibilityType `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"`
- PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"`
- TokenType string `json:"token_type" structs:"token_type" mapstructure:"token_type"`
-
- // PluginName is the name of the plugin registered in the catalog.
- //
- // Deprecated: MountEntry.Type should be used instead for Vault 1.0.0 and beyond.
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
-}
-
-// Clone returns a deep copy of the mount entry
-func (e *MountEntry) Clone() (*MountEntry, error) {
- cp, err := copystructure.Copy(e)
- if err != nil {
- return nil, err
- }
- return cp.(*MountEntry), nil
-}
-
-// Namespace returns the namespace for the mount entry
-func (e *MountEntry) Namespace() *namespace.Namespace {
- return e.namespace
-}
-
-// APIPath returns the full API Path for the given mount entry
-func (e *MountEntry) APIPath() string {
- path := e.Path
- if e.Table == credentialTableType {
- path = credentialRoutePrefix + path
- }
- return e.namespace.Path + path
-}
-
-// SyncCache syncs tunable configuration values to the cache. In the case of
-// cached values, they should be retrieved via synthesizedConfigCache.Load()
-// instead of accessing them directly through MountConfig.
-func (e *MountEntry) SyncCache() {
- if len(e.Config.AuditNonHMACRequestKeys) == 0 {
- e.synthesizedConfigCache.Delete("audit_non_hmac_request_keys")
- } else {
- e.synthesizedConfigCache.Store("audit_non_hmac_request_keys", e.Config.AuditNonHMACRequestKeys)
- }
-
- if len(e.Config.AuditNonHMACResponseKeys) == 0 {
- e.synthesizedConfigCache.Delete("audit_non_hmac_response_keys")
- } else {
- e.synthesizedConfigCache.Store("audit_non_hmac_response_keys", e.Config.AuditNonHMACResponseKeys)
- }
-
- if len(e.Config.PassthroughRequestHeaders) == 0 {
- e.synthesizedConfigCache.Delete("passthrough_request_headers")
- } else {
- e.synthesizedConfigCache.Store("passthrough_request_headers", e.Config.PassthroughRequestHeaders)
- }
-}
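-
-// Illustrative read path for the cached values; sync.Map stores interface{}
-// values, so callers assert back to []string:
-//
-//	if raw, ok := e.synthesizedConfigCache.Load("passthrough_request_headers"); ok {
-//		headers := raw.([]string)
-//		_ = headers
-//	}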
-
-func (c *Core) decodeMountTable(ctx context.Context, raw []byte) (*MountTable, error) {
- // Decode into mount table
- mountTable := new(MountTable)
- if err := jsonutil.DecodeJSON(raw, mountTable); err != nil {
- return nil, err
- }
-
- // Populate the namespace in memory
- var mountEntries []*MountEntry
- for _, entry := range mountTable.Entries {
- if entry.NamespaceID == "" {
- entry.NamespaceID = namespace.RootNamespaceID
- }
- ns, err := NamespaceByID(ctx, entry.NamespaceID, c)
- if err != nil {
- return nil, err
- }
- if ns == nil {
- c.logger.Error("namespace on mount entry not found", "namespace_id", entry.NamespaceID, "mount_path", entry.Path, "mount_description", entry.Description)
- continue
- }
-
- entry.namespace = ns
- mountEntries = append(mountEntries, entry)
- }
-
- return &MountTable{
- Type: mountTable.Type,
- Entries: mountEntries,
- }, nil
-}
-
-// Mount is used to mount a new backend to the mount table.
-func (c *Core) mount(ctx context.Context, entry *MountEntry) error {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(entry.Path, "/") {
- entry.Path += "/"
- }
-
- // Prevent protected paths from being mounted
- for _, p := range protectedMounts {
- if strings.HasPrefix(entry.Path, p) && entry.namespace == nil {
- return logical.CodedError(403, fmt.Sprintf("cannot mount %q", entry.Path))
- }
- }
-
- // Do not allow more than one instance of a singleton mount
- for _, p := range singletonMounts {
- if entry.Type == p {
- return logical.CodedError(403, fmt.Sprintf("mount type of %q is not mountable", entry.Type))
- }
- }
- return c.mountInternal(ctx, entry, MountTableUpdateStorage)
-}
-
-func (c *Core) mountInternal(ctx context.Context, entry *MountEntry, updateStorage bool) error {
- c.mountsLock.Lock()
- defer c.mountsLock.Unlock()
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
-
- if err := verifyNamespace(c, ns, entry); err != nil {
- return err
- }
-
- entry.NamespaceID = ns.ID
- entry.namespace = ns
-
- // Ensure the cache is populated, don't need the result
- NamespaceByID(ctx, ns.ID, c)
-
- // Verify there are no conflicting mounts
- if match := c.router.MountConflict(ctx, entry.Path); match != "" {
- return logical.CodedError(409, fmt.Sprintf("existing mount at %s", match))
- }
-
- // Generate a new UUID and view
- if entry.UUID == "" {
- entryUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.UUID = entryUUID
- }
- if entry.BackendAwareUUID == "" {
- bUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.BackendAwareUUID = bUUID
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor(entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- }
- // Sync values to the cache
- entry.SyncCache()
-
- viewPath := entry.ViewPath()
- view := NewBarrierView(c.barrier, viewPath)
-
- // Singleton mounts cannot be filtered on a per-secondary basis
- // from replication
- if strutil.StrListContains(singletonMounts, entry.Type) {
- addFilterablePath(c, viewPath)
- }
-
- nilMount, err := preprocessMount(c, entry, view)
- if err != nil {
- return err
- }
- origReadOnlyErr := view.getReadOnlyErr()
-
- // Mark the view as read-only until the mounting is complete and
- // ensure that it is reset after. This ensures that there will be no
- // writes during the construction of the backend.
- view.setReadOnlyErr(logical.ErrSetupReadOnly)
- // We defer this because we're already up and running, so we don't need to
- // delay the reset until after postUnseal
- defer view.setReadOnlyErr(origReadOnlyErr)
-
- var backend logical.Backend
- sysView := c.mountEntrySysView(entry)
-
- backend, err = c.newLogicalBackend(ctx, entry, sysView, view)
- if err != nil {
- return err
- }
- if backend == nil {
- return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type)
- }
-
- // Check for the correct backend type
- backendType := backend.Type()
- if backendType != logical.TypeLogical {
- if entry.Type != "kv" && entry.Type != "system" && entry.Type != "cubbyhole" {
- return fmt.Errorf(`unknown backend type: "%s"`, entry.Type)
- }
- }
-
- addPathCheckers(c, entry, backend, viewPath)
-
- c.setCoreBackend(entry, backend, view)
-
- // If the mount is filtered or we are on a DR secondary we don't want to
- // keep the actual backend running, so we clean it up and set it to nil
- // so the router does not have a pointer to the object.
- if nilMount {
- backend.Cleanup(ctx)
- backend = nil
- }
-
- newTable := c.mounts.shallowClone()
- newTable.Entries = append(newTable.Entries, entry)
- if updateStorage {
- if err := c.persistMounts(ctx, newTable, &entry.Local); err != nil {
- c.logger.Error("failed to update mount table", "error", err)
- if err == logical.ErrReadOnly && c.perfStandby {
- return err
- }
-
- return logical.CodedError(500, "failed to update mount table")
- }
- }
- c.mounts = newTable
-
- if err := c.router.Mount(backend, entry.Path, entry, view); err != nil {
- return err
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("successful mount", "namespace", entry.Namespace().Path, "path", entry.Path, "type", entry.Type)
- }
- return nil
-}
-
-// Unmount is used to unmount a path. The boolean indicates whether the mount
-// was found.
-func (c *Core) unmount(ctx context.Context, path string) error {
- // Ensure we end the path in a slash
- if !strings.HasSuffix(path, "/") {
- path += "/"
- }
-
- // Prevent protected paths from being unmounted
- for _, p := range protectedMounts {
- if strings.HasPrefix(path, p) {
- return fmt.Errorf("cannot unmount %q", path)
- }
- }
- return c.unmountInternal(ctx, path, MountTableUpdateStorage)
-}
-
-func (c *Core) unmountInternal(ctx context.Context, path string, updateStorage bool) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
-
- // Verify exact match of the route
- match := c.router.MatchingMount(ctx, path)
- if match == "" || ns.Path+path != match {
- return fmt.Errorf("no matching mount")
- }
-
- // Get the view for this backend
- view := c.router.MatchingStorageByAPIPath(ctx, path)
-
- // Get the backend/mount entry for this path, used to remove ignored
- // replication prefixes
- backend := c.router.MatchingBackend(ctx, path)
- entry := c.router.MatchingMountEntry(ctx, path)
-
- // Mark the entry as tainted
- if err := c.taintMountEntry(ctx, path, updateStorage); err != nil {
- c.logger.Error("failed to taint mount entry for path being unmounted", "error", err, "path", path)
- return err
- }
-
- // Taint the router path to prevent routing. Note that in-flight requests
- // are in an uncertain state at this point.
- if err := c.router.Taint(ctx, path); err != nil {
- return err
- }
-
- rCtx := namespace.ContextWithNamespace(c.activeContext, ns)
- if backend != nil && c.rollback != nil {
- // Invoke the rollback manager a final time
- if err := c.rollback.Rollback(rCtx, path); err != nil {
- return err
- }
- }
- if backend != nil && c.expiration != nil && updateStorage {
- // Revoke all the dynamic keys
- if err := c.expiration.RevokePrefix(rCtx, path, true); err != nil {
- return err
- }
- }
-
- if backend != nil {
- // Call cleanup function if it exists
- backend.Cleanup(ctx)
- }
-
- // Unmount the backend entirely
- if err := c.router.Unmount(ctx, path); err != nil {
- return err
- }
-
- viewPath := entry.ViewPath()
- switch {
- case !updateStorage:
- // Don't attempt to clear data, replication will handle this
- case c.IsDRSecondary(), entry.Local, !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary):
- // Have writable storage, remove the whole thing
- if err := logical.ClearView(ctx, view); err != nil {
- c.logger.Error("failed to clear view for path being unmounted", "error", err, "path", path)
- return err
- }
-
- case !entry.Local && c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary):
- if err := clearIgnoredPaths(ctx, c, backend, viewPath); err != nil {
- return err
- }
- }
- // Remove the mount table entry
- if err := c.removeMountEntry(ctx, path, updateStorage); err != nil {
- c.logger.Error("failed to remove mount entry for path being unmounted", "error", err, "path", path)
- return err
- }
-
- removePathCheckers(c, entry, viewPath)
-
- if c.logger.IsInfo() {
- c.logger.Info("successfully unmounted", "path", path, "namespace", ns.Path)
- }
-
- return nil
-}
-
-// removeMountEntry is used to remove an entry from the mount table
-func (c *Core) removeMountEntry(ctx context.Context, path string, updateStorage bool) error {
- c.mountsLock.Lock()
- defer c.mountsLock.Unlock()
-
- // Remove the entry from the mount table
- newTable := c.mounts.shallowClone()
- entry, err := newTable.remove(ctx, path)
- if err != nil {
- return err
- }
- if entry == nil {
- c.logger.Error("nil entry found removing entry in mounts table", "path", path)
- return logical.CodedError(500, "failed to remove entry in mounts table")
- }
-
- // When unmounting all entries, the JSON code will load back up from storage
- // as a nil slice, which breaks tests; just set it to nil explicitly
- if len(newTable.Entries) == 0 {
- newTable.Entries = nil
- }
-
- if updateStorage {
- // Update the mount table
- if err := c.persistMounts(ctx, newTable, &entry.Local); err != nil {
- c.logger.Error("failed to remove entry from mounts table", "error", err)
- return logical.CodedError(500, "failed to remove entry from mounts table")
- }
- }
-
- c.mounts = newTable
- return nil
-}
-
-// taintMountEntry is used to mark an entry in the mount table as tainted
-func (c *Core) taintMountEntry(ctx context.Context, path string, updateStorage bool) error {
- c.mountsLock.Lock()
- defer c.mountsLock.Unlock()
-
- // As modifying the taint of an entry affects shallow clones,
- // we simply use the original
- entry, err := c.mounts.setTaint(ctx, path, true)
- if err != nil {
- return err
- }
- if entry == nil {
- c.logger.Error("nil entry found tainting entry in mounts table", "path", path)
- return logical.CodedError(500, "failed to taint entry in mounts table")
- }
-
- if updateStorage {
- // Update the mount table
- if err := c.persistMounts(ctx, c.mounts, &entry.Local); err != nil {
- if err == logical.ErrReadOnly && c.perfStandby {
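- // Return the bare error here so callers can distinguish the
- // read-only performance-standby case from a generic failure.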
- return err
- }
-
- c.logger.Error("failed to taint entry in mounts table", "error", err)
- return logical.CodedError(500, "failed to taint entry in mounts table")
- }
- }
-
- return nil
-}
-
-// remountForce takes a copy of the mount entry for the path and fully unmounts
-// and remounts the backend to pick up any changes, such as filtered paths
-func (c *Core) remountForce(ctx context.Context, path string) error {
- me := c.router.MatchingMountEntry(ctx, path)
- if me == nil {
- return fmt.Errorf("cannot find mount for path %q", path)
- }
-
- me, err := me.Clone()
- if err != nil {
- return err
- }
-
- if err := c.unmount(ctx, path); err != nil {
- return err
- }
- return c.mount(ctx, me)
-}
-
-// remount is used to remount a path at a new mount point.
-func (c *Core) remount(ctx context.Context, src, dst string) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
-
- // Ensure we end the path in a slash
- if !strings.HasSuffix(src, "/") {
- src += "/"
- }
- if !strings.HasSuffix(dst, "/") {
- dst += "/"
- }
-
- // Prevent protected paths from being remounted
- for _, p := range protectedMounts {
- if strings.HasPrefix(src, p) {
- return fmt.Errorf("cannot remount %q", src)
- }
- }
-
- // Verify exact match of the route
- srcMatch := c.router.MatchingMountEntry(ctx, src)
- if srcMatch == nil {
- return fmt.Errorf("no matching mount at %q", src)
- }
- if srcMatch.NamespaceID != ns.ID {
- return fmt.Errorf("source mount in a different namespace than request")
- }
-
- if err := verifyNamespace(c, ns, &MountEntry{Path: dst}); err != nil {
- return err
- }
-
- if match := c.router.MatchingMount(ctx, dst); match != "" {
- return fmt.Errorf("existing mount at %q", match)
- }
-
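- // The move below proceeds as taint -> rollback/revoke -> mount table
- // update -> router remount -> untaint, so neither the old nor the new
- // path serves requests while the entry is in flight.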
- // Mark the entry as tainted
- if err := c.taintMountEntry(ctx, src, true); err != nil {
- return err
- }
-
- // Taint the router path to prevent routing
- if err := c.router.Taint(ctx, src); err != nil {
- return err
- }
-
- if !c.IsDRSecondary() {
- // Invoke the rollback manager a final time
- rCtx := namespace.ContextWithNamespace(c.activeContext, ns)
- if err := c.rollback.Rollback(rCtx, src); err != nil {
- return err
- }
-
- entry := c.router.MatchingMountEntry(ctx, src)
- if entry == nil {
- return fmt.Errorf("no matching mount at %q", src)
- }
-
- // Revoke all the dynamic keys
- if err := c.expiration.RevokePrefix(rCtx, src, true); err != nil {
- return err
- }
- }
-
- c.mountsLock.Lock()
- var entry *MountEntry
- for _, mountEntry := range c.mounts.Entries {
- if mountEntry.Path == src && mountEntry.NamespaceID == ns.ID {
- entry = mountEntry
- entry.Path = dst
- entry.Tainted = false
- break
- }
- }
-
- if entry == nil {
- c.mountsLock.Unlock()
- c.logger.Error("failed to find entry in mounts table")
- return logical.CodedError(500, "failed to find entry in mounts table")
- }
-
- // Update the mount table
- if err := c.persistMounts(ctx, c.mounts, &entry.Local); err != nil {
- entry.Path = src
- entry.Tainted = true
- c.mountsLock.Unlock()
- if err == logical.ErrReadOnly && c.perfStandby {
- return err
- }
-
- c.logger.Error("failed to update mounts table", "error", err)
- return logical.CodedError(500, "failed to update mounts table")
- }
- c.mountsLock.Unlock()
-
- // Remount the backend
- if err := c.router.Remount(ctx, src, dst); err != nil {
- return err
- }
-
- // Un-taint the path
- if err := c.router.Untaint(ctx, dst); err != nil {
- return err
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("successful remount", "old_path", src, "new_path", dst)
- }
- return nil
-}
-
-// loadMounts is invoked as part of postUnseal to load the mount table
-func (c *Core) loadMounts(ctx context.Context) error {
- // Load the existing mount table
- raw, err := c.barrier.Get(ctx, coreMountConfigPath)
- if err != nil {
- c.logger.Error("failed to read mount table", "error", err)
- return errLoadMountsFailed
- }
- rawLocal, err := c.barrier.Get(ctx, coreLocalMountConfigPath)
- if err != nil {
- c.logger.Error("failed to read local mount table", "error", err)
- return errLoadMountsFailed
- }
-
- c.mountsLock.Lock()
- defer c.mountsLock.Unlock()
-
- if raw != nil {
- // Check if the persisted value has canary in the beginning. If
- // yes, decompress the table and then JSON decode it. If not,
- // simply JSON decode it.
- mountTable, err := c.decodeMountTable(ctx, raw.Value)
- if err != nil {
- c.logger.Error("failed to decompress and/or decode the mount table", "error", err)
- return err
- }
- c.mounts = mountTable
- }
-
- var needPersist bool
- if c.mounts == nil {
- c.logger.Info("no mounts; adding default mount table")
- c.mounts = c.defaultMountTable()
- needPersist = true
- }
-
- if rawLocal != nil {
- localMountTable, err := c.decodeMountTable(ctx, rawLocal.Value)
- if err != nil {
- c.logger.Error("failed to decompress and/or decode the local mount table", "error", err)
- return err
- }
- if localMountTable != nil && len(localMountTable.Entries) > 0 {
- c.mounts.Entries = append(c.mounts.Entries, localMountTable.Entries...)
- }
- }
-
- // Note that this is only designed to work with singletons, as it checks by
- // type only.
-
- // Upgrade to typed mount table
- if c.mounts.Type == "" {
- c.mounts.Type = mountTableType
- needPersist = true
- }
-
- for _, requiredMount := range c.requiredMountTable().Entries {
- foundRequired := false
- for _, coreMount := range c.mounts.Entries {
- if coreMount.Type == requiredMount.Type {
- foundRequired = true
- break
- }
- }
-
- // In a replication scenario we will let sync invalidation take
- // care of creating a new required mount that doesn't exist yet.
- // This should only happen in the upgrade case where a new one is
- // introduced on the primary; otherwise initial bootstrapping will
- // ensure this comes over. If we upgrade first, we simply don't
- // create the mount, so we won't conflict when we sync. If this is
- // local (e.g. cubbyhole) we do still add it.
- if !foundRequired && (!c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) || requiredMount.Local) {
- c.mounts.Entries = append(c.mounts.Entries, requiredMount)
- needPersist = true
- }
- }
-
- // Upgrade to table-scoped entries
- for _, entry := range c.mounts.Entries {
- if entry.Type == cubbyholeMountType && !entry.Local {
- entry.Local = true
- needPersist = true
- }
- if entry.Table == "" {
- entry.Table = c.mounts.Type
- needPersist = true
- }
- if entry.Accessor == "" {
- accessor, err := c.generateMountAccessor(entry.Type)
- if err != nil {
- return err
- }
- entry.Accessor = accessor
- needPersist = true
- }
- if entry.BackendAwareUUID == "" {
- bUUID, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
- entry.BackendAwareUUID = bUUID
- needPersist = true
- }
-
- if entry.NamespaceID == "" {
- entry.NamespaceID = namespace.RootNamespaceID
- needPersist = true
- }
- ns, err := NamespaceByID(ctx, entry.NamespaceID, c)
- if err != nil {
- return err
- }
- if ns == nil {
- return namespace.ErrNoNamespace
- }
- entry.namespace = ns
-
- // Sync values to the cache
- entry.SyncCache()
- }
-
- // Done if we have restored the mount table and we don't need
- // to persist
- if !needPersist {
- return nil
- }
-
- // Persist both mount tables
- if err := c.persistMounts(ctx, c.mounts, nil); err != nil {
- c.logger.Error("failed to persist mount table", "error", err)
- return errLoadMountsFailed
- }
- return nil
-}
-
-// persistMounts is used to persist the mount table after modification
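-// A nil "local" pointer persists both the local and the non-local tables;
-// otherwise only the table selected by its value is written.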
-func (c *Core) persistMounts(ctx context.Context, table *MountTable, local *bool) error {
- if table.Type != mountTableType {
- c.logger.Error("given table to persist has wrong type", "actual_type", table.Type, "expected_type", mountTableType)
- return fmt.Errorf("invalid table type given, not persisting")
- }
-
- for _, entry := range table.Entries {
- if entry.Table != table.Type {
- c.logger.Error("given entry to persist in mount table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
- return fmt.Errorf("invalid mount entry found, not persisting")
- }
- }
-
- nonLocalMounts := &MountTable{
- Type: mountTableType,
- }
-
- localMounts := &MountTable{
- Type: mountTableType,
- }
-
- for _, entry := range table.Entries {
- if entry.Local {
- localMounts.Entries = append(localMounts.Entries, entry)
- } else {
- nonLocalMounts.Entries = append(nonLocalMounts.Entries, entry)
- }
- }
-
- writeTable := func(mt *MountTable, path string) error {
- // Encode the mount table into JSON and compress it (lzw).
- compressedBytes, err := jsonutil.EncodeJSONAndCompress(mt, nil)
- if err != nil {
- c.logger.Error("failed to encode or compress mount table", "error", err)
- return err
- }
-
- // Create an entry
- entry := &Entry{
- Key: path,
- Value: compressedBytes,
- }
-
- // Write to the physical backend
- if err := c.barrier.Put(ctx, entry); err != nil {
- c.logger.Error("failed to persist mount table", "error", err)
- return err
- }
- return nil
- }
-
- var err error
- switch {
- case local == nil:
- // Write non-local mounts
- err = writeTable(nonLocalMounts, coreMountConfigPath)
- if err != nil {
- return err
- }
-
- // Write local mounts
- err = writeTable(localMounts, coreLocalMountConfigPath)
- if err != nil {
- return err
- }
- case *local:
- // Write local mounts
- err = writeTable(localMounts, coreLocalMountConfigPath)
- default:
- // Write non-local mounts
- err = writeTable(nonLocalMounts, coreMountConfigPath)
- }
-
- return err
-}
-
-// setupMounts is invoked after we've loaded the mount table to
-// initialize the logical backends and setup the router
-func (c *Core) setupMounts(ctx context.Context) error {
- c.mountsLock.Lock()
- defer c.mountsLock.Unlock()
-
- for _, entry := range c.mounts.sortEntriesByPathDepth().Entries {
- // Initialize the backend, special casing for system
- barrierPath := entry.ViewPath()
-
- // Create a barrier view using the UUID
- view := NewBarrierView(c.barrier, barrierPath)
-
- // Singleton mounts cannot be filtered on a per-secondary basis
- // from replication
- if strutil.StrListContains(singletonMounts, entry.Type) {
- addFilterablePath(c, barrierPath)
- }
-
- // Determine the replicated state of the mount
- nilMount, err := preprocessMount(c, entry, view)
- if err != nil {
- return err
- }
- origReadOnlyErr := view.getReadOnlyErr()
-
- // Mark the view as read-only until the mounting is complete and
- // ensure that it is reset after. This ensures that there will be no
- // writes during the construction of the backend.
- view.setReadOnlyErr(logical.ErrSetupReadOnly)
- if strutil.StrListContains(singletonMounts, entry.Type) {
- defer view.setReadOnlyErr(origReadOnlyErr)
- } else {
- c.postUnsealFuncs = append(c.postUnsealFuncs, func() {
- view.setReadOnlyErr(origReadOnlyErr)
- })
- }
-
- var backend logical.Backend
- // Create the new backend
- sysView := c.mountEntrySysView(entry)
- backend, err = c.newLogicalBackend(ctx, entry, sysView, view)
- if err != nil {
- c.logger.Error("failed to create mount entry", "path", entry.Path, "error", err)
- if !c.builtinRegistry.Contains(entry.Type, consts.PluginTypeSecrets) {
- // If we encounter an error instantiating a plugin-based backend,
- // skip backend initialization but register the entry to the mount table
- // to preserve its storage and path.
- c.logger.Warn("skipping plugin-based mount entry", "path", entry.Path)
- goto ROUTER_MOUNT
- }
- return errLoadMountsFailed
- }
- if backend == nil {
- return fmt.Errorf("created mount entry of type %q is nil", entry.Type)
- }
-
- {
- // Check for the correct backend type
- backendType := backend.Type()
-
- if backendType != logical.TypeLogical {
- if entry.Type != "kv" && entry.Type != "system" && entry.Type != "cubbyhole" {
- return fmt.Errorf(`unknown backend type: "%s"`, entry.Type)
- }
- }
-
- addPathCheckers(c, entry, backend, barrierPath)
-
- c.setCoreBackend(entry, backend, view)
- }
-
- // If the mount is filtered or we are on a DR secondary we don't want to
- // keep the actual backend running, so we clean it up and set it to nil
- // so the router does not have a pointer to the object.
- if nilMount {
- backend.Cleanup(ctx)
- backend = nil
- }
-
- ROUTER_MOUNT:
- // Mount the backend
- err = c.router.Mount(backend, entry.Path, entry, view)
- if err != nil {
- c.logger.Error("failed to mount entry", "path", entry.Path, "error", err)
- return errLoadMountsFailed
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("successfully mounted backend", "type", entry.Type, "path", entry.Path)
- }
-
- // Ensure the path is tainted if set in the mount table
- if entry.Tainted {
- c.router.Taint(ctx, entry.Path)
- }
-
- // Ensure the cache is populated, don't need the result
- NamespaceByID(ctx, entry.NamespaceID, c)
- }
- return nil
-}
-
-// unloadMounts is used before we seal the vault to reset the mounts to
-// their unloaded state, calling Cleanup if defined. This is reversed by
-// loadMounts and setupMounts.
-func (c *Core) unloadMounts(ctx context.Context) error {
- c.mountsLock.Lock()
- defer c.mountsLock.Unlock()
-
- if c.mounts != nil {
- mountTable := c.mounts.shallowClone()
- for _, e := range mountTable.Entries {
- backend := c.router.MatchingBackend(namespace.ContextWithNamespace(ctx, e.namespace), e.Path)
- if backend != nil {
- backend.Cleanup(ctx)
- }
-
- viewPath := e.ViewPath()
- removePathCheckers(c, e, viewPath)
- }
- }
-
- c.mounts = nil
- c.router = NewRouter()
- c.systemBarrierView = nil
- return nil
-}
-
-// newLogicalBackend is used to create and configure a new logical backend by name
-func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView logical.SystemView, view logical.Storage) (logical.Backend, error) {
- t := entry.Type
- if alias, ok := mountAliases[t]; ok {
- t = alias
- }
-
- f, ok := c.logicalBackends[t]
- if !ok {
- f = plugin.Factory
- }
-
- // Set up conf to pass in plugin_name
- conf := make(map[string]string, len(entry.Options)+1)
- for k, v := range entry.Options {
- conf[k] = v
- }
-
- switch {
- case entry.Type == "plugin":
- conf["plugin_name"] = entry.Config.PluginName
- default:
- conf["plugin_name"] = t
- }
-
- conf["plugin_type"] = consts.PluginTypeSecrets.String()
-
- backendLogger := c.baseLogger.Named(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor))
- c.AddLogger(backendLogger)
- config := &logical.BackendConfig{
- StorageView: view,
- Logger: backendLogger,
- Config: conf,
- System: sysView,
- BackendUUID: entry.BackendAwareUUID,
- }
-
- b, err := f(ctx, config)
- if err != nil {
- return nil, err
- }
- if b == nil {
- return nil, fmt.Errorf("nil backend of type %q returned from factory", t)
- }
- return b, nil
-}
-
-// mountEntrySysView creates a logical.SystemView from global and
-// mount-specific entries; because this should be called when setting
-// up a mountEntry, it doesn't check to ensure that me is not nil
-func (c *Core) mountEntrySysView(entry *MountEntry) logical.SystemView {
- return dynamicSystemView{
- core: c,
- mountEntry: entry,
- }
-}
-
-// defaultMountTable creates a default mount table
-func (c *Core) defaultMountTable() *MountTable {
- table := &MountTable{
- Type: mountTableType,
- }
- mountUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create default secret mount UUID: %v", err))
- }
- mountAccessor, err := c.generateMountAccessor("kv")
- if err != nil {
- panic(fmt.Sprintf("could not generate default secret mount accessor: %v", err))
- }
- bUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create default secret mount backend UUID: %v", err))
- }
-
- kvMount := &MountEntry{
- Table: mountTableType,
- Path: "secret/",
- Type: "kv",
- Description: "key/value secret storage",
- UUID: mountUUID,
- Accessor: mountAccessor,
- BackendAwareUUID: bUUID,
- Options: map[string]string{
- "version": "1",
- },
- }
- if os.Getenv("VAULT_INTERACTIVE_DEMO_SERVER") != "" {
- kvMount.Options["version"] = "2"
- }
- table.Entries = append(table.Entries, kvMount)
- table.Entries = append(table.Entries, c.requiredMountTable().Entries...)
- return table
-}
-
-// requiredMountTable creates a mount table with entries required
-// to be available
-func (c *Core) requiredMountTable() *MountTable {
- table := &MountTable{
- Type: mountTableType,
- }
- cubbyholeUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create cubbyhole UUID: %v", err))
- }
- cubbyholeAccessor, err := c.generateMountAccessor("cubbyhole")
- if err != nil {
- panic(fmt.Sprintf("could not generate cubbyhole accessor: %v", err))
- }
- cubbyholeBackendUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create cubbyhole backend UUID: %v", err))
- }
- cubbyholeMount := &MountEntry{
- Table: mountTableType,
- Path: cubbyholeMountPath,
- Type: cubbyholeMountType,
- Description: "per-token private secret storage",
- UUID: cubbyholeUUID,
- Accessor: cubbyholeAccessor,
- Local: true,
- BackendAwareUUID: cubbyholeBackendUUID,
- }
-
- sysUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create sys UUID: %v", err))
- }
- sysAccessor, err := c.generateMountAccessor("system")
- if err != nil {
- panic(fmt.Sprintf("could not generate sys accessor: %v", err))
- }
- sysBackendUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create sys backend UUID: %v", err))
- }
- sysMount := &MountEntry{
- Table: mountTableType,
- Path: "sys/",
- Type: systemMountType,
- Description: "system endpoints used for control, policy and debugging",
- UUID: sysUUID,
- Accessor: sysAccessor,
- BackendAwareUUID: sysBackendUUID,
- }
-
- identityUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create identity mount entry UUID: %v", err))
- }
- identityAccessor, err := c.generateMountAccessor("identity")
- if err != nil {
- panic(fmt.Sprintf("could not generate identity accessor: %v", err))
- }
- identityBackendUUID, err := uuid.GenerateUUID()
- if err != nil {
- panic(fmt.Sprintf("could not create identity backend UUID: %v", err))
- }
- identityMount := &MountEntry{
- Table: mountTableType,
- Path: "identity/",
- Type: "identity",
- Description: "identity store",
- UUID: identityUUID,
- Accessor: identityAccessor,
- BackendAwareUUID: identityBackendUUID,
- }
-
- table.Entries = append(table.Entries, cubbyholeMount)
- table.Entries = append(table.Entries, sysMount)
- table.Entries = append(table.Entries, identityMount)
-
- return table
-}
-
-// singletonMountTables returns the mount tables for singleton mounts. The
-// main usage of this is for replication, so we can send over mount info
-// (especially mount UUIDs, which are used for salts) for mounts that may not
-// be handled normally. After saving these values on the secondary, we let
-// normal sync invalidation do its thing. Because of its use for replication,
-// we exclude local mounts.
-func (c *Core) singletonMountTables() (mounts, auth *MountTable) {
- mounts = &MountTable{}
- auth = &MountTable{}
-
- c.mountsLock.RLock()
- for _, entry := range c.mounts.Entries {
- if strutil.StrListContains(singletonMounts, entry.Type) && !entry.Local && entry.Namespace().ID == namespace.RootNamespaceID {
- mounts.Entries = append(mounts.Entries, entry)
- }
- }
- c.mountsLock.RUnlock()
-
- c.authLock.RLock()
- for _, entry := range c.auth.Entries {
- if strutil.StrListContains(singletonMounts, entry.Type) && !entry.Local && entry.Namespace().ID == namespace.RootNamespaceID {
- auth.Entries = append(auth.Entries, entry)
- }
- }
- c.authLock.RUnlock()
-
- return
-}
-
-func (c *Core) setCoreBackend(entry *MountEntry, backend logical.Backend, view *BarrierView) {
- switch entry.Type {
- case systemMountType:
- c.systemBackend = backend.(*SystemBackend)
- c.systemBarrierView = view
- case cubbyholeMountType:
- ch := backend.(*CubbyholeBackend)
- ch.saltUUID = entry.UUID
- ch.storageView = view
- c.cubbyholeBackend = ch
- case identityMountType:
- c.identityStore = backend.(*IdentityStore)
- }
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/mount_util.go b/vendor/github.com/hashicorp/vault/vault/mount_util.go
deleted file mode 100644
index 66ffb98e..00000000
--- a/vendor/github.com/hashicorp/vault/vault/mount_util.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
- "path"
-
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
-)
-
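-// The functions below are no-op stubs for hooks that are only implemented
-// in Vault Enterprise builds (note the !enterprise build tag above).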
-func addPathCheckers(*Core, *MountEntry, logical.Backend, string) {}
-func removePathCheckers(*Core, *MountEntry, string) {}
-func addAuditPathChecker(*Core, *MountEntry, *BarrierView, string) {}
-func removeAuditPathChecker(*Core, *MountEntry) {}
-func addFilterablePath(*Core, string) {}
-func preprocessMount(*Core, *MountEntry, *BarrierView) (bool, error) { return false, nil }
-func clearIgnoredPaths(context.Context, *Core, logical.Backend, string) error { return nil }
-
-// ViewPath returns the storage prefix for the view
-func (e *MountEntry) ViewPath() string {
- switch e.Type {
- case systemMountType:
- return systemBarrierPrefix
- case "token":
- return path.Join(systemBarrierPrefix, tokenSubPath) + "/"
- }
-
- switch e.Table {
- case mountTableType:
- return backendBarrierPrefix + e.UUID + "/"
- case credentialTableType:
- return credentialBarrierPrefix + e.UUID + "/"
- case auditTableType:
- return auditBarrierPrefix + e.UUID + "/"
- }
-
- panic("invalid mount entry")
-}
-
-func verifyNamespace(*Core, *namespace.Namespace, *MountEntry) error { return nil }
diff --git a/vendor/github.com/hashicorp/vault/vault/namespaces.go b/vendor/github.com/hashicorp/vault/vault/namespaces.go
deleted file mode 100644
index 5b9f31b9..00000000
--- a/vendor/github.com/hashicorp/vault/vault/namespaces.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package vault
-
-import (
- "context"
-
- "github.com/hashicorp/vault/helper/namespace"
-)
-
-var (
- NamespaceByID func(context.Context, string, *Core) (*namespace.Namespace, error) = namespaceByID
-)
-
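-// namespaceByID is the open-source implementation: only the root namespace
-// exists, so any other ID yields ErrNoNamespace.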
-func namespaceByID(ctx context.Context, nsID string, c *Core) (*namespace.Namespace, error) {
- if nsID == namespace.RootNamespaceID {
- return namespace.RootNamespace, nil
- }
- return nil, namespace.ErrNoNamespace
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go b/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go
deleted file mode 100644
index b81b024a..00000000
--- a/vendor/github.com/hashicorp/vault/vault/plugin_catalog.go
+++ /dev/null
@@ -1,368 +0,0 @@
-package vault
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "path/filepath"
- "sort"
- "strings"
- "sync"
-
- log "github.com/hashicorp/go-hclog"
- multierror "github.com/hashicorp/go-multierror"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/pluginutil"
- "github.com/hashicorp/vault/logical"
- backendplugin "github.com/hashicorp/vault/logical/plugin"
-)
-
-var (
- pluginCatalogPath = "core/plugin-catalog/"
- ErrDirectoryNotConfigured = errors.New("could not set plugin, plugin directory is not configured")
- ErrPluginNotFound = errors.New("plugin not found in the catalog")
- ErrPluginBadType = errors.New("unable to determine plugin type")
-)
-
-// PluginCatalog keeps a record of plugins known to vault. External plugins need
-// to be registered to the catalog before they can be used in backends. Builtin
-// plugins are automatically detected and included in the catalog.
-type PluginCatalog struct {
- builtinRegistry BuiltinRegistry
- catalogView *BarrierView
- directory string
-
- lock sync.RWMutex
-}
-
-func (c *Core) setupPluginCatalog(ctx context.Context) error {
- c.pluginCatalog = &PluginCatalog{
- builtinRegistry: c.builtinRegistry,
- catalogView: NewBarrierView(c.barrier, pluginCatalogPath),
- directory: c.pluginDirectory,
- }
-
- // Run upgrade if untyped plugins exist
- err := c.pluginCatalog.UpgradePlugins(ctx, c.logger)
- if err != nil {
- c.logger.Error("error while upgrading plugin storage", "error", err)
- }
-
- if c.logger.IsInfo() {
- c.logger.Info("successfully setup plugin catalog", "plugin-directory", c.pluginDirectory)
- }
-
- return nil
-}
-
-// getPluginTypeFromUnknown will attempt to run the plugin to determine the
-// type. It will first attempt to run it as a database plugin and then as a
-// backend plugin. Both of these will be run in metadata mode.
-func (c *PluginCatalog) getPluginTypeFromUnknown(ctx context.Context, plugin *pluginutil.PluginRunner) (consts.PluginType, error) {
- {
- // Attempt to run as database plugin
- client, err := dbplugin.NewPluginClient(ctx, nil, plugin, log.NewNullLogger(), true)
- if err == nil {
- // Close the client and cleanup the plugin process
- client.Close()
- return consts.PluginTypeDatabase, nil
- }
- }
-
- {
- // Attempt to run as backend plugin
- client, err := backendplugin.NewPluginClient(ctx, nil, plugin, log.NewNullLogger(), true)
- if err == nil {
- err := client.Setup(ctx, &logical.BackendConfig{})
- if err != nil {
- return consts.PluginTypeUnknown, err
- }
-
- backendType := client.Type()
- client.Cleanup(ctx)
-
- switch backendType {
- case logical.TypeCredential:
- return consts.PluginTypeCredential, nil
- case logical.TypeLogical:
- return consts.PluginTypeSecrets, nil
- }
- }
- }
-
- return consts.PluginTypeUnknown, nil
-}
-
-// UpgradePlugins will loop over all the plugins of unknown type and attempt to
-// upgrade them to typed plugins
-func (c *PluginCatalog) UpgradePlugins(ctx context.Context, logger log.Logger) error {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // If the directory isn't set we can skip the upgrade attempt
- if c.directory == "" {
- return nil
- }
-
- // List plugins from old location
- pluginsRaw, err := c.catalogView.List(ctx, "")
- if err != nil {
- return err
- }
- plugins := make([]string, 0, len(pluginsRaw))
- for _, p := range pluginsRaw {
- if !strings.HasSuffix(p, "/") {
- plugins = append(plugins, p)
- }
- }
-
- logger.Info("upgrading plugin information", "plugins", plugins)
-
- var retErr error
- for _, pluginName := range plugins {
- pluginRaw, err := c.catalogView.Get(ctx, pluginName)
- if err != nil {
- retErr = multierror.Append(errwrap.Wrapf("failed to load plugin entry: {{err}}", err))
- continue
- }
-
- plugin := new(pluginutil.PluginRunner)
- if err := jsonutil.DecodeJSON(pluginRaw.Value, plugin); err != nil {
- retErr = multierror.Append(errwrap.Wrapf("failed to decode plugin entry: {{err}}", err))
- continue
- }
-
- // prepend the plugin directory to the command
- cmdOld := plugin.Command
- plugin.Command = filepath.Join(c.directory, plugin.Command)
-
- pluginType, err := c.getPluginTypeFromUnknown(ctx, plugin)
- if err != nil {
- retErr = multierror.Append(retErr, fmt.Errorf("could not upgrade plugin %s: %s", pluginName, err))
- continue
- }
- if pluginType == consts.PluginTypeUnknown {
- retErr = multierror.Append(retErr, fmt.Errorf("could not upgrade plugin %s: plugin of unknown type", pluginName))
- continue
- }
-
- // Upgrade the storage
- err = c.setInternal(ctx, pluginName, pluginType, cmdOld, plugin.Args, plugin.Env, plugin.Sha256)
- if err != nil {
- retErr = multierror.Append(retErr, fmt.Errorf("could not upgrade plugin %s: %s", pluginName, err))
- continue
- }
-
- err = c.catalogView.Delete(ctx, pluginName)
- if err != nil {
- logger.Error("could not remove plugin", "plugin", pluginName, "error", err)
- }
-
- logger.Info("upgraded plugin type", "plugin", pluginName, "type", pluginType.String())
- }
-
- return retErr
-}
-
-// Get retrieves a plugin with the specified name from the catalog. It first
-// looks for external plugins with this name and then looks for builtin plugins.
-// It returns a PluginRunner or an error if no plugin was found.
-func (c *PluginCatalog) Get(ctx context.Context, name string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error) {
- c.lock.RLock()
- defer c.lock.RUnlock()
-
- // If the directory isn't set only look for builtin plugins.
- if c.directory != "" {
- // Look for external plugins in the barrier
- out, err := c.catalogView.Get(ctx, pluginType.String()+"/"+name)
- if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to retrieve plugin %q: {{err}}", name), err)
- }
- if out == nil {
- // Also look for external plugins under what their name would have been if they
- // were registered before plugin types existed.
- out, err = c.catalogView.Get(ctx, name)
- if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to retrieve plugin %q: {{err}}", name), err)
- }
- }
- if out != nil {
- entry := new(pluginutil.PluginRunner)
- if err := jsonutil.DecodeJSON(out.Value, entry); err != nil {
- return nil, errwrap.Wrapf("failed to decode plugin entry: {{err}}", err)
- }
- if entry.Type != pluginType && entry.Type != consts.PluginTypeUnknown {
- return nil, nil
- }
-
- // prepend the plugin directory to the command
- entry.Command = filepath.Join(c.directory, entry.Command)
-
- return entry, nil
- }
- }
- // Look for builtin plugins
- if factory, ok := c.builtinRegistry.Get(name, pluginType); ok {
- return &pluginutil.PluginRunner{
- Name: name,
- Type: pluginType,
- Builtin: true,
- BuiltinFactory: factory,
- }, nil
- }
-
- return nil, nil
-}
-
-// Set registers a new external plugin with the catalog, or updates an existing
-// external plugin. It takes the name, command and SHA256 of the plugin.
-func (c *PluginCatalog) Set(ctx context.Context, name string, pluginType consts.PluginType, command string, args []string, env []string, sha256 []byte) error {
- if c.directory == "" {
- return ErrDirectoryNotConfigured
- }
-
- switch {
- case strings.Contains(name, ".."):
- fallthrough
- case strings.Contains(command, ".."):
- return consts.ErrPathContainsParentReferences
- }
-
- c.lock.Lock()
- defer c.lock.Unlock()
-
- return c.setInternal(ctx, name, pluginType, command, args, env, sha256)
-}
-
-func (c *PluginCatalog) setInternal(ctx context.Context, name string, pluginType consts.PluginType, command string, args []string, env []string, sha256 []byte) error {
- // Best-effort check to make sure the command isn't breaking out of the
- // configured plugin directory.
- commandFull := filepath.Join(c.directory, command)
- sym, err := filepath.EvalSymlinks(commandFull)
- if err != nil {
- return errwrap.Wrapf("error while validating the command path: {{err}}", err)
- }
- symAbs, err := filepath.Abs(filepath.Dir(sym))
- if err != nil {
- return errwrap.Wrapf("error while validating the command path: {{err}}", err)
- }
-
- if symAbs != c.directory {
- return errors.New("can not execute files outside of configured plugin directory")
- }
-
- // If the plugin type is unknown, we want to attempt to determine the type
- if pluginType == consts.PluginTypeUnknown {
- // entryTmp should only be used for the below type check, it uses the
- // full command instead of the relative command.
- entryTmp := &pluginutil.PluginRunner{
- Name: name,
- Command: commandFull,
- Args: args,
- Env: env,
- Sha256: sha256,
- Builtin: false,
- }
-
- pluginType, err = c.getPluginTypeFromUnknown(ctx, entryTmp)
- if err != nil || pluginType == consts.PluginTypeUnknown {
- return ErrPluginBadType
- }
- }
-
- entry := &pluginutil.PluginRunner{
- Name: name,
- Type: pluginType,
- Command: command,
- Args: args,
- Env: env,
- Sha256: sha256,
- Builtin: false,
- }
-
- buf, err := json.Marshal(entry)
- if err != nil {
- return errwrap.Wrapf("failed to encode plugin entry: {{err}}", err)
- }
-
- logicalEntry := logical.StorageEntry{
- Key: pluginType.String() + "/" + name,
- Value: buf,
- }
- if err := c.catalogView.Put(ctx, &logicalEntry); err != nil {
- return errwrap.Wrapf("failed to persist plugin entry: {{err}}", err)
- }
- return nil
-}
-
-// Delete is used to remove an external plugin from the catalog. Builtin plugins
-// can not be deleted.
-func (c *PluginCatalog) Delete(ctx context.Context, name string, pluginType consts.PluginType) error {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Check the typed name under which the plugin exists; if it's not found,
- // fall back to the untyped name rather than returning an error.
- pluginKey := pluginType.String() + "/" + name
- out, err := c.catalogView.Get(ctx, pluginKey)
- if err != nil || out == nil {
- pluginKey = name
- }
-
- return c.catalogView.Delete(ctx, pluginKey)
-}
-
-// List returns a list of all the known plugin names. If an external and builtin
-// plugin share the same name, only one instance of the name will be returned.
-func (c *PluginCatalog) List(ctx context.Context, pluginType consts.PluginType) ([]string, error) {
- c.lock.RLock()
- defer c.lock.RUnlock()
-
- // Collect keys for external plugins in the barrier.
- keys, err := logical.CollectKeys(ctx, c.catalogView)
- if err != nil {
- return nil, err
- }
-
- // Get the builtin plugins.
- builtinKeys := c.builtinRegistry.Keys(pluginType)
-
- // Use a map to unique the two lists.
- mapKeys := make(map[string]bool)
-
- pluginTypePrefix := pluginType.String() + "/"
-
- for _, plugin := range keys {
-
- // Only list user-added plugins if they're of the given type.
- if entry, err := c.Get(ctx, plugin, pluginType); err == nil && entry != nil {
-
- // Some keys will be prefixed with the plugin type, but others won't.
- // Users don't expect to see the plugin type, so we need to strip that here.
- idx := strings.Index(plugin, pluginTypePrefix)
- if idx == 0 {
- plugin = plugin[len(pluginTypePrefix):]
- }
- mapKeys[plugin] = true
- }
- }
-
- for _, plugin := range builtinKeys {
- mapKeys[plugin] = true
- }
-
- retList := make([]string, len(mapKeys))
- i := 0
- for k := range mapKeys {
- retList[i] = k
- i++
- }
- // sort for consistent ordering of builtin plugins
- sort.Strings(retList)
-
- return retList, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/plugin_reload.go b/vendor/github.com/hashicorp/vault/vault/plugin_reload.go
deleted file mode 100644
index fdd095cd..00000000
--- a/vendor/github.com/hashicorp/vault/vault/plugin_reload.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/hashicorp/vault/helper/namespace"
-
- "github.com/hashicorp/errwrap"
- multierror "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
-)
-
-// reloadPluginMounts reloads provided mounts, regardless of
-// plugin name, as long as the backend type is plugin.
-func (c *Core) reloadMatchingPluginMounts(ctx context.Context, mounts []string) error {
- c.mountsLock.RLock()
- defer c.mountsLock.RUnlock()
- c.authLock.RLock()
- defer c.authLock.RUnlock()
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
-
- var errors error
- for _, mount := range mounts {
- entry := c.router.MatchingMountEntry(ctx, mount)
- if entry == nil {
- errors = multierror.Append(errors, fmt.Errorf("cannot fetch mount entry on %q", mount))
- continue
- }
-
- var isAuth bool
- fullPath := c.router.MatchingMount(ctx, mount)
- if strings.HasPrefix(fullPath, credentialRoutePrefix) {
- isAuth = true
- }
-
- // We don't reload mounts that are not in the same namespace
- if ns.ID != entry.Namespace().ID {
- continue
- }
-
- err := c.reloadBackendCommon(ctx, entry, isAuth)
- if err != nil {
- errors = multierror.Append(errors, errwrap.Wrapf(fmt.Sprintf("cannot reload plugin on %q: {{err}}", mount), err))
- continue
- }
- c.logger.Info("successfully reloaded plugin", "plugin", entry.Type, "path", entry.Path)
- }
- return errors
-}
-
-// reloadPlugin reloads all mounted backends that are of
-// plugin pluginName (name of the plugin as registered in
-// the plugin catalog).
-func (c *Core) reloadMatchingPlugin(ctx context.Context, pluginName string) error {
- c.mountsLock.RLock()
- defer c.mountsLock.RUnlock()
- c.authLock.RLock()
- defer c.authLock.RUnlock()
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
-
- // Filter mount entries to those matching the plugin name
- for _, entry := range c.mounts.Entries {
- // We don't reload mounts that are not in the same namespace
- if ns.ID != entry.Namespace().ID {
- continue
- }
- if entry.Type == pluginName || (entry.Type == "plugin" && entry.Config.PluginName == pluginName) {
- err := c.reloadBackendCommon(ctx, entry, false)
- if err != nil {
- return err
- }
- c.logger.Info("successfully reloaded plugin", "plugin", pluginName, "path", entry.Path)
- }
- }
-
- // Filter auth mount entries to those matching the plugin name
- for _, entry := range c.auth.Entries {
- // We don't reload mounts that are not in the same namespace
- if ns.ID != entry.Namespace().ID {
- continue
- }
-
- if entry.Type == pluginName || (entry.Type == "plugin" && entry.Config.PluginName == pluginName) {
- err := c.reloadBackendCommon(ctx, entry, true)
- if err != nil {
- return err
- }
- c.logger.Info("successfully reloaded plugin", "plugin", pluginName, "path", entry.Path)
- }
- }
-
- return nil
-}
-
-// reloadBackendCommon is a generic method to reload a backend provided a
-// MountEntry.
-func (c *Core) reloadBackendCommon(ctx context.Context, entry *MountEntry, isAuth bool) error {
- // We don't want to reload the singleton mounts. They often have specific
- // inmemory elements and we don't want to touch them here.
- if strutil.StrListContains(singletonMounts, entry.Type) {
- c.logger.Debug("skipping reload of singleton mount", "type", entry.Type)
- return nil
- }
-
- path := entry.Path
-
- if isAuth {
- path = credentialRoutePrefix + path
- }
-
- // Fast-path out if the backend doesn't exist
- raw, ok := c.router.root.Get(path)
- if !ok {
- return nil
- }
-
- re := raw.(*routeEntry)
-
- // Grab the lock, this allows requests to drain before we cleanup the
- // client.
- re.l.Lock()
- defer re.l.Unlock()
-
- // Only call Cleanup if backend is initialized
- if re.backend != nil {
- // Call backend's Cleanup routine
- re.backend.Cleanup(ctx)
- }
-
- view := re.storageView
- viewPath := entry.UUID + "/"
- switch entry.Table {
- case mountTableType:
- viewPath = backendBarrierPrefix + viewPath
- case credentialTableType:
- viewPath = credentialBarrierPrefix + viewPath
- }
-
- removePathCheckers(c, entry, viewPath)
-
- sysView := c.mountEntrySysView(entry)
-
- nilMount, err := preprocessMount(c, entry, view.(*BarrierView))
- if err != nil {
- return err
- }
-
- var backend logical.Backend
- if !isAuth {
- // Dispense a new backend
- backend, err = c.newLogicalBackend(ctx, entry, sysView, view)
- } else {
- backend, err = c.newCredentialBackend(ctx, entry, sysView, view)
- }
- if err != nil {
- return err
- }
- if backend == nil {
- return fmt.Errorf("nil backend of type %q returned from creation function", entry.Type)
- }
-
- addPathCheckers(c, entry, backend, viewPath)
-
- if nilMount {
- backend.Cleanup(ctx)
- backend = nil
- }
-
- // Set the backend back
- re.backend = backend
-
- if backend != nil {
- // Set paths as well
- paths := backend.SpecialPaths()
- if paths != nil {
- re.rootPaths.Store(pathsToRadix(paths.Root))
- re.loginPaths.Store(pathsToRadix(paths.Unauthenticated))
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/policy.go b/vendor/github.com/hashicorp/vault/vault/policy.go
deleted file mode 100644
index 7bf448a3..00000000
--- a/vendor/github.com/hashicorp/vault/vault/policy.go
+++ /dev/null
@@ -1,461 +0,0 @@
-package vault
-
-import (
- "errors"
- "fmt"
- "strings"
- "time"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/hcl"
- "github.com/hashicorp/hcl/hcl/ast"
- "github.com/hashicorp/vault/helper/hclutil"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/mitchellh/copystructure"
-)
-
-const (
- DenyCapability = "deny"
- CreateCapability = "create"
- ReadCapability = "read"
- UpdateCapability = "update"
- DeleteCapability = "delete"
- ListCapability = "list"
- SudoCapability = "sudo"
- RootCapability = "root"
-
- // Backwards compatibility
- OldDenyPathPolicy = "deny"
- OldReadPathPolicy = "read"
- OldWritePathPolicy = "write"
- OldSudoPathPolicy = "sudo"
-)
-
-const (
- DenyCapabilityInt uint32 = 1 << iota
- CreateCapabilityInt
- ReadCapabilityInt
- UpdateCapabilityInt
- DeleteCapabilityInt
- ListCapabilityInt
- SudoCapabilityInt
-)
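-
-// For example, a path granting ["read", "list"] is compiled into a bitmap of
-// ReadCapabilityInt | ListCapabilityInt (see parsePaths below).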
-
-type PolicyType uint32
-
-const (
- PolicyTypeACL PolicyType = iota
- PolicyTypeRGP
- PolicyTypeEGP
-
- // Triggers a lookup in the map to figure out if ACL or RGP
- PolicyTypeToken
-)
-
-func (p PolicyType) String() string {
- switch p {
- case PolicyTypeACL:
- return "acl"
- case PolicyTypeRGP:
- return "rgp"
- case PolicyTypeEGP:
- return "egp"
- }
-
- return ""
-}
-
-var (
- cap2Int = map[string]uint32{
- DenyCapability: DenyCapabilityInt,
- CreateCapability: CreateCapabilityInt,
- ReadCapability: ReadCapabilityInt,
- UpdateCapability: UpdateCapabilityInt,
- DeleteCapability: DeleteCapabilityInt,
- ListCapability: ListCapabilityInt,
- SudoCapability: SudoCapabilityInt,
- }
-)
-
-type egpPath struct {
- Path string `json:"path"`
- Glob bool `json:"glob"`
-}
-
-// Policy is used to represent the policy specified by an ACL configuration.
-type Policy struct {
- sentinelPolicy
- Name string `hcl:"name"`
- Paths []*PathRules `hcl:"-"`
- Raw string
- Type PolicyType
- Templated bool
- namespace *namespace.Namespace
-}
-
-// ShallowClone returns a shallow clone of the policy. This should not be used
-// if any of the reference-typed fields are going to be modified
-func (p *Policy) ShallowClone() *Policy {
- return &Policy{
- sentinelPolicy: p.sentinelPolicy,
- Name: p.Name,
- Paths: p.Paths,
- Raw: p.Raw,
- Type: p.Type,
- namespace: p.namespace,
- }
-}
-
-// PathRules represents a policy for a path in the namespace.
-type PathRules struct {
- Prefix string
- Policy string
- Permissions *ACLPermissions
- Glob bool
- Capabilities []string
-
- // These keys are used at the top level to make the HCL nicer; we store in
- // the ACLPermissions object though
- MinWrappingTTLHCL interface{} `hcl:"min_wrapping_ttl"`
- MaxWrappingTTLHCL interface{} `hcl:"max_wrapping_ttl"`
- AllowedParametersHCL map[string][]interface{} `hcl:"allowed_parameters"`
- DeniedParametersHCL map[string][]interface{} `hcl:"denied_parameters"`
- RequiredParametersHCL []string `hcl:"required_parameters"`
- MFAMethodsHCL []string `hcl:"mfa_methods"`
- ControlGroupHCL *ControlGroupHCL `hcl:"control_group"`
-}
-
-type ControlGroupHCL struct {
- TTL interface{} `hcl:"ttl"`
- Factors map[string]*ControlGroupFactor `hcl:"factor"`
-}
-
-type ControlGroup struct {
- TTL time.Duration
- Factors []*ControlGroupFactor
-}
-
-type ControlGroupFactor struct {
- Name string
- Identity *IdentityFactor `hcl:"identity"`
-}
-
-type IdentityFactor struct {
- GroupIDs []string `hcl:"group_ids"`
- GroupNames []string `hcl:"group_names"`
- ApprovalsRequired int `hcl:"approvals"`
-}
-
-type ACLPermissions struct {
- CapabilitiesBitmap uint32
- MinWrappingTTL time.Duration
- MaxWrappingTTL time.Duration
- AllowedParameters map[string][]interface{}
- DeniedParameters map[string][]interface{}
- RequiredParameters []string
- MFAMethods []string
- ControlGroup *ControlGroup
-}
-
-func (p *ACLPermissions) Clone() (*ACLPermissions, error) {
- ret := &ACLPermissions{
- CapabilitiesBitmap: p.CapabilitiesBitmap,
- MinWrappingTTL: p.MinWrappingTTL,
- MaxWrappingTTL: p.MaxWrappingTTL,
- RequiredParameters: p.RequiredParameters[:],
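- // Note: the full-slice expression above copies the slice header only;
- // the backing array is shared with the original permissions.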
- }
-
- switch {
- case p.AllowedParameters == nil:
- case len(p.AllowedParameters) == 0:
- ret.AllowedParameters = make(map[string][]interface{})
- default:
- clonedAllowed, err := copystructure.Copy(p.AllowedParameters)
- if err != nil {
- return nil, err
- }
- ret.AllowedParameters = clonedAllowed.(map[string][]interface{})
- }
-
- switch {
- case p.DeniedParameters == nil:
- case len(p.DeniedParameters) == 0:
- ret.DeniedParameters = make(map[string][]interface{})
- default:
- clonedDenied, err := copystructure.Copy(p.DeniedParameters)
- if err != nil {
- return nil, err
- }
- ret.DeniedParameters = clonedDenied.(map[string][]interface{})
- }
-
- switch {
- case p.MFAMethods == nil:
- case len(p.MFAMethods) == 0:
- ret.MFAMethods = []string{}
- default:
- clonedMFAMethods, err := copystructure.Copy(p.MFAMethods)
- if err != nil {
- return nil, err
- }
- ret.MFAMethods = clonedMFAMethods.([]string)
- }
-
- switch {
- case p.ControlGroup == nil:
- default:
- clonedControlGroup, err := copystructure.Copy(p.ControlGroup)
- if err != nil {
- return nil, err
- }
- ret.ControlGroup = clonedControlGroup.(*ControlGroup)
- }
-
- return ret, nil
-}
-
-// ParseACLPolicy is used to parse the specified ACL rules into an
-// intermediary set of policies, before being compiled into
-// the ACL
-func ParseACLPolicy(ns *namespace.Namespace, rules string) (*Policy, error) {
- return parseACLPolicyWithTemplating(ns, rules, false, nil, nil)
-}
-
-// parseACLPolicyWithTemplating performs the actual work and checks whether we
-// should perform substitutions. If performTemplating is true, we know the
-// policy is templated and don't check again; otherwise we check whether it
-// is a templated policy.
-func parseACLPolicyWithTemplating(ns *namespace.Namespace, rules string, performTemplating bool, entity *identity.Entity, groups []*identity.Group) (*Policy, error) {
- // Parse the rules
- root, err := hcl.Parse(rules)
- if err != nil {
- return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err)
- }
-
- // Top-level item should be the object list
- list, ok := root.Node.(*ast.ObjectList)
- if !ok {
- return nil, fmt.Errorf("failed to parse policy: does not contain a root object")
- }
-
- // Check for invalid top-level keys
- valid := []string{
- "name",
- "path",
- }
- if err := hclutil.CheckHCLKeys(list, valid); err != nil {
- return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err)
- }
-
- // Create the initial policy and store the raw text of the rules
- p := Policy{
- Raw: rules,
- Type: PolicyTypeACL,
- namespace: ns,
- }
- if err := hcl.DecodeObject(&p, list); err != nil {
- return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err)
- }
-
- if o := list.Filter("path"); len(o.Items) > 0 {
- if err := parsePaths(&p, o, performTemplating, entity, groups); err != nil {
- return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err)
- }
- }
-
- return &p, nil
-}
-
-func parsePaths(result *Policy, list *ast.ObjectList, performTemplating bool, entity *identity.Entity, groups []*identity.Group) error {
- paths := make([]*PathRules, 0, len(list.Items))
- for _, item := range list.Items {
- key := "path"
- if len(item.Keys) > 0 {
- key = item.Keys[0].Token.Value().(string)
- }
-
- // Check the path
- if performTemplating {
- _, templated, err := identity.PopulateString(&identity.PopulateStringInput{
- String: key,
- Entity: entity,
- Groups: groups,
- Namespace: result.namespace,
- })
- if err != nil {
- continue
- }
- key = templated
- } else {
- hasTemplating, _, err := identity.PopulateString(&identity.PopulateStringInput{
- ValidityCheckOnly: true,
- String: key,
- })
- if err != nil {
- return errwrap.Wrapf("failed to validate policy templating: {{err}}", err)
- }
- if hasTemplating {
- result.Templated = true
- }
- }
-
- valid := []string{
- "comment",
- "policy",
- "capabilities",
- "allowed_parameters",
- "denied_parameters",
- "required_parameters",
- "min_wrapping_ttl",
- "max_wrapping_ttl",
- "mfa_methods",
- "control_group",
- }
- if err := hclutil.CheckHCLKeys(item.Val, valid); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("path %q:", key))
- }
-
- var pc PathRules
-
- // allocate memory so that DecodeObject can initialize the ACLPermissions struct
- pc.Permissions = new(ACLPermissions)
-
- pc.Prefix = key
-
- if err := hcl.DecodeObject(&pc, item.Val); err != nil {
- return multierror.Prefix(err, fmt.Sprintf("path %q:", key))
- }
-
- // Strip a leading '/' as paths in Vault start after the / in the API path
- if len(pc.Prefix) > 0 && pc.Prefix[0] == '/' {
- pc.Prefix = pc.Prefix[1:]
- }
-
- // Ensure we are using the full request path internally
- pc.Prefix = result.namespace.Path + pc.Prefix
-
- // Strip the glob character if found
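- // (e.g. "secret/*" in the root namespace yields Prefix "secret/"
- // with Glob set to true)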
- if strings.HasSuffix(pc.Prefix, "*") {
- pc.Prefix = strings.TrimSuffix(pc.Prefix, "*")
- pc.Glob = true
- }
-
- // Map old-style policies into capabilities
- if len(pc.Policy) > 0 {
- switch pc.Policy {
- case OldDenyPathPolicy:
- pc.Capabilities = []string{DenyCapability}
- case OldReadPathPolicy:
- pc.Capabilities = append(pc.Capabilities, []string{ReadCapability, ListCapability}...)
- case OldWritePathPolicy:
- pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability}...)
- case OldSudoPathPolicy:
- pc.Capabilities = append(pc.Capabilities, []string{CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability}...)
- default:
- return fmt.Errorf("path %q: invalid policy %q", key, pc.Policy)
- }
- }
-
- // Initialize the map
- pc.Permissions.CapabilitiesBitmap = 0
- for _, cap := range pc.Capabilities {
- switch cap {
- // If it's deny, don't include any other capability
- case DenyCapability:
- pc.Capabilities = []string{DenyCapability}
- pc.Permissions.CapabilitiesBitmap = DenyCapabilityInt
- goto PathFinished
- case CreateCapability, ReadCapability, UpdateCapability, DeleteCapability, ListCapability, SudoCapability:
- pc.Permissions.CapabilitiesBitmap |= cap2Int[cap]
- default:
- return fmt.Errorf("path %q: invalid capability %q", key, cap)
- }
- }
-
- if pc.AllowedParametersHCL != nil {
- pc.Permissions.AllowedParameters = make(map[string][]interface{}, len(pc.AllowedParametersHCL))
- for key, val := range pc.AllowedParametersHCL {
- pc.Permissions.AllowedParameters[strings.ToLower(key)] = val
- }
- }
- if pc.DeniedParametersHCL != nil {
- pc.Permissions.DeniedParameters = make(map[string][]interface{}, len(pc.DeniedParametersHCL))
-
- for key, val := range pc.DeniedParametersHCL {
- pc.Permissions.DeniedParameters[strings.ToLower(key)] = val
- }
- }
- if pc.MinWrappingTTLHCL != nil {
- dur, err := parseutil.ParseDurationSecond(pc.MinWrappingTTLHCL)
- if err != nil {
- return errwrap.Wrapf("error parsing min_wrapping_ttl: {{err}}", err)
- }
- pc.Permissions.MinWrappingTTL = dur
- }
- if pc.MaxWrappingTTLHCL != nil {
- dur, err := parseutil.ParseDurationSecond(pc.MaxWrappingTTLHCL)
- if err != nil {
- return errwrap.Wrapf("error parsing max_wrapping_ttl: {{err}}", err)
- }
- pc.Permissions.MaxWrappingTTL = dur
- }
- if pc.MFAMethodsHCL != nil {
- pc.Permissions.MFAMethods = make([]string, len(pc.MFAMethodsHCL))
- for idx, item := range pc.MFAMethodsHCL {
- pc.Permissions.MFAMethods[idx] = item
- }
- }
- if pc.ControlGroupHCL != nil {
- pc.Permissions.ControlGroup = new(ControlGroup)
- if pc.ControlGroupHCL.TTL != nil {
- dur, err := parseutil.ParseDurationSecond(pc.ControlGroupHCL.TTL)
- if err != nil {
- return errwrap.Wrapf("error parsing control group max ttl: {{err}}", err)
- }
- pc.Permissions.ControlGroup.TTL = dur
- }
-
- var factors []*ControlGroupFactor
- if pc.ControlGroupHCL.Factors != nil {
- for key, factor := range pc.ControlGroupHCL.Factors {
- // Although we only have one factor here, we need to check to make sure there is at least
- // one factor defined in this factor block.
- if factor.Identity == nil {
- return errors.New("no control_group factor provided")
- }
-
- if factor.Identity.ApprovalsRequired <= 0 ||
- (len(factor.Identity.GroupIDs) == 0 && len(factor.Identity.GroupNames) == 0) {
- return errors.New("must provide more than one identity group and approvals > 0")
- }
-
- factors = append(factors, &ControlGroupFactor{
- Name: key,
- Identity: factor.Identity,
- })
- }
- }
- if len(factors) == 0 {
- return errors.New("no control group factors provided")
- }
- pc.Permissions.ControlGroup.Factors = factors
- }
- if pc.Permissions.MinWrappingTTL != 0 &&
- pc.Permissions.MaxWrappingTTL != 0 &&
- pc.Permissions.MaxWrappingTTL < pc.Permissions.MinWrappingTTL {
- return errors.New("max_wrapping_ttl cannot be less than min_wrapping_ttl")
- }
- if len(pc.RequiredParametersHCL) > 0 {
- pc.Permissions.RequiredParameters = pc.RequiredParametersHCL[:]
- }
-
- PathFinished:
- paths = append(paths, &pc)
- }
-
- result.Paths = paths
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store.go b/vendor/github.com/hashicorp/vault/vault/policy_store.go
deleted file mode 100644
index 2c7e1296..00000000
--- a/vendor/github.com/hashicorp/vault/vault/policy_store.go
+++ /dev/null
@@ -1,840 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "path"
- "strings"
- "sync"
- "time"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/golang-lru"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // policySubPath is the sub-path used for the policy store view. This is
- // nested under the system view. policyRGPSubPath/policyEGPSubPath are
- // similar but for RGPs/EGPs.
- policyACLSubPath = "policy/"
- policyRGPSubPath = "policy-rgp/"
- policyEGPSubPath = "policy-egp/"
-
- // policyCacheSize is the number of policies that are kept cached
- policyCacheSize = 1024
-
- // defaultPolicyName is the name of the default policy
- defaultPolicyName = "default"
-
- // responseWrappingPolicyName is the name of the fixed policy
- responseWrappingPolicyName = "response-wrapping"
-
- // controlGroupPolicyName is the name of the fixed policy for control group
- // tokens
- controlGroupPolicyName = "control-group"
-
- // responseWrappingPolicy is the policy that ensures cubbyhole response
- // wrapping can always succeed.
- responseWrappingPolicy = `
-path "cubbyhole/response" {
- capabilities = ["create", "read"]
-}
-
-path "sys/wrapping/unwrap" {
- capabilities = ["update"]
-}
-`
- // controlGroupPolicy is the policy that ensures control group requests can
- // commit themselves
- controlGroupPolicy = `
-path "cubbyhole/control-group" {
- capabilities = ["update", "create", "read"]
-}
-
-path "sys/wrapping/unwrap" {
- capabilities = ["update"]
-}
-`
- // defaultPolicy is the "default" policy
- defaultPolicy = `
-# Allow tokens to look up their own properties
-path "auth/token/lookup-self" {
- capabilities = ["read"]
-}
-
-# Allow tokens to renew themselves
-path "auth/token/renew-self" {
- capabilities = ["update"]
-}
-
-# Allow tokens to revoke themselves
-path "auth/token/revoke-self" {
- capabilities = ["update"]
-}
-
-# Allow a token to look up its own capabilities on a path
-path "sys/capabilities-self" {
- capabilities = ["update"]
-}
-
-# Allow a token to look up its resultant ACL from all policies. This is useful
-# for UIs. It is an internal path because the format may change at any time
-# based on how the internal ACL features and capabilities change.
-path "sys/internal/ui/resultant-acl" {
- capabilities = ["read"]
-}
-
-# Allow a token to renew a lease via lease_id in the request body; old path for
-# old clients, new path for newer
-path "sys/renew" {
- capabilities = ["update"]
-}
-path "sys/leases/renew" {
- capabilities = ["update"]
-}
-
-# Allow looking up lease properties. This requires knowing the lease ID ahead
-# of time and does not divulge any sensitive information.
-path "sys/leases/lookup" {
- capabilities = ["update"]
-}
-
-# Allow a token to manage its own cubbyhole
-path "cubbyhole/*" {
- capabilities = ["create", "read", "update", "delete", "list"]
-}
-
-# Allow a token to wrap arbitrary values in a response-wrapping token
-path "sys/wrapping/wrap" {
- capabilities = ["update"]
-}
-
-# Allow a token to look up the creation time and TTL of a given
-# response-wrapping token
-path "sys/wrapping/lookup" {
- capabilities = ["update"]
-}
-
-# Allow a token to unwrap a response-wrapping token. This is a convenience to
-# avoid client token swapping since this is also part of the response wrapping
-# policy.
-path "sys/wrapping/unwrap" {
- capabilities = ["update"]
-}
-
-# Allow general purpose tools
-path "sys/tools/hash" {
- capabilities = ["update"]
-}
-path "sys/tools/hash/*" {
- capabilities = ["update"]
-}
-path "sys/tools/random" {
- capabilities = ["update"]
-}
-path "sys/tools/random/*" {
- capabilities = ["update"]
-}
-
-# Allow checking the status of a Control Group request if the user has the
-# accessor
-path "sys/control-group/request" {
- capabilities = ["update"]
-}
-`
-)
-
-var (
- immutablePolicies = []string{
- "root",
- responseWrappingPolicyName,
- controlGroupPolicyName,
- }
- nonAssignablePolicies = []string{
- responseWrappingPolicyName,
- controlGroupPolicyName,
- }
-)
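As a minimal illustration of how these lists are consulted, the sketch below shows the guard that the write and delete paths apply before touching a policy; it uses the same strutil helper this file already imports, and the "my-app" policy name is purely hypothetical.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/strutil"
)

func main() {
	immutable := []string{"root", "response-wrapping", "control-group"}
	for _, name := range []string{"root", "my-app"} {
		// Immutable policies may never be updated or deleted.
		if strutil.StrListContains(immutable, name) {
			fmt.Printf("cannot update %q policy\n", name)
			continue
		}
		fmt.Printf("%q is writable\n", name)
	}
}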
-
-// PolicyStore is used to provide durable storage of policies, and to
-// manage the ACLs associated with them.
-type PolicyStore struct {
- entPolicyStore
-
- core *Core
- aclView *BarrierView
- rgpView *BarrierView
- egpView *BarrierView
-
- tokenPoliciesLRU *lru.TwoQueueCache
- egpLRU *lru.TwoQueueCache
-
- // This is used to ensure that writes to the store (acl/rgp) or to the egp
- // path tree don't happen concurrently. We are okay reading stale data so
- // long as there aren't concurrent writes.
- modifyLock *sync.RWMutex
-
- // Stores whether a token policy is ACL or RGP
- policyTypeMap sync.Map
-
- // logger is the server logger copied over from core
- logger log.Logger
-}
-
-// PolicyEntry is used to store a policy by name
-type PolicyEntry struct {
- sentinelPolicy
-
- Version int
- Raw string
- Templated bool
- Type PolicyType
-}
-
-// NewPolicyStore creates a new PolicyStore that is backed by the given
-// view. It is used to durably store and manage named policies.
-func NewPolicyStore(ctx context.Context, core *Core, baseView *BarrierView, system logical.SystemView, logger log.Logger) (*PolicyStore, error) {
- ps := &PolicyStore{
- aclView: baseView.SubView(policyACLSubPath),
- rgpView: baseView.SubView(policyRGPSubPath),
- egpView: baseView.SubView(policyEGPSubPath),
- modifyLock: new(sync.RWMutex),
- logger: logger,
- core: core,
- }
-
- ps.extraInit()
-
- if !system.CachingDisabled() {
- cache, _ := lru.New2Q(policyCacheSize)
- ps.tokenPoliciesLRU = cache
- cache, _ = lru.New2Q(policyCacheSize)
- ps.egpLRU = cache
- }
-
- aclView := ps.getACLView(namespace.RootNamespace)
- keys, err := logical.CollectKeys(namespace.RootContext(ctx), aclView)
- if err != nil {
- ps.logger.Error("error collecting acl policy keys", "error", err)
- return nil, err
- }
- for _, key := range keys {
- index := ps.cacheKey(namespace.RootNamespace, ps.sanitizeName(key))
- ps.policyTypeMap.Store(index, PolicyTypeACL)
- }
-
- if err := ps.loadNamespacePolicies(ctx, core); err != nil {
- return nil, err
- }
-
- // Special-case root; doesn't exist on disk but does need to be found
- ps.policyTypeMap.Store(ps.cacheKey(namespace.RootNamespace, "root"), PolicyTypeACL)
- return ps, nil
-}
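The token-policy and EGP caches created above are 2Q LRUs from github.com/hashicorp/golang-lru. A rough sketch of the cache lifecycle relied on throughout this file (create, add, look up, invalidate); the key and value here are placeholders:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// New2Q only fails for a non-positive size, hence the ignored error above.
	cache, err := lru.New2Q(1024)
	if err != nil {
		panic(err)
	}
	cache.Add("root/default", "parsed-policy") // cache a policy under its namespaced key
	if v, ok := cache.Get("root/default"); ok {
		fmt.Println("cache hit:", v)
	}
	cache.Remove("root/default") // invalidation evicts the stale entry
}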
-
-// setupPolicyStore is used to initialize the policy store
-// when the vault is being unsealed.
-func (c *Core) setupPolicyStore(ctx context.Context) error {
- // Create the policy store
- var err error
- sysView := &dynamicSystemView{core: c}
- psLogger := c.baseLogger.Named("policy")
- c.AddLogger(psLogger)
- c.policyStore, err = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, psLogger)
- if err != nil {
- return err
- }
-
- if c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
- // Policies will sync from the primary
- return nil
- }
-
- // Ensure that the default policy exists, and if not, create it
- if err := c.policyStore.loadACLPolicy(ctx, defaultPolicyName, defaultPolicy); err != nil {
- return err
- }
- // Ensure that the response wrapping policy exists
- if err := c.policyStore.loadACLPolicy(ctx, responseWrappingPolicyName, responseWrappingPolicy); err != nil {
- return err
- }
- // Ensure that the control group policy exists
- if err := c.policyStore.loadACLPolicy(ctx, controlGroupPolicyName, controlGroupPolicy); err != nil {
- return err
- }
-
- return nil
-}
-
-// teardownPolicyStore is used to reverse setupPolicyStore
-// when the vault is being sealed.
-func (c *Core) teardownPolicyStore() error {
- c.policyStore = nil
- return nil
-}
-
-func (ps *PolicyStore) invalidate(ctx context.Context, name string, policyType PolicyType) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- ps.logger.Error("unable to invalidate key, no namespace info passed", "key", name)
- return
- }
-
- // This may come with a prefixed "/" due to joining the file path
- saneName := strings.TrimPrefix(name, "/")
- index := ps.cacheKey(ns, saneName)
-
- ps.modifyLock.Lock()
- defer ps.modifyLock.Unlock()
-
- // We don't take a separate lock before removing from the LRU here because
- // the worst that can happen is that we load it again if something has
- // since re-added it
- switch policyType {
- case PolicyTypeACL, PolicyTypeRGP:
- if ps.tokenPoliciesLRU != nil {
- ps.tokenPoliciesLRU.Remove(index)
- }
-
- case PolicyTypeEGP:
- if ps.egpLRU != nil {
- ps.egpLRU.Remove(index)
- }
-
- default:
- // Can't do anything
- return
- }
-
- // Force a reload
- out, err := ps.switchedGetPolicy(ctx, name, policyType, false)
- if err != nil {
- ps.logger.Error("error fetching policy after invalidation", "name", saneName)
- }
-
- // If the policy is now gone, the invalidation was actually a delete, so
- // we may need to perform further deletion tasks. We skip the physical
- // deletion in case another process has re-written the policy; the next
- // Get call will load the values back in.
- if out == nil {
- ps.switchedDeletePolicy(ctx, name, policyType, false)
- }
-}
-
-// SetPolicy is used to create or update the given policy
-func (ps *PolicyStore) SetPolicy(ctx context.Context, p *Policy) error {
- defer metrics.MeasureSince([]string{"policy", "set_policy"}, time.Now())
- if p == nil {
- return fmt.Errorf("nil policy passed in for storage")
- }
- if p.Name == "" {
- return fmt.Errorf("policy name missing")
- }
- // Policies are normalized to lower-case
- p.Name = ps.sanitizeName(p.Name)
- if strutil.StrListContains(immutablePolicies, p.Name) {
- return fmt.Errorf("cannot update %q policy", p.Name)
- }
-
- return ps.setPolicyInternal(ctx, p)
-}
-
-func (ps *PolicyStore) setPolicyInternal(ctx context.Context, p *Policy) error {
- ps.modifyLock.Lock()
- defer ps.modifyLock.Unlock()
-
- // Get the appropriate view based on policy type and namespace
- view := ps.getBarrierView(p.namespace, p.Type)
- if view == nil {
- return fmt.Errorf("unable to get the barrier subview for policy type %q", p.Type)
- }
-
- if err := ps.parseEGPPaths(p); err != nil {
- return err
- }
-
- // Create the entry
- entry, err := logical.StorageEntryJSON(p.Name, &PolicyEntry{
- Version: 2,
- Raw: p.Raw,
- Type: p.Type,
- Templated: p.Templated,
- sentinelPolicy: p.sentinelPolicy,
- })
- if err != nil {
- return errwrap.Wrapf("failed to create entry: {{err}}", err)
- }
-
- // Construct the cache key
- index := ps.cacheKey(p.namespace, p.Name)
-
- switch p.Type {
- case PolicyTypeACL:
- rgpView := ps.getRGPView(p.namespace)
- rgp, err := rgpView.Get(ctx, entry.Key)
- if err != nil {
- return errwrap.Wrapf("failed looking up conflicting policy: {{err}}", err)
- }
- if rgp != nil {
- return fmt.Errorf("cannot reuse policy names between ACLs and RGPs")
- }
-
- if err := view.Put(ctx, entry); err != nil {
- return errwrap.Wrapf("failed to persist policy: {{err}}", err)
- }
-
- ps.policyTypeMap.Store(index, PolicyTypeACL)
-
- if ps.tokenPoliciesLRU != nil {
- ps.tokenPoliciesLRU.Add(index, p)
- }
-
- case PolicyTypeRGP:
- aclView := ps.getACLView(p.namespace)
- acl, err := aclView.Get(ctx, entry.Key)
- if err != nil {
- return errwrap.Wrapf("failed looking up conflicting policy: {{err}}", err)
- }
- if acl != nil {
- return fmt.Errorf("cannot reuse policy names between ACLs and RGPs")
- }
-
- if err := ps.handleSentinelPolicy(ctx, p, view, entry); err != nil {
- return err
- }
-
- ps.policyTypeMap.Store(index, PolicyTypeRGP)
-
- // We load here after successfully loading into Sentinel so that on
- // error we will try loading again on the next get
- if ps.tokenPoliciesLRU != nil {
- ps.tokenPoliciesLRU.Add(index, p)
- }
-
- case PolicyTypeEGP:
- if err := ps.handleSentinelPolicy(ctx, p, view, entry); err != nil {
- return err
- }
-
- // We load here after successfully loading into Sentinel so that on
- // error we will try loading again on the next get
- if ps.egpLRU != nil {
- ps.egpLRU.Add(index, p)
- }
-
- default:
- return fmt.Errorf("unknown policy type, cannot set")
- }
-
- return nil
-}
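setPolicyInternal serializes the PolicyEntry with logical.StorageEntryJSON, and the read path later reverses this with DecodeJSON. A round-trip sketch, with an invented entry type standing in for PolicyEntry:

package main

import (
	"fmt"

	"github.com/hashicorp/vault/logical"
)

// entryV2 is a hypothetical stand-in for the PolicyEntry struct above.
type entryV2 struct {
	Version int
	Raw     string
}

func main() {
	in := entryV2{Version: 2, Raw: `path "secret/*" { capabilities = ["read"] }`}

	// Encode: the key becomes the storage path, the value is JSON.
	entry, err := logical.StorageEntryJSON("my-policy", &in)
	if err != nil {
		panic(err)
	}

	// Decode back into a fresh struct, as switchedGetPolicy does.
	var out entryV2
	if err := entry.DecodeJSON(&out); err != nil {
		panic(err)
	}
	fmt.Println(entry.Key, out.Version)
}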
-
-// GetPolicy is used to fetch the named policy
-func (ps *PolicyStore) GetPolicy(ctx context.Context, name string, policyType PolicyType) (*Policy, error) {
- return ps.switchedGetPolicy(ctx, name, policyType, true)
-}
-
-func (ps *PolicyStore) switchedGetPolicy(ctx context.Context, name string, policyType PolicyType, grabLock bool) (*Policy, error) {
- defer metrics.MeasureSince([]string{"policy", "get_policy"}, time.Now())
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- // Policies are normalized to lower-case
- name = ps.sanitizeName(name)
- index := ps.cacheKey(ns, name)
-
- var cache *lru.TwoQueueCache
- var view *BarrierView
-
- switch policyType {
- case PolicyTypeACL:
- cache = ps.tokenPoliciesLRU
- view = ps.getACLView(ns)
- case PolicyTypeRGP:
- cache = ps.tokenPoliciesLRU
- view = ps.getRGPView(ns)
- case PolicyTypeEGP:
- cache = ps.egpLRU
- view = ps.getEGPView(ns)
- case PolicyTypeToken:
- cache = ps.tokenPoliciesLRU
- val, ok := ps.policyTypeMap.Load(index)
- if !ok {
- // Doesn't exist
- return nil, nil
- }
- policyType = val.(PolicyType)
- switch policyType {
- case PolicyTypeACL:
- view = ps.getACLView(ns)
- case PolicyTypeRGP:
- view = ps.getRGPView(ns)
- default:
- return nil, fmt.Errorf("invalid type of policy in type map: %q", policyType)
- }
- }
-
- if cache != nil {
- // Check for cached policy
- if raw, ok := cache.Get(index); ok {
- return raw.(*Policy), nil
- }
- }
-
- // Special case the root policy
- if policyType == PolicyTypeACL && name == "root" && ns.ID == namespace.RootNamespaceID {
- p := &Policy{
- Name: "root",
- namespace: namespace.RootNamespace,
- }
- if cache != nil {
- cache.Add(index, p)
- }
- return p, nil
- }
-
- if grabLock {
- ps.modifyLock.Lock()
- defer ps.modifyLock.Unlock()
- }
-
- // See if anything has added it since we got the lock
- if cache != nil {
- if raw, ok := cache.Get(index); ok {
- return raw.(*Policy), nil
- }
- }
-
- // Nil-check on the view before proceeding to retrieve from storage
- if view == nil {
- return nil, fmt.Errorf("unable to get the barrier subview for policy type %q", policyType)
- }
-
- out, err := view.Get(ctx, name)
- if err != nil {
- return nil, errwrap.Wrapf("failed to read policy: {{err}}", err)
- }
-
- if out == nil {
- return nil, nil
- }
-
- policyEntry := new(PolicyEntry)
- policy := new(Policy)
- err = out.DecodeJSON(policyEntry)
- if err != nil {
- return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err)
- }
-
- // Set these up here so that they're available for loading into
- // Sentinel
- policy.Name = name
- policy.Raw = policyEntry.Raw
- policy.Type = policyEntry.Type
- policy.Templated = policyEntry.Templated
- policy.sentinelPolicy = policyEntry.sentinelPolicy
- policy.namespace = ns
- switch policyEntry.Type {
- case PolicyTypeACL:
- // Parse normally
- p, err := ParseACLPolicy(ns, policyEntry.Raw)
- if err != nil {
- return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err)
- }
- policy.Paths = p.Paths
-
- // Reset this in case they set the name in the policy itself
- policy.Name = name
-
- ps.policyTypeMap.Store(index, PolicyTypeACL)
-
- case PolicyTypeRGP:
- if err := ps.handleSentinelPolicy(ctx, policy, nil, nil); err != nil {
- return nil, err
- }
-
- ps.policyTypeMap.Store(index, PolicyTypeRGP)
-
- case PolicyTypeEGP:
- if err := ps.handleSentinelPolicy(ctx, policy, nil, nil); err != nil {
- return nil, err
- }
-
- default:
- return nil, fmt.Errorf("unknown policy type %q", policyEntry.Type.String())
- }
-
- if cache != nil {
- // Update the LRU cache
- cache.Add(index, policy)
- }
-
- return policy, nil
-}
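switchedGetPolicy checks the cache twice: once before taking modifyLock and again after, so a policy loaded by a concurrent caller is reused instead of re-read from storage. Reduced to a generic sketch, with sync.Map standing in for the internally synchronized LRU:

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu    sync.Mutex // plays the role of modifyLock
	cache sync.Map   // safe for concurrent use, like the 2Q LRU
}

func (s *store) get(key string) string {
	// First check: lock-free; a miss only costs a reload.
	if v, ok := s.cache.Load(key); ok {
		return v.(string)
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	// Second check: someone may have populated the entry while we waited.
	if v, ok := s.cache.Load(key); ok {
		return v.(string)
	}
	v := "loaded-from-storage" // stand-in for view.Get plus parsing
	s.cache.Store(key, v)
	return v
}

func main() {
	s := &store{}
	fmt.Println(s.get("root/default"))
	fmt.Println(s.get("root/default")) // served from the cache
}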
-
-// ListPolicies is used to list the available policies
-func (ps *PolicyStore) ListPolicies(ctx context.Context, policyType PolicyType) ([]string, error) {
- defer metrics.MeasureSince([]string{"policy", "list_policies"}, time.Now())
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- // Get the appropriate view based on policy type and namespace
- view := ps.getBarrierView(ns, policyType)
- if view == nil {
- return []string{}, fmt.Errorf("unable to get the barrier subview for policy type %q", policyType)
- }
-
- // Scan the view, since the policy names are the same as the
- // key names.
- var keys []string
- switch policyType {
- case PolicyTypeACL:
- keys, err = logical.CollectKeys(ctx, view)
- case PolicyTypeRGP, PolicyTypeEGP:
- return logical.CollectKeys(ctx, view)
- default:
- return nil, fmt.Errorf("unknown policy type %q", policyType)
- }
-
- // We only have non-assignable ACL policies at the moment
- for _, nonAssignable := range nonAssignablePolicies {
- deleteIndex := -1
- // Find indices of non-assignable policies in keys
- for index, key := range keys {
- if key == nonAssignable {
- // Record the index; the deletion happens outside the loop
- deleteIndex = index
- break
- }
- }
- // Remove non-assignable policies when found
- if deleteIndex != -1 {
- keys = append(keys[:deleteIndex], keys[deleteIndex+1:]...)
- }
- }
-
- return keys, err
-}
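The filtering loop above removes at most one occurrence of each non-assignable name via the standard append splice, which preserves the order of the remaining keys. As a standalone sketch:

package main

import "fmt"

func main() {
	keys := []string{"default", "response-wrapping", "team-a"}
	for i, k := range keys {
		if k == "response-wrapping" {
			// Splice the element out without allocating a new slice.
			keys = append(keys[:i], keys[i+1:]...)
			break
		}
	}
	fmt.Println(keys) // [default team-a]
}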
-
-// DeletePolicy is used to delete the named policy
-func (ps *PolicyStore) DeletePolicy(ctx context.Context, name string, policyType PolicyType) error {
- return ps.switchedDeletePolicy(ctx, name, policyType, true)
-}
-
-func (ps *PolicyStore) switchedDeletePolicy(ctx context.Context, name string, policyType PolicyType, physicalDeletion bool) error {
- defer metrics.MeasureSince([]string{"policy", "delete_policy"}, time.Now())
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- // If not set, the call comes from invalidation, where we'll already have
- // grabbed the lock
- if physicalDeletion {
- ps.modifyLock.Lock()
- defer ps.modifyLock.Unlock()
- }
-
- // Policies are normalized to lower-case
- name = ps.sanitizeName(name)
- index := ps.cacheKey(ns, name)
-
- view := ps.getBarrierView(ns, policyType)
- if view == nil {
- return fmt.Errorf("unable to get the barrier subview for policy type %q", policyType)
- }
-
- switch policyType {
- case PolicyTypeACL:
- if strutil.StrListContains(immutablePolicies, name) {
- return fmt.Errorf("cannot delete %q policy", name)
- }
- if name == "default" {
- return fmt.Errorf("cannot delete default policy")
- }
-
- if physicalDeletion {
- err := view.Delete(ctx, name)
- if err != nil {
- return errwrap.Wrapf("failed to delete policy: {{err}}", err)
- }
- }
-
- if ps.tokenPoliciesLRU != nil {
- // Clear the cache
- ps.tokenPoliciesLRU.Remove(index)
- }
-
- ps.policyTypeMap.Delete(index)
-
- case PolicyTypeRGP:
- if physicalDeletion {
- err := view.Delete(ctx, name)
- if err != nil {
- return errwrap.Wrapf("failed to delete policy: {{err}}", err)
- }
- }
-
- if ps.tokenPoliciesLRU != nil {
- // Clear the cache
- ps.tokenPoliciesLRU.Remove(index)
- }
-
- ps.policyTypeMap.Delete(index)
-
- defer ps.core.invalidateSentinelPolicy(policyType, index)
-
- case PolicyTypeEGP:
- if physicalDeletion {
- err := view.Delete(ctx, name)
- if err != nil {
- return errwrap.Wrapf("failed to delete policy: {{err}}", err)
- }
- }
-
- if ps.egpLRU != nil {
- // Clear the cache
- ps.egpLRU.Remove(index)
- }
-
- defer ps.core.invalidateSentinelPolicy(policyType, index)
-
- ps.invalidateEGPTreePath(index)
- }
-
- return nil
-}
-
-type TemplateError struct {
- Err error
-}
-
-func (t *TemplateError) WrappedErrors() []error {
- return []error{t.Err}
-}
-
-func (t *TemplateError) Error() string {
- return t.Err.Error()
-}
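WrappedErrors makes TemplateError satisfy the errwrap.Wrapper interface, so callers can unwrap it with the same hashicorp/errwrap helpers used throughout this file. A minimal sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/errwrap"
)

func main() {
	inner := errors.New("entity lookup failed") // hypothetical cause
	// Wrapf substitutes the wrapped error for the {{err}} placeholder.
	wrapped := errwrap.Wrapf("error parsing templated policy: {{err}}", inner)
	fmt.Println(wrapped)
	// Contains walks the wrap chain looking for an exact message match.
	fmt.Println(errwrap.Contains(wrapped, "entity lookup failed")) // true
}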
-
-// ACL is used to return an ACL which is built using the
-// named policies.
-func (ps *PolicyStore) ACL(ctx context.Context, entity *identity.Entity, policyNames map[string][]string) (*ACL, error) {
- var policies []*Policy
- // Fetch the policies
- for nsID, nsPolicyNames := range policyNames {
- policyNS, err := NamespaceByID(ctx, nsID, ps.core)
- if err != nil {
- return nil, err
- }
- if policyNS == nil {
- return nil, namespace.ErrNoNamespace
- }
- policyCtx := namespace.ContextWithNamespace(ctx, policyNS)
- for _, nsPolicyName := range nsPolicyNames {
- p, err := ps.GetPolicy(policyCtx, nsPolicyName, PolicyTypeToken)
- if err != nil {
- return nil, errwrap.Wrapf("failed to get policy: {{err}}", err)
- }
- if p != nil {
- policies = append(policies, p)
- }
- }
- }
-
- var fetchedGroups bool
- var groups []*identity.Group
- for i, policy := range policies {
- if policy.Type == PolicyTypeACL && policy.Templated {
- if !fetchedGroups {
- fetchedGroups = true
- if entity != nil {
- directGroups, inheritedGroups, err := ps.core.identityStore.groupsByEntityID(entity.ID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch group memberships: {{err}}", err)
- }
- groups = append(directGroups, inheritedGroups...)
- }
- }
- p, err := parseACLPolicyWithTemplating(policy.namespace, policy.Raw, true, entity, groups)
- if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("error parsing templated policy %q: {{err}}", policy.Name), err)
- }
- p.Name = policy.Name
- policies[i] = p
- }
- }
-
- // Construct the ACL
- acl, err := NewACL(ctx, policies)
- if err != nil {
- return nil, errwrap.Wrapf("failed to construct ACL: {{err}}", err)
- }
-
- return acl, nil
-}
-
-// loadACLPolicy is used to load default ACL policies. The default policies
-// are loaded into all namespaces.
-func (ps *PolicyStore) loadACLPolicy(ctx context.Context, policyName, policyText string) error {
- return ps.loadACLPolicyNamespaces(ctx, policyName, policyText)
-}
-
-// loadACLPolicyInternal is used to load default ACL policies in a specific
-// namespace.
-func (ps *PolicyStore) loadACLPolicyInternal(ctx context.Context, policyName, policyText string) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
-
- // Check if the policy already exists
- policy, err := ps.GetPolicy(ctx, policyName, PolicyTypeACL)
- if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("error fetching %s policy from store: {{err}}", policyName), err)
- }
- if policy != nil {
- if !strutil.StrListContains(immutablePolicies, policyName) || policyText == policy.Raw {
- return nil
- }
- }
-
- policy, err = ParseACLPolicy(ns, policyText)
- if err != nil {
- return errwrap.Wrapf(fmt.Sprintf("error parsing %s policy: {{err}}", policyName), err)
- }
-
- if policy == nil {
- return fmt.Errorf("parsing %q policy resulted in nil policy", policyName)
- }
-
- policy.Name = policyName
- policy.Type = PolicyTypeACL
- return ps.setPolicyInternal(ctx, policy)
-}
-
-func (ps *PolicyStore) sanitizeName(name string) string {
- return strings.ToLower(strings.TrimSpace(name))
-}
-
-func (ps *PolicyStore) cacheKey(ns *namespace.Namespace, name string) string {
- return path.Join(ns.ID, name)
-}
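Cache keys are simply the namespace ID joined to the sanitized policy name, so identical policy names in different namespaces never collide. For example (the namespace IDs are invented):

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	sanitize := func(s string) string { return strings.ToLower(strings.TrimSpace(s)) }
	fmt.Println(path.Join("root", sanitize("  Default ")))  // root/default
	fmt.Println(path.Join("ns_4Xqo2", sanitize("Default"))) // ns_4Xqo2/default
}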
diff --git a/vendor/github.com/hashicorp/vault/vault/policy_store_util.go b/vendor/github.com/hashicorp/vault/vault/policy_store_util.go
deleted file mode 100644
index c2c7a35a..00000000
--- a/vendor/github.com/hashicorp/vault/vault/policy_store_util.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
-
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
-)
-
-type entPolicyStore struct{}
-
-func (ps *PolicyStore) extraInit() {
-}
-
-func (ps *PolicyStore) loadNamespacePolicies(context.Context, *Core) error { return nil }
-
-func (ps *PolicyStore) getACLView(*namespace.Namespace) *BarrierView {
- return ps.aclView
-}
-
-func (ps *PolicyStore) getRGPView(ns *namespace.Namespace) *BarrierView {
- return ps.rgpView
-}
-
-func (ps *PolicyStore) getEGPView(ns *namespace.Namespace) *BarrierView {
- return ps.egpView
-}
-
-func (ps *PolicyStore) getBarrierView(ns *namespace.Namespace, _ PolicyType) *BarrierView {
- return ps.getACLView(ns)
-}
-
-func (ps *PolicyStore) handleSentinelPolicy(context.Context, *Policy, *BarrierView, *logical.StorageEntry) error {
- return nil
-}
-
-func (ps *PolicyStore) parseEGPPaths(*Policy) error { return nil }
-
-func (ps *PolicyStore) invalidateEGPTreePath(string) {}
-
-func (ps *PolicyStore) pathsToEGPPaths(*Policy) ([]*egpPath, error) { return nil, nil }
-
-func (ps *PolicyStore) loadACLPolicyNamespaces(ctx context.Context, policyName, policyText string) error {
- return ps.loadACLPolicyInternal(namespace.RootContext(ctx), policyName, policyText)
-}
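The "// +build !enterprise" constraint compiles these no-op stubs only into open-source builds; an enterprise build would supply a file carrying the inverse tag with real implementations. A schematic pair (file names, package, and function are hypothetical):

// file: store_oss.go
// +build !enterprise

package store

func loadNamespacePolicies() error { return nil } // no-op in OSS builds

// file: store_ent.go
// +build enterprise

package store

func loadNamespacePolicies() error {
	// enterprise-only behavior would live here
	return nil
}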
diff --git a/vendor/github.com/hashicorp/vault/vault/policy_util.go b/vendor/github.com/hashicorp/vault/vault/policy_util.go
deleted file mode 100644
index 74b92639..00000000
--- a/vendor/github.com/hashicorp/vault/vault/policy_util.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// +build !enterprise
-
-package vault
-
-type sentinelPolicy struct{}
diff --git a/vendor/github.com/hashicorp/vault/vault/rekey.go b/vendor/github.com/hashicorp/vault/vault/rekey.go
deleted file mode 100644
index ad7d914f..00000000
--- a/vendor/github.com/hashicorp/vault/vault/rekey.go
+++ /dev/null
@@ -1,972 +0,0 @@
-package vault
-
-import (
- "bytes"
- "context"
- "crypto/subtle"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "net/http"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/pgpkeys"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/shamir"
-)
-
-const (
- // coreBarrierUnsealKeysBackupPath is the path used to backup encrypted
- // unseal keys if specified during a rekey operation. This is outside of
- // the barrier.
- coreBarrierUnsealKeysBackupPath = "core/unseal-keys-backup"
-
- // coreRecoveryUnsealKeysBackupPath is the path used to backup encrypted
- // recovery keys if specified during a rekey operation. This is outside of
- // the barrier.
- coreRecoveryUnsealKeysBackupPath = "core/recovery-keys-backup"
-)
-
-// RekeyResult is used to provide the key parts back after
-// they are generated as part of the rekey.
-type RekeyResult struct {
- SecretShares [][]byte
- PGPFingerprints []string
- Backup bool
- RecoveryKey bool
- VerificationRequired bool
- VerificationNonce string
-}
-
-type RekeyVerifyResult struct {
- Complete bool
- Nonce string
-}
-
-// RekeyBackup stores the backup copy of PGP-encrypted keys
-type RekeyBackup struct {
- Nonce string
- Keys map[string][]string
-}
-
-// RekeyThreshold returns the secret threshold for the current seal
-// config. This threshold can either be the barrier key threshold or
-// the recovery key threshold, depending on whether rekey is being
-// performed on the recovery key, or whether the seal supports
-// recovery keys.
-func (c *Core) RekeyThreshold(ctx context.Context, recovery bool) (int, logical.HTTPCodedError) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return 0, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return 0, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.RLock()
- defer c.rekeyLock.RUnlock()
-
- var config *SealConfig
- var err error
- // If we are rekeying the recovery key, or if the seal supports
- // recovery keys and we are rekeying the barrier key, we use the
- // recovery config as the threshold instead.
- if recovery || c.seal.RecoveryKeySupported() {
- config, err = c.seal.RecoveryConfig(ctx)
- } else {
- config, err = c.seal.BarrierConfig(ctx)
- }
- if err != nil {
- return 0, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("unable to look up config: {{err}}", err).Error())
- }
- if config == nil {
- return 0, logical.CodedError(http.StatusBadRequest, ErrNotInit.Error())
- }
-
- return config.SecretThreshold, nil
-}
-
-// RekeyProgress is used to return the rekey progress (num shares).
-func (c *Core) RekeyProgress(recovery, verification bool) (bool, int, logical.HTTPCodedError) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return false, 0, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return false, 0, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.RLock()
- defer c.rekeyLock.RUnlock()
-
- var conf *SealConfig
- if recovery {
- conf = c.recoveryRekeyConfig
- } else {
- conf = c.barrierRekeyConfig
- }
-
- if conf == nil {
- return false, 0, logical.CodedError(http.StatusBadRequest, "rekey operation not in progress")
- }
-
- if verification {
- return len(conf.VerificationKey) > 0, len(conf.VerificationProgress), nil
- }
- return true, len(conf.RekeyProgress), nil
-}
-
-// RekeyConfig is used to read the rekey configuration
-func (c *Core) RekeyConfig(recovery bool) (*SealConfig, logical.HTTPCodedError) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- // Copy the seal config if any
- var conf *SealConfig
- if recovery {
- if c.recoveryRekeyConfig != nil {
- conf = c.recoveryRekeyConfig.Clone()
- }
- } else {
- if c.barrierRekeyConfig != nil {
- conf = c.barrierRekeyConfig.Clone()
- }
- }
-
- return conf, nil
-}
-
-// RekeyInit initializes a rekey operation for either the barrier key or the
-// recovery key; the recovery flag selects which.
-func (c *Core) RekeyInit(config *SealConfig, recovery bool) logical.HTTPCodedError {
- if config.SecretThreshold > config.SecretShares {
- return logical.CodedError(http.StatusBadRequest, "provided threshold greater than the total shares")
- }
-
- if recovery {
- return c.RecoveryRekeyInit(config)
- }
- return c.BarrierRekeyInit(config)
-}
-
-// BarrierRekeyInit is used to initialize the rekey settings for the barrier key
-func (c *Core) BarrierRekeyInit(config *SealConfig) logical.HTTPCodedError {
- if c.seal.StoredKeysSupported() {
- c.logger.Warn("stored keys supported, forcing rekey shares/threshold to 1")
- config.SecretShares = 1
- config.SecretThreshold = 1
- config.StoredShares = 1
- }
-
- if config.StoredShares > 0 {
- if !c.seal.StoredKeysSupported() {
- return logical.CodedError(http.StatusBadRequest, "storing keys not supported by barrier seal")
- }
- if len(config.PGPKeys) > 0 {
- return logical.CodedError(http.StatusBadRequest, "PGP key encryption not supported when using stored keys")
- }
- if config.Backup {
- return logical.CodedError(http.StatusBadRequest, "key backup not supported when using stored keys")
- }
-
- if c.seal.RecoveryKeySupported() {
- if config.VerificationRequired {
- return logical.CodedError(http.StatusBadRequest, "requiring verification not supported when rekeying the barrier key with recovery keys")
- }
- c.logger.Debug("using recovery seal configuration to rekey barrier key")
- }
- }
-
- // Check if the seal configuration is valid
- if err := config.Validate(); err != nil {
- c.logger.Error("invalid rekey seal configuration", "error", err)
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("invalid rekey seal configuration: {{err}}", err).Error())
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- // Prevent multiple concurrent re-keys
- if c.barrierRekeyConfig != nil {
- return logical.CodedError(http.StatusBadRequest, "rekey already in progress")
- }
-
- // Copy the configuration
- c.barrierRekeyConfig = config.Clone()
-
- // Initialize the nonce
- nonce, err := uuid.GenerateUUID()
- if err != nil {
- c.barrierRekeyConfig = nil
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error generating nonce for procedure: {{err}}", err).Error())
- }
- c.barrierRekeyConfig.Nonce = nonce
-
- if c.logger.IsInfo() {
- c.logger.Info("rekey initialized", "nonce", c.barrierRekeyConfig.Nonce, "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold, "validation_required", c.barrierRekeyConfig.VerificationRequired)
- }
- return nil
-}
-
-// RecoveryRekeyInit is used to initialize the rekey settings for the recovery key
-func (c *Core) RecoveryRekeyInit(config *SealConfig) logical.HTTPCodedError {
- if config.StoredShares > 0 {
- return logical.CodedError(http.StatusBadRequest, "stored shares not supported by recovery key")
- }
-
- // Check if the seal configuration is valid
- if err := config.Validate(); err != nil {
- c.logger.Error("invalid recovery configuration", "error", err)
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("invalid recovery configuration: {{err}}", err).Error())
- }
-
- if !c.seal.RecoveryKeySupported() {
- return logical.CodedError(http.StatusBadRequest, "recovery keys not supported")
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- // Prevent multiple concurrent re-keys
- if c.recoveryRekeyConfig != nil {
- return logical.CodedError(http.StatusBadRequest, "rekey already in progress")
- }
-
- // Copy the configuration
- c.recoveryRekeyConfig = config.Clone()
-
- // Initialize the nonce
- nonce, err := uuid.GenerateUUID()
- if err != nil {
- c.recoveryRekeyConfig = nil
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error generating nonce for procedure: {{err}}", err).Error())
- }
- c.recoveryRekeyConfig.Nonce = nonce
-
- if c.logger.IsInfo() {
- c.logger.Info("rekey initialized", "nonce", c.recoveryRekeyConfig.Nonce, "shares", c.recoveryRekeyConfig.SecretShares, "threshold", c.recoveryRekeyConfig.SecretThreshold, "validation_required", c.recoveryRekeyConfig.VerificationRequired)
- }
- return nil
-}
-
-// RekeyUpdate is used to provide a new key part for the barrier or recovery key.
-func (c *Core) RekeyUpdate(ctx context.Context, key []byte, nonce string, recovery bool) (*RekeyResult, logical.HTTPCodedError) {
- if recovery {
- return c.RecoveryRekeyUpdate(ctx, key, nonce)
- }
- return c.BarrierRekeyUpdate(ctx, key, nonce)
-}
-
-// BarrierRekeyUpdate is used to provide a new key part. Barrier rekey can be done
-// with unseal keys, or recovery keys if that's supported and we are storing the barrier
-// key.
-//
-// N.B.: If recovery keys are used to rekey, the new barrier key shares are not returned.
-func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string) (*RekeyResult, logical.HTTPCodedError) {
- // Ensure we are already unsealed
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- // Verify the key length
- min, max := c.barrier.KeyLength()
- max += shamir.ShareOverhead
- if len(key) < min {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is shorter than minimum %d bytes", min))
- }
- if len(key) > max {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is longer than maximum %d bytes", max))
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- // Get the seal configuration
- var existingConfig *SealConfig
- var err error
- var useRecovery bool // Determines whether recovery key is being used to rekey the master key
- if c.seal.StoredKeysSupported() && c.seal.RecoveryKeySupported() {
- existingConfig, err = c.seal.RecoveryConfig(ctx)
- useRecovery = true
- } else {
- existingConfig, err = c.seal.BarrierConfig(ctx)
- }
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to fetch existing config: {{err}}", err).Error())
- }
- // Ensure the barrier is initialized
- if existingConfig == nil {
- return nil, logical.CodedError(http.StatusBadRequest, ErrNotInit.Error())
- }
-
- // Ensure a rekey is in progress
- if c.barrierRekeyConfig == nil {
- return nil, logical.CodedError(http.StatusBadRequest, "no barrier rekey in progress")
- }
-
- if len(c.barrierRekeyConfig.VerificationKey) > 0 {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("rekey operation already finished; verification must be performed; nonce for the verification operation is %q", c.barrierRekeyConfig.VerificationNonce))
- }
-
- if nonce != c.barrierRekeyConfig.Nonce {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("incorrect nonce supplied; nonce for this rekey operation is %q", c.barrierRekeyConfig.Nonce))
- }
-
- // Check if we already have this piece
- for _, existing := range c.barrierRekeyConfig.RekeyProgress {
- if subtle.ConstantTimeCompare(existing, key) == 1 {
- return nil, logical.CodedError(http.StatusBadRequest, "given key has already been provided during this generation operation")
- }
- }
-
- // Store this key
- c.barrierRekeyConfig.RekeyProgress = append(c.barrierRekeyConfig.RekeyProgress, key)
-
- // If we don't have enough key shares yet, wait for more
- if len(c.barrierRekeyConfig.RekeyProgress) < existingConfig.SecretThreshold {
- if c.logger.IsDebug() {
- c.logger.Debug("cannot rekey yet, not enough keys", "keys", len(c.barrierRekeyConfig.RekeyProgress), "threshold", existingConfig.SecretThreshold)
- }
- return nil, nil
- }
-
- // Recover the master key or recovery key
- var recoveredKey []byte
- if existingConfig.SecretThreshold == 1 {
- recoveredKey = c.barrierRekeyConfig.RekeyProgress[0]
- c.barrierRekeyConfig.RekeyProgress = nil
- } else {
- recoveredKey, err = shamir.Combine(c.barrierRekeyConfig.RekeyProgress)
- c.barrierRekeyConfig.RekeyProgress = nil
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute master key: {{err}}", err).Error())
- }
- }
-
- if useRecovery {
- if err := c.seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil {
- c.logger.Error("rekey recovery key verification failed", "error", err)
- return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("recovery key verification failed: {{err}}", err).Error())
- }
- } else {
- if err := c.barrier.VerifyMaster(recoveredKey); err != nil {
- c.logger.Error("master key verification failed", "error", err)
- return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("master key verification failed: {{err}}", err).Error())
- }
- }
-
- // Generate a new master key
- newMasterKey, err := c.barrier.GenerateKey()
- if err != nil {
- c.logger.Error("failed to generate master key", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("master key generation failed: {{err}}", err).Error())
- }
-
- results := &RekeyResult{
- Backup: c.barrierRekeyConfig.Backup,
- }
- // Set results.SecretShares to the master key if only a single key
- // part is used -- no Shamir split required.
- if c.barrierRekeyConfig.SecretShares == 1 {
- results.SecretShares = append(results.SecretShares, newMasterKey)
- } else {
- // Split the master key using the Shamir algorithm
- shares, err := shamir.Split(newMasterKey, c.barrierRekeyConfig.SecretShares, c.barrierRekeyConfig.SecretThreshold)
- if err != nil {
- c.logger.Error("failed to generate shares", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate shares: {{err}}", err).Error())
- }
- results.SecretShares = shares
- }
-
- // If we are storing any shares, add them to the shares to store and remove
- // from the returned keys
- var keysToStore [][]byte
- if c.seal.StoredKeysSupported() && c.barrierRekeyConfig.StoredShares > 0 {
- for i := 0; i < c.barrierRekeyConfig.StoredShares; i++ {
- keysToStore = append(keysToStore, results.SecretShares[0])
- results.SecretShares = results.SecretShares[1:]
- }
- }
-
- // If PGP keys are passed in, encrypt shares with corresponding PGP keys.
- if len(c.barrierRekeyConfig.PGPKeys) > 0 {
- hexEncodedShares := make([][]byte, len(results.SecretShares))
- for i := range results.SecretShares {
- hexEncodedShares[i] = []byte(hex.EncodeToString(results.SecretShares[i]))
- }
- results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.barrierRekeyConfig.PGPKeys)
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to encrypt shares: {{err}}", err).Error())
- }
-
- // If backup is enabled, store backup info in vault.coreBarrierUnsealKeysBackupPath
- if c.barrierRekeyConfig.Backup {
- backupInfo := map[string][]string{}
- for i := 0; i < len(results.PGPFingerprints); i++ {
- encShare := bytes.NewBuffer(results.SecretShares[i])
- if backupInfo[results.PGPFingerprints[i]] == nil {
- backupInfo[results.PGPFingerprints[i]] = []string{hex.EncodeToString(encShare.Bytes())}
- } else {
- backupInfo[results.PGPFingerprints[i]] = append(backupInfo[results.PGPFingerprints[i]], hex.EncodeToString(encShare.Bytes()))
- }
- }
-
- backupVals := &RekeyBackup{
- Nonce: c.barrierRekeyConfig.Nonce,
- Keys: backupInfo,
- }
- buf, err := json.Marshal(backupVals)
- if err != nil {
- c.logger.Error("failed to marshal unseal key backup", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to marshal unseal key backup: {{err}}", err).Error())
- }
- pe := &physical.Entry{
- Key: coreBarrierUnsealKeysBackupPath,
- Value: buf,
- }
- if err = c.physical.Put(ctx, pe); err != nil {
- c.logger.Error("failed to save unseal key backup", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save unseal key backup: {{err}}", err).Error())
- }
- }
- }
-
- if keysToStore != nil {
- if err := c.seal.SetStoredKeys(ctx, keysToStore); err != nil {
- c.logger.Error("failed to store keys", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to store keys: {{err}}", err).Error())
- }
- }
-
- // If we are requiring validation, return now; otherwise rekey the barrier
- if c.barrierRekeyConfig.VerificationRequired {
- nonce, err := uuid.GenerateUUID()
- if err != nil {
- c.barrierRekeyConfig = nil
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate verification nonce: {{err}}", err).Error())
- }
- c.barrierRekeyConfig.VerificationNonce = nonce
- c.barrierRekeyConfig.VerificationKey = newMasterKey
-
- results.VerificationRequired = true
- results.VerificationNonce = nonce
- return results, nil
- }
-
- if err := c.performBarrierRekey(ctx, newMasterKey); err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform barrier rekey: {{err}}", err).Error())
- }
-
- c.barrierRekeyConfig = nil
- return results, nil
-}
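The share handling above is plain Shamir secret sharing: Split produces SecretShares shares, any SecretThreshold of which reconstruct the key via Combine. A round-trip sketch against the same vault/shamir package, with placeholder key material:

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/vault/shamir"
)

func main() {
	secret := []byte("new-master-key-material") // placeholder key material

	// Split into 5 shares, any 3 of which suffice to reconstruct.
	shares, err := shamir.Split(secret, 5, 3)
	if err != nil {
		panic(err)
	}

	// Combine any threshold-sized subset to recover the secret.
	recovered, err := shamir.Combine(shares[:3])
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(secret, recovered)) // true
}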
-
-func (c *Core) performBarrierRekey(ctx context.Context, newMasterKey []byte) logical.HTTPCodedError {
- // Rekey the barrier
- if err := c.barrier.Rekey(ctx, newMasterKey); err != nil {
- c.logger.Error("failed to rekey barrier", "error", err)
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to rekey barrier: {{err}}", err).Error())
- }
- if c.logger.IsInfo() {
- c.logger.Info("security barrier rekeyed", "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold)
- }
-
- c.barrierRekeyConfig.VerificationKey = nil
-
- if err := c.seal.SetBarrierConfig(ctx, c.barrierRekeyConfig); err != nil {
- c.logger.Error("error saving rekey seal configuration", "error", err)
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save rekey seal configuration: {{err}}", err).Error())
- }
-
- // Write to the canary path, which will force a synchronous truing during
- // replication
- if err := c.barrier.Put(ctx, &Entry{
- Key: coreKeyringCanaryPath,
- Value: []byte(c.barrierRekeyConfig.Nonce),
- }); err != nil {
- c.logger.Error("error saving keyring canary", "error", err)
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save keyring canary: {{err}}", err).Error())
- }
-
- c.barrierRekeyConfig.RekeyProgress = nil
-
- return nil
-}
-
-// RecoveryRekeyUpdate is used to provide a new key part
-func (c *Core) RecoveryRekeyUpdate(ctx context.Context, key []byte, nonce string) (*RekeyResult, logical.HTTPCodedError) {
- // Ensure we are already unsealed
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- // Verify the key length
- min, max := c.barrier.KeyLength()
- max += shamir.ShareOverhead
- if len(key) < min {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is shorter than minimum %d bytes", min))
- }
- if len(key) > max {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is longer than maximum %d bytes", max))
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- // Get the seal configuration
- existingConfig, err := c.seal.RecoveryConfig(ctx)
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to fetch existing recovery config: {{err}}", err).Error())
- }
- // Ensure the seal is initialized
- if existingConfig == nil {
- return nil, logical.CodedError(http.StatusBadRequest, ErrNotInit.Error())
- }
-
- // Ensure a rekey is in progress
- if c.recoveryRekeyConfig == nil {
- return nil, logical.CodedError(http.StatusBadRequest, "no recovery rekey in progress")
- }
-
- if len(c.recoveryRekeyConfig.VerificationKey) > 0 {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("rekey operation already finished; verification must be performed; nonce for the verification operation is %q", c.recoveryRekeyConfig.VerificationNonce))
- }
-
- if nonce != c.recoveryRekeyConfig.Nonce {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("incorrect nonce supplied; nonce for this rekey operation is %q", c.recoveryRekeyConfig.Nonce))
- }
-
- // Check if we already have this piece
- for _, existing := range c.recoveryRekeyConfig.RekeyProgress {
- if subtle.ConstantTimeCompare(existing, key) == 1 {
- return nil, logical.CodedError(http.StatusBadRequest, "given key has already been provided during this rekey operation")
- }
- }
-
- // Store this key
- c.recoveryRekeyConfig.RekeyProgress = append(c.recoveryRekeyConfig.RekeyProgress, key)
-
- // If we don't have enough key shares yet, wait for more
- if len(c.recoveryRekeyConfig.RekeyProgress) < existingConfig.SecretThreshold {
- if c.logger.IsDebug() {
- c.logger.Debug("cannot rekey yet, not enough keys", "keys", len(c.recoveryRekeyConfig.RekeyProgress), "threshold", existingConfig.SecretThreshold)
- }
- return nil, nil
- }
-
- // Recover the current recovery key
- var recoveryKey []byte
- if existingConfig.SecretThreshold == 1 {
- recoveryKey = c.recoveryRekeyConfig.RekeyProgress[0]
- c.recoveryRekeyConfig.RekeyProgress = nil
- } else {
- recoveryKey, err = shamir.Combine(c.recoveryRekeyConfig.RekeyProgress)
- c.recoveryRekeyConfig.RekeyProgress = nil
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute recovery key: {{err}}", err).Error())
- }
- }
-
- // Verify the recovery key
- if err := c.seal.VerifyRecoveryKey(ctx, recoveryKey); err != nil {
- c.logger.Error("recovery key verification failed", "error", err)
- return nil, logical.CodedError(http.StatusBadRequest, errwrap.Wrapf("recovery key verification failed: {{err}}", err).Error())
- }
-
- // Generate a new recovery key
- newMasterKey, err := c.barrier.GenerateKey()
- if err != nil {
- c.logger.Error("failed to generate recovery key", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("recovery key generation failed: {{err}}", err).Error())
- }
-
- // Return the master key if only a single key part is used
- results := &RekeyResult{
- Backup: c.recoveryRekeyConfig.Backup,
- }
-
- if c.recoveryRekeyConfig.SecretShares == 1 {
- results.SecretShares = append(results.SecretShares, newMasterKey)
- } else {
- // Split the master key using the Shamir algorithm
- shares, err := shamir.Split(newMasterKey, c.recoveryRekeyConfig.SecretShares, c.recoveryRekeyConfig.SecretThreshold)
- if err != nil {
- c.logger.Error("failed to generate shares", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate shares: {{err}}", err).Error())
- }
- results.SecretShares = shares
- }
-
- if len(c.recoveryRekeyConfig.PGPKeys) > 0 {
- hexEncodedShares := make([][]byte, len(results.SecretShares))
- for i := range results.SecretShares {
- hexEncodedShares[i] = []byte(hex.EncodeToString(results.SecretShares[i]))
- }
- results.PGPFingerprints, results.SecretShares, err = pgpkeys.EncryptShares(hexEncodedShares, c.recoveryRekeyConfig.PGPKeys)
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to encrypt shares: {{err}}", err).Error())
- }
-
- if c.recoveryRekeyConfig.Backup {
- backupInfo := map[string][]string{}
- for i := 0; i < len(results.PGPFingerprints); i++ {
- encShare := bytes.NewBuffer(results.SecretShares[i])
- if backupInfo[results.PGPFingerprints[i]] == nil {
- backupInfo[results.PGPFingerprints[i]] = []string{hex.EncodeToString(encShare.Bytes())}
- } else {
- backupInfo[results.PGPFingerprints[i]] = append(backupInfo[results.PGPFingerprints[i]], hex.EncodeToString(encShare.Bytes()))
- }
- }
-
- backupVals := &RekeyBackup{
- Nonce: c.recoveryRekeyConfig.Nonce,
- Keys: backupInfo,
- }
- buf, err := json.Marshal(backupVals)
- if err != nil {
- c.logger.Error("failed to marshal recovery key backup", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to marshal recovery key backup: {{err}}", err).Error())
- }
- pe := &physical.Entry{
- Key: coreRecoveryUnsealKeysBackupPath,
- Value: buf,
- }
- if err = c.physical.Put(ctx, pe); err != nil {
- c.logger.Error("failed to save unseal key backup", "error", err)
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save unseal key backup: {{err}}", err).Error())
- }
- }
- }
-
- // If we are requiring validation, return now; otherwise save the recovery
- // key
- if c.recoveryRekeyConfig.VerificationRequired {
- nonce, err := uuid.GenerateUUID()
- if err != nil {
- c.recoveryRekeyConfig = nil
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to generate verification nonce: {{err}}", err).Error())
- }
- c.recoveryRekeyConfig.VerificationNonce = nonce
- c.recoveryRekeyConfig.VerificationKey = newMasterKey
-
- results.VerificationRequired = true
- results.VerificationNonce = nonce
- return results, nil
- }
-
- if err := c.performRecoveryRekey(ctx, newMasterKey); err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform recovery rekey: {{err}}", err).Error())
- }
-
- c.recoveryRekeyConfig = nil
- return results, nil
-}
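The backup blob written above groups the hex-encoded, PGP-encrypted shares by the fingerprint of the key that encrypted them, since a single PGP key may receive several shares. The grouping reduced to a standalone sketch with dummy data (appending to a nil map value also covers the first-use case the if/else above handles explicitly):

package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

func main() {
	fingerprints := []string{"fp-alice", "fp-bob", "fp-alice"} // hypothetical
	shares := [][]byte{{0x01}, {0x02}, {0x03}}                 // stand-ins for encrypted shares

	backupInfo := map[string][]string{}
	for i, fp := range fingerprints {
		// append on a nil slice allocates it, so no existence check is needed.
		backupInfo[fp] = append(backupInfo[fp], hex.EncodeToString(shares[i]))
	}

	buf, err := json.Marshal(backupInfo)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // {"fp-alice":["01","03"],"fp-bob":["02"]}
}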
-
-func (c *Core) performRecoveryRekey(ctx context.Context, newMasterKey []byte) logical.HTTPCodedError {
- if err := c.seal.SetRecoveryKey(ctx, newMasterKey); err != nil {
- c.logger.Error("failed to set recovery key", "error", err)
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to set recovery key: {{err}}", err).Error())
- }
-
- c.recoveryRekeyConfig.VerificationKey = nil
-
- if err := c.seal.SetRecoveryConfig(ctx, c.recoveryRekeyConfig); err != nil {
- c.logger.Error("error saving rekey seal configuration", "error", err)
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save rekey seal configuration: {{err}}", err).Error())
- }
-
- // Write to the canary path, which will force a synchronous truing during
- // replication
- if err := c.barrier.Put(ctx, &Entry{
- Key: coreKeyringCanaryPath,
- Value: []byte(c.recoveryRekeyConfig.Nonce),
- }); err != nil {
- c.logger.Error("error saving keyring canary", "error", err)
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to save keyring canary: {{err}}", err).Error())
- }
-
- c.recoveryRekeyConfig.RekeyProgress = nil
-
- return nil
-}
-
-func (c *Core) RekeyVerify(ctx context.Context, key []byte, nonce string, recovery bool) (ret *RekeyVerifyResult, retErr logical.HTTPCodedError) {
- // Ensure we are already unsealed
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- // Verify the key length
- min, max := c.barrier.KeyLength()
- max += shamir.ShareOverhead
- if len(key) < min {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is shorter than minimum %d bytes", min))
- }
- if len(key) > max {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("key is longer than maximum %d bytes", max))
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- config := c.barrierRekeyConfig
- if recovery {
- config = c.recoveryRekeyConfig
- }
-
- // Ensure a rekey is in progress
- if config == nil {
- return nil, logical.CodedError(http.StatusBadRequest, "no rekey in progress")
- }
-
- if len(config.VerificationKey) == 0 {
- return nil, logical.CodedError(http.StatusBadRequest, "no rekey verification in progress")
- }
-
- if nonce != config.VerificationNonce {
- return nil, logical.CodedError(http.StatusBadRequest, fmt.Sprintf("incorrect nonce supplied; nonce for this verify operation is %q", config.VerificationNonce))
- }
-
- // Check if we already have this piece
- for _, existing := range config.VerificationProgress {
- if subtle.ConstantTimeCompare(existing, key) == 1 {
- return nil, logical.CodedError(http.StatusBadRequest, "given key has already been provided during this verify operation")
- }
- }
-
- // Store this key
- config.VerificationProgress = append(config.VerificationProgress, key)
-
- // If we don't have enough key shares yet, wait for more
- if len(config.VerificationProgress) < config.SecretThreshold {
- if c.logger.IsDebug() {
- c.logger.Debug("cannot verify yet, not enough keys", "keys", len(config.VerificationProgress), "threshold", config.SecretThreshold)
- }
- return nil, nil
- }
-
- // Clear the progress on exit; rotate the nonce unless verification completed
- defer func() {
- config.VerificationProgress = nil
- if ret != nil && ret.Complete {
- return
- }
- // Not complete, so rotate nonce
- nonce, err := uuid.GenerateUUID()
- if err == nil {
- config.VerificationNonce = nonce
- if ret != nil {
- ret.Nonce = nonce
- }
- }
- }()
-
- // Recover the master key or recovery key
- var recoveredKey []byte
- if config.SecretThreshold == 1 {
- recoveredKey = config.VerificationProgress[0]
- } else {
- var err error
- recoveredKey, err = shamir.Combine(config.VerificationProgress)
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to compute key for verification: {{err}}", err).Error())
- }
- }
-
- if subtle.ConstantTimeCompare(recoveredKey, config.VerificationKey) != 1 {
- c.logger.Error("rekey verification failed")
- return nil, logical.CodedError(http.StatusBadRequest, "rekey verification failed; incorrect key shares supplied")
- }
-
- switch recovery {
- case false:
- if err := c.performBarrierRekey(ctx, recoveredKey); err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform rekey: {{err}}", err).Error())
- }
- c.barrierRekeyConfig = nil
- default:
- if err := c.performRecoveryRekey(ctx, recoveredKey); err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to perform recovery key rekey: {{err}}", err).Error())
- }
- c.recoveryRekeyConfig = nil
- }
-
- res := &RekeyVerifyResult{
- Nonce: config.VerificationNonce,
- Complete: true,
- }
-
- return res, nil
-}
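Duplicate-share detection and the final verification both go through crypto/subtle, so comparison time does not reveal how many leading bytes matched. ConstantTimeCompare returns 1 only for equal-length, equal-content slices:

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	a := []byte("key-share-material")
	b := []byte("key-share-material")
	c := []byte("different-material")

	fmt.Println(subtle.ConstantTimeCompare(a, b)) // 1: identical
	fmt.Println(subtle.ConstantTimeCompare(a, c)) // 0: mismatch
	// Differing lengths return 0 immediately; equal-length inputs are
	// always examined in full, regardless of where they diverge.
	fmt.Println(subtle.ConstantTimeCompare(a, a[:4])) // 0
}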
-
-// RekeyCancel is used to cancel an in-progress rekey
-func (c *Core) RekeyCancel(recovery bool) logical.HTTPCodedError {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- // Clear any progress or config
- if recovery {
- c.recoveryRekeyConfig = nil
- } else {
- c.barrierRekeyConfig = nil
- }
- return nil
-}
-
-// RekeyVerifyRestart is used to start the verification process over
-func (c *Core) RekeyVerifyRestart(recovery bool) logical.HTTPCodedError {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- // Attempt to generate a new nonce, but don't bail if it doesn't succeed
- // (which is extraordinarily unlikely)
- nonce, nonceErr := uuid.GenerateUUID()
-
- // Clear any progress or config
- if recovery {
- c.recoveryRekeyConfig.VerificationProgress = nil
- if nonceErr == nil {
- c.recoveryRekeyConfig.VerificationNonce = nonce
- }
- } else {
- c.barrierRekeyConfig.VerificationProgress = nil
- if nonceErr == nil {
- c.barrierRekeyConfig.VerificationNonce = nonce
- }
- }
-
- return nil
-}
-
-// RekeyRetrieveBackup is used to retrieve any backed-up PGP-encrypted unseal
-// keys
-func (c *Core) RekeyRetrieveBackup(ctx context.Context, recovery bool) (*RekeyBackup, logical.HTTPCodedError) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil, logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return nil, logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.RLock()
- defer c.rekeyLock.RUnlock()
-
- var entry *physical.Entry
- var err error
- if recovery {
- entry, err = c.physical.Get(ctx, coreRecoveryUnsealKeysBackupPath)
- } else {
- entry, err = c.physical.Get(ctx, coreBarrierUnsealKeysBackupPath)
- }
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error getting keys from backup: {{err}}", err).Error())
- }
- if entry == nil {
- return nil, nil
- }
-
- ret := &RekeyBackup{}
- err = jsonutil.DecodeJSON(entry.Value, ret)
- if err != nil {
- return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error decoding backup keys: {{err}}", err).Error())
- }
-
- return ret, nil
-}
-
-// RekeyDeleteBackup is used to delete any backed-up PGP-encrypted unseal keys
-func (c *Core) RekeyDeleteBackup(ctx context.Context, recovery bool) logical.HTTPCodedError {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return logical.CodedError(http.StatusServiceUnavailable, consts.ErrSealed.Error())
- }
- if c.standby {
- return logical.CodedError(http.StatusBadRequest, consts.ErrStandby.Error())
- }
-
- c.rekeyLock.Lock()
- defer c.rekeyLock.Unlock()
-
- if recovery {
- err := c.physical.Delete(ctx, coreRecoveryUnsealKeysBackupPath)
- if err != nil {
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error deleting backup keys: {{err}}", err).Error())
- }
- return nil
- }
- err := c.physical.Delete(ctx, coreBarrierUnsealKeysBackupPath)
- if err != nil {
- return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("error deleting backup keys: {{err}}", err).Error())
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/replication_cluster_util.go b/vendor/github.com/hashicorp/vault/vault/replication_cluster_util.go
deleted file mode 100644
index 013cc8f7..00000000
--- a/vendor/github.com/hashicorp/vault/vault/replication_cluster_util.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import "github.com/hashicorp/vault/helper/consts"
-
-type ReplicatedCluster struct {
- State consts.ReplicationState
- ClusterID string
- PrimaryClusterAddr string
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding.go
deleted file mode 100644
index d0fbd286..00000000
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding.go
+++ /dev/null
@@ -1,479 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- math "math"
- "net"
- "net/http"
- "net/url"
- "sync"
- "sync/atomic"
- "time"
-
- cache "github.com/patrickmn/go-cache"
-
- uuid "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/forwarding"
- "golang.org/x/net/http2"
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
-)
-
-const (
- clusterListenerAcceptDeadline = 500 * time.Millisecond
-
- // PerformanceReplicationALPN is the negotiated protocol used for
- // performance replication.
- PerformanceReplicationALPN = "replication_v1"
-
- // DRReplicationALPN is the negotiated protocol used for
- // DR replication.
- DRReplicationALPN = "replication_dr_v1"
-
- perfStandbyALPN = "perf_standby_v1"
-
- requestForwardingALPN = "req_fw_sb-act_v1"
-)
-
-var (
- // Making this a package var allows tests to modify it
- HeartbeatInterval = 5 * time.Second
-)
-
-type SecondaryConnsCacheVals struct {
- ID string
- Token string
- Connection net.Conn
- Mode consts.ReplicationState
-}
-
-// startForwarding starts the listeners and servers necessary to handle forwarded requests
-func (c *Core) startForwarding(ctx context.Context) error {
- c.logger.Debug("cluster listener setup function")
- defer c.logger.Debug("leaving cluster listener setup function")
-
- // Clean up in case we have transitioned from a client to a server
- c.requestForwardingConnectionLock.Lock()
- c.clearForwardingClients()
- c.requestForwardingConnectionLock.Unlock()
-
- // Resolve locally to avoid races
- ha := c.ha != nil
-
- var perfStandbyRepCluster *ReplicatedCluster
- if ha {
- id, err := uuid.GenerateUUID()
- if err != nil {
- return err
- }
-
- perfStandbyRepCluster = &ReplicatedCluster{
- State: consts.ReplicationPerformanceStandby,
- ClusterID: id,
- PrimaryClusterAddr: c.clusterAddr,
- }
- if err = c.setupReplicatedClusterPrimary(perfStandbyRepCluster); err != nil {
- return err
- }
- }
-
- // Get our TLS config
- tlsConfig, err := c.ClusterTLSConfig(ctx, nil, perfStandbyRepCluster)
- if err != nil {
- c.logger.Error("failed to get tls configuration when starting forwarding", "error", err)
- return err
- }
-
- // The server supports all of the possible protos
- tlsConfig.NextProtos = []string{"h2", requestForwardingALPN, perfStandbyALPN, PerformanceReplicationALPN, DRReplicationALPN}
-
- if !atomic.CompareAndSwapUint32(c.rpcServerActive, 0, 1) {
- c.logger.Warn("forwarding rpc server already running")
- return nil
- }
-
- fwRPCServer := grpc.NewServer(
- grpc.KeepaliveParams(keepalive.ServerParameters{
- Time: 2 * HeartbeatInterval,
- }),
- grpc.MaxRecvMsgSize(math.MaxInt32),
- grpc.MaxSendMsgSize(math.MaxInt32),
- )
-
- // Setup performance standby RPC servers
- perfStandbyCount := 0
- if !c.IsDRSecondary() && !c.disablePerfStandby {
- perfStandbyCount = c.perfStandbyCount()
- }
- perfStandbySlots := make(chan struct{}, perfStandbyCount)
-
- perfStandbyCache := cache.New(2*HeartbeatInterval, 1*time.Second)
- perfStandbyCache.OnEvicted(func(secondaryID string, _ interface{}) {
- c.logger.Debug("removing performance standby", "id", secondaryID)
- c.removePerfStandbySecondary(context.Background(), secondaryID)
- select {
- case <-perfStandbySlots:
- default:
- c.logger.Warn("perf secondary timeout hit but no slot to free")
- }
- })
-
- perfStandbyReplicationRPCServer := perfStandbyRPCServer(c, perfStandbyCache)
-
- if ha && c.clusterHandler != nil {
- RegisterRequestForwardingServer(fwRPCServer, &forwardedRequestRPCServer{
- core: c,
- handler: c.clusterHandler,
- perfStandbySlots: perfStandbySlots,
- perfStandbyRepCluster: perfStandbyRepCluster,
- perfStandbyCache: perfStandbyCache,
- })
- }
-
- // Create the HTTP/2 server that will be shared by both RPC and regular
- // duties. Doing it this way instead of listening via the server and gRPC
- // allows us to re-use the same port via ALPN. We can just tell the server
- // to serve a given conn and which handler to use.
- fws := &http2.Server{
- // Our forwarding connections heartbeat regularly, so we want anything
- // else to go away/get cleaned up pretty rapidly
- IdleTimeout: 5 * HeartbeatInterval,
- }
-
- // Shutdown coordination logic
- shutdown := new(uint32)
- shutdownWg := &sync.WaitGroup{}
-
- for _, addr := range c.clusterListenerAddrs {
- shutdownWg.Add(1)
-
- // Force a local resolution to avoid data races
- laddr := addr
-
- // Start our listening loop
- go func() {
- defer shutdownWg.Done()
-
- // closeCh is used to shutdown the spawned goroutines once this
- // function returns
- closeCh := make(chan struct{})
- defer func() {
- close(closeCh)
- }()
-
- if c.logger.IsInfo() {
- c.logger.Info("starting listener", "listener_address", laddr)
- }
-
- // Create a TCP listener. We do this separately and specifically
- // with TCP so that we can set deadlines.
- tcpLn, err := net.ListenTCP("tcp", laddr)
- if err != nil {
- c.logger.Error("error starting listener", "error", err)
- return
- }
-
- // Wrap the listener with TLS
- tlsLn := tls.NewListener(tcpLn, tlsConfig)
- defer tlsLn.Close()
-
- if c.logger.IsInfo() {
- c.logger.Info("serving cluster requests", "cluster_listen_address", tlsLn.Addr())
- }
-
- for {
- if atomic.LoadUint32(shutdown) > 0 {
- return
- }
-
- // Set the deadline for the accept call. If it passes we'll get
- // an error, causing us to check the condition at the top
- // again.
- tcpLn.SetDeadline(time.Now().Add(clusterListenerAcceptDeadline))
-
- // Accept the connection
- conn, err := tlsLn.Accept()
- if err != nil {
- if err, ok := err.(net.Error); ok && !err.Timeout() {
- c.logger.Debug("non-timeout error accepting on cluster port", "error", err)
- }
- if conn != nil {
- conn.Close()
- }
- continue
- }
- if conn == nil {
- continue
- }
-
- // Type assert to TLS connection and handshake to populate the
- // connection state
- tlsConn := conn.(*tls.Conn)
-
- // Set a deadline for the handshake. This will cause clients
- // that don't successfully auth to be kicked out quickly.
- // Cluster connections should be reliable so being marginally
- // aggressive here is fine.
- err = tlsConn.SetDeadline(time.Now().Add(30 * time.Second))
- if err != nil {
- if c.logger.IsDebug() {
- c.logger.Debug("error setting deadline for cluster connection", "error", err)
- }
- tlsConn.Close()
- continue
- }
-
- err = tlsConn.Handshake()
- if err != nil {
- if c.logger.IsDebug() {
- c.logger.Debug("error handshaking cluster connection", "error", err)
- }
- tlsConn.Close()
- continue
- }
-
- // Now, set it back to unlimited
- err = tlsConn.SetDeadline(time.Time{})
- if err != nil {
- if c.logger.IsDebug() {
- c.logger.Debug("error setting deadline for cluster connection", "error", err)
- }
- tlsConn.Close()
- continue
- }
-
- switch tlsConn.ConnectionState().NegotiatedProtocol {
- case requestForwardingALPN:
- if !ha {
- tlsConn.Close()
- continue
- }
-
- c.logger.Debug("got request forwarding connection")
-
- shutdownWg.Add(2)
- // quitCh is used to close the connection and stop the second
- // goroutine if the server closes before closeCh does.
- quitCh := make(chan struct{})
- go func() {
- select {
- case <-quitCh:
- case <-closeCh:
- }
- tlsConn.Close()
- shutdownWg.Done()
- }()
-
- go func() {
- fws.ServeConn(tlsConn, &http2.ServeConnOpts{
- Handler: fwRPCServer,
- BaseConfig: &http.Server{
- ErrorLog: c.logger.StandardLogger(nil),
- },
- })
- // close the quitCh which will close the connection and
- // the other goroutine.
- close(quitCh)
- shutdownWg.Done()
- }()
-
- case PerformanceReplicationALPN, DRReplicationALPN, perfStandbyALPN:
- handleReplicationConn(ctx, c, shutdownWg, closeCh, fws, perfStandbyReplicationRPCServer, perfStandbyCache, tlsConn)
- default:
- c.logger.Debug("unknown negotiated protocol on cluster port")
- tlsConn.Close()
- continue
- }
- }
- }()
- }
-
- // This is in its own goroutine so that we don't block the main thread, and
- // thus we use atomic and channels to coordinate.
- // However, because you can't query the status of a channel, we set a bool
- // here while we have the state lock to know whether to actually send a
- // shutdown (e.g. whether the channel will block). See issue #2083.
- c.clusterListenersRunning = true
- go func() {
- // If we get told to shut down...
- <-c.clusterListenerShutdownCh
-
- // Stop the RPC server
- c.logger.Info("shutting down forwarding rpc listeners")
- fwRPCServer.Stop()
-
- // Set the shutdown flag. This will cause the listeners to shut down
- // within the deadline in clusterListenerAcceptDeadline
- atomic.StoreUint32(shutdown, 1)
- c.logger.Info("forwarding rpc listeners stopped")
-
- // Wait for them all to shut down
- shutdownWg.Wait()
- c.logger.Info("rpc listeners successfully shut down")
-
- // Clear us up to run this function again
- atomic.StoreUint32(c.rpcServerActive, 0)
-
- // Tell the main thread that shutdown is done.
- c.clusterListenerShutdownSuccessCh <- struct{}{}
- }()
-
- return nil
-}
-
-// refreshRequestForwardingConnection ensures that the client/transport are
-// alive and that the current active address value matches the most
-// recently-known address.
-func (c *Core) refreshRequestForwardingConnection(ctx context.Context, clusterAddr string) error {
- c.logger.Debug("refreshing forwarding connection")
- defer c.logger.Debug("done refreshing forwarding connection")
-
- c.requestForwardingConnectionLock.Lock()
- defer c.requestForwardingConnectionLock.Unlock()
-
- // Clean things up first
- c.clearForwardingClients()
-
- // If we don't have anything to connect to, just return
- if clusterAddr == "" {
- return nil
- }
-
- clusterURL, err := url.Parse(clusterAddr)
- if err != nil {
- c.logger.Error("error parsing cluster address attempting to refresh forwarding connection", "error", err)
- return err
- }
-
- // Set up grpc forwarding handling
- // It's not really insecure, but we have to dial manually to get the
- // ALPN header right. It's just "insecure" because gRPC isn't managing
- // the TLS state.
- dctx, cancelFunc := context.WithCancel(ctx)
- c.rpcClientConn, err = grpc.DialContext(dctx, clusterURL.Host,
- grpc.WithDialer(c.getGRPCDialer(ctx, requestForwardingALPN, "", nil, nil, nil)),
- grpc.WithInsecure(), // it's not, we handle it in the dialer
- grpc.WithKeepaliveParams(keepalive.ClientParameters{
- Time: 2 * HeartbeatInterval,
- }),
- grpc.WithDefaultCallOptions(
- grpc.MaxCallRecvMsgSize(math.MaxInt32),
- grpc.MaxCallSendMsgSize(math.MaxInt32),
- ))
- if err != nil {
- cancelFunc()
- c.logger.Error("err setting up forwarding rpc client", "error", err)
- return err
- }
- c.rpcClientConnContext = dctx
- c.rpcClientConnCancelFunc = cancelFunc
- c.rpcForwardingClient = &forwardingClient{
- RequestForwardingClient: NewRequestForwardingClient(c.rpcClientConn),
- core: c,
- echoTicker: time.NewTicker(HeartbeatInterval),
- echoContext: dctx,
- }
- c.rpcForwardingClient.startHeartbeat()
-
- return nil
-}
-
-func (c *Core) clearForwardingClients() {
- c.logger.Debug("clearing forwarding clients")
- defer c.logger.Debug("done clearing forwarding clients")
-
- if c.rpcClientConnCancelFunc != nil {
- c.rpcClientConnCancelFunc()
- c.rpcClientConnCancelFunc = nil
- }
- if c.rpcClientConn != nil {
- c.rpcClientConn.Close()
- c.rpcClientConn = nil
- }
-
- c.rpcClientConnContext = nil
- c.rpcForwardingClient = nil
-}
-
-// ForwardRequest forwards a given request to the active node and returns the
-// response.
-func (c *Core) ForwardRequest(req *http.Request) (int, http.Header, []byte, error) {
- c.requestForwardingConnectionLock.RLock()
- defer c.requestForwardingConnectionLock.RUnlock()
-
- if c.rpcForwardingClient == nil {
- return 0, nil, nil, ErrCannotForward
- }
-
- origPath := req.URL.Path
- defer func() {
- req.URL.Path = origPath
- }()
-
- req.URL.Path = req.Context().Value("original_request_path").(string)
-
- freq, err := forwarding.GenerateForwardedRequest(req)
- if err != nil {
- c.logger.Error("error creating forwarding RPC request", "error", err)
- return 0, nil, nil, fmt.Errorf("error creating forwarding RPC request")
- }
- if freq == nil {
- c.logger.Error("got nil forwarding RPC request")
- return 0, nil, nil, fmt.Errorf("got nil forwarding RPC request")
- }
- resp, err := c.rpcForwardingClient.ForwardRequest(c.rpcClientConnContext, freq)
- if err != nil {
- c.logger.Error("error during forwarded RPC request", "error", err)
- return 0, nil, nil, fmt.Errorf("error during forwarding RPC request")
- }
-
- var header http.Header
- if resp.HeaderEntries != nil {
- header = make(http.Header)
- for k, v := range resp.HeaderEntries {
- header[k] = v.Values
- }
- }
-
- // If we are a perf standby and the request was forwarded to the active
- // node, we should attempt to wait for the WAL to ship to offer best-effort
- // read-after-write guarantees
- if c.perfStandby && resp.LastRemoteWal > 0 {
- WaitUntilWALShipped(req.Context(), c, resp.LastRemoteWal)
- }
-
- return int(resp.StatusCode), header, resp.Body, nil
-}
-
-// getGRPCDialer is used to return a dialer that has the correct TLS
-// configuration. Otherwise gRPC tries to be helpful and stomps all over our
-// NextProtos.
-func (c *Core) getGRPCDialer(ctx context.Context, alpnProto, serverName string, caCert *x509.Certificate, repClusters *ReplicatedClusters, perfStandbyCluster *ReplicatedCluster) func(string, time.Duration) (net.Conn, error) {
- return func(addr string, timeout time.Duration) (net.Conn, error) {
- tlsConfig, err := c.ClusterTLSConfig(ctx, repClusters, perfStandbyCluster)
- if err != nil {
- c.logger.Error("failed to get tls configuration", "error", err)
- return nil, err
- }
- if serverName != "" {
- tlsConfig.ServerName = serverName
- }
- if caCert != nil {
- pool := x509.NewCertPool()
- pool.AddCert(caCert)
- tlsConfig.RootCAs = pool
- tlsConfig.ClientCAs = pool
- }
- c.logger.Debug("creating rpc dialer", "host", tlsConfig.ServerName)
-
- tlsConfig.NextProtos = []string{alpnProto}
- dialer := &net.Dialer{
- Timeout: timeout,
- }
- return tls.DialWithDialer(dialer, "tcp", addr, tlsConfig)
- }
-}
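
The deleted startForwarding/getGRPCDialer pair shows the core trick of this file: one TLS port carries HTTP/2 request forwarding and several gRPC replication protocols, distinguished purely by ALPN. The server advertises every protocol in NextProtos and switches on the handshake's NegotiatedProtocol, while each client dials with exactly one protocol pinned. A compact, hedged sketch of that dispatch loop -- ServeMux and the handler map are illustrative names, not Vault's API:

```go
package alpnmux

import (
	"crypto/tls"
	"net"
)

// ServeMux accepts connections on one port and dispatches each to a handler
// keyed by the ALPN protocol negotiated during the TLS handshake.
// cfg.NextProtos should list every key in handlers, while each client pins
// exactly one protocol in its own NextProtos.
func ServeMux(ln net.Listener, cfg *tls.Config, handlers map[string]func(*tls.Conn)) error {
	for {
		conn, err := ln.Accept()
		if err != nil {
			return err
		}
		tlsConn := tls.Server(conn, cfg)
		if err := tlsConn.Handshake(); err != nil {
			tlsConn.Close() // handshake failed: drop the connection
			continue
		}
		handle, ok := handlers[tlsConn.ConnectionState().NegotiatedProtocol]
		if !ok {
			tlsConn.Close() // mirrors the "unknown negotiated protocol" branch
			continue
		}
		go handle(tlsConn)
	}
}
```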
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc.go
deleted file mode 100644
index b3b6e0b0..00000000
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package vault
-
-import (
- "context"
- "net/http"
- "runtime"
- "sync/atomic"
- "time"
-
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/forwarding"
- cache "github.com/patrickmn/go-cache"
-)
-
-type forwardedRequestRPCServer struct {
- core *Core
- handler http.Handler
- perfStandbySlots chan struct{}
- perfStandbyRepCluster *ReplicatedCluster
- perfStandbyCache *cache.Cache
-}
-
-func (s *forwardedRequestRPCServer) ForwardRequest(ctx context.Context, freq *forwarding.Request) (*forwarding.Response, error) {
- // Parse an http.Request out of it
- req, err := forwarding.ParseForwardedRequest(freq)
- if err != nil {
- return nil, err
- }
-
- // A very dummy response writer that doesn't follow normal semantics, just
- // lets you write a status code (last written wins) and a body. But it
- // meets the interface requirements.
- w := forwarding.NewRPCResponseWriter()
-
- resp := &forwarding.Response{}
-
- runRequest := func() {
- defer func() {
- // Logic here comes mostly from the Go source code
- if err := recover(); err != nil {
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- s.core.logger.Error("panic serving forwarded request", "path", req.URL.Path, "error", err, "stacktrace", string(buf))
- }
- }()
- s.handler.ServeHTTP(w, req)
- }
- runRequest()
- resp.StatusCode = uint32(w.StatusCode())
- resp.Body = w.Body().Bytes()
-
- header := w.Header()
- if header != nil {
- resp.HeaderEntries = make(map[string]*forwarding.HeaderEntry, len(header))
- for k, v := range header {
- resp.HeaderEntries[k] = &forwarding.HeaderEntry{
- Values: v,
- }
- }
- }
-
- resp.LastRemoteWal = LastRemoteWAL(s.core)
-
- return resp, nil
-}
-
-func (s *forwardedRequestRPCServer) Echo(ctx context.Context, in *EchoRequest) (*EchoReply, error) {
- if in.ClusterAddr != "" {
- s.core.clusterPeerClusterAddrsCache.Set(in.ClusterAddr, nil, 0)
- }
- return &EchoReply{
- Message: "pong",
- ReplicationState: uint32(s.core.ReplicationState()),
- }, nil
-}
-
-type forwardingClient struct {
- RequestForwardingClient
-
- core *Core
-
- echoTicker *time.Ticker
- echoContext context.Context
-}
-
-// NOTE: we also take advantage of gRPC's keepalive bits, but as we send data
-// with these requests it's useful to keep this as well
-func (c *forwardingClient) startHeartbeat() {
- go func() {
- tick := func() {
- c.core.stateLock.RLock()
- clusterAddr := c.core.clusterAddr
- c.core.stateLock.RUnlock()
-
- ctx, cancel := context.WithTimeout(c.echoContext, 2*time.Second)
- resp, err := c.RequestForwardingClient.Echo(ctx, &EchoRequest{
- Message: "ping",
- ClusterAddr: clusterAddr,
- })
- cancel()
- if err != nil {
- c.core.logger.Debug("forwarding: error sending echo request to active node", "error", err)
- return
- }
- if resp == nil {
- c.core.logger.Debug("forwarding: empty echo response from active node")
- return
- }
- if resp.Message != "pong" {
- c.core.logger.Debug("forwarding: unexpected echo response from active node", "message", resp.Message)
- return
- }
- // Store the active node's replication state to display in
- // sys/health calls
- atomic.StoreUint32(c.core.activeNodeReplicationState, resp.ReplicationState)
- }
-
- tick()
-
- for {
- select {
- case <-c.echoContext.Done():
- c.echoTicker.Stop()
- c.core.logger.Debug("forwarding: stopping heartbeating")
- atomic.StoreUint32(c.core.activeNodeReplicationState, uint32(consts.ReplicationUnknown))
- return
- case <-c.echoTicker.C:
- tick()
- }
- }
- }()
-}
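
startHeartbeat above is a reusable shape: tick once immediately, then loop on a ticker, and let context cancellation stop the loop and the ticker together. A generic sketch of that pattern (Run and ping are hypothetical names; the 2-second per-ping timeout matches the echo timeout in the code above):

```go
package heartbeat

import (
	"context"
	"log"
	"time"
)

// Run sends a ping on every tick until ctx is cancelled, mirroring the
// startHeartbeat goroutine above: one tick up front, then a select loop
// that stops the ticker when the context ends.
func Run(ctx context.Context, interval time.Duration, ping func(context.Context) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	tick := func() {
		// Bound each ping so a hung peer cannot wedge the loop.
		tctx, cancel := context.WithTimeout(ctx, 2*time.Second)
		defer cancel()
		if err := ping(tctx); err != nil {
			log.Printf("heartbeat: %v", err)
		}
	}

	tick() // fire immediately rather than waiting a full interval

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			tick()
		}
	}
}
```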
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc_util.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc_util.go
deleted file mode 100644
index f4cd607d..00000000
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_rpc_util.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
-)
-
-func (s *forwardedRequestRPCServer) PerformanceStandbyElectionRequest(in *PerfStandbyElectionInput, reqServ RequestForwarding_PerformanceStandbyElectionRequestServer) error {
- return nil
-}
-
-type ReplicationTokenInfo struct{}
-
-func (c *forwardingClient) PerformanceStandbyElection(ctx context.Context) (*ReplicationTokenInfo, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go
deleted file mode 100644
index 6f1ce084..00000000
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.pb.go
+++ /dev/null
@@ -1,527 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: vault/request_forwarding_service.proto
-
-package vault
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- forwarding "github.com/hashicorp/vault/helper/forwarding"
- math "math"
-)
-
-import (
- context "golang.org/x/net/context"
- grpc "google.golang.org/grpc"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type EchoRequest struct {
- Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
- // ClusterAddr is used to send up a standby node's address to the active
- // node upon heartbeat
- ClusterAddr string `protobuf:"bytes,2,opt,name=cluster_addr,json=clusterAddr,proto3" json:"cluster_addr,omitempty"`
- // ClusterAddrs is used to send up a list of cluster addresses to a dr
- // primary from a dr secondary
- ClusterAddrs []string `protobuf:"bytes,3,rep,name=cluster_addrs,json=clusterAddrs,proto3" json:"cluster_addrs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EchoRequest) Reset() { *m = EchoRequest{} }
-func (m *EchoRequest) String() string { return proto.CompactTextString(m) }
-func (*EchoRequest) ProtoMessage() {}
-func (*EchoRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_f5f7512e4ab7b58a, []int{0}
-}
-
-func (m *EchoRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EchoRequest.Unmarshal(m, b)
-}
-func (m *EchoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EchoRequest.Marshal(b, m, deterministic)
-}
-func (m *EchoRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EchoRequest.Merge(m, src)
-}
-func (m *EchoRequest) XXX_Size() int {
- return xxx_messageInfo_EchoRequest.Size(m)
-}
-func (m *EchoRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_EchoRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EchoRequest proto.InternalMessageInfo
-
-func (m *EchoRequest) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *EchoRequest) GetClusterAddr() string {
- if m != nil {
- return m.ClusterAddr
- }
- return ""
-}
-
-func (m *EchoRequest) GetClusterAddrs() []string {
- if m != nil {
- return m.ClusterAddrs
- }
- return nil
-}
-
-type EchoReply struct {
- Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
- ClusterAddrs []string `protobuf:"bytes,2,rep,name=cluster_addrs,json=clusterAddrs,proto3" json:"cluster_addrs,omitempty"`
- ReplicationState uint32 `protobuf:"varint,3,opt,name=replication_state,json=replicationState,proto3" json:"replication_state,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EchoReply) Reset() { *m = EchoReply{} }
-func (m *EchoReply) String() string { return proto.CompactTextString(m) }
-func (*EchoReply) ProtoMessage() {}
-func (*EchoReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_f5f7512e4ab7b58a, []int{1}
-}
-
-func (m *EchoReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EchoReply.Unmarshal(m, b)
-}
-func (m *EchoReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EchoReply.Marshal(b, m, deterministic)
-}
-func (m *EchoReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EchoReply.Merge(m, src)
-}
-func (m *EchoReply) XXX_Size() int {
- return xxx_messageInfo_EchoReply.Size(m)
-}
-func (m *EchoReply) XXX_DiscardUnknown() {
- xxx_messageInfo_EchoReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EchoReply proto.InternalMessageInfo
-
-func (m *EchoReply) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *EchoReply) GetClusterAddrs() []string {
- if m != nil {
- return m.ClusterAddrs
- }
- return nil
-}
-
-func (m *EchoReply) GetReplicationState() uint32 {
- if m != nil {
- return m.ReplicationState
- }
- return 0
-}
-
-type ClientKey struct {
- Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
- X []byte `protobuf:"bytes,2,opt,name=x,proto3" json:"x,omitempty"`
- Y []byte `protobuf:"bytes,3,opt,name=y,proto3" json:"y,omitempty"`
- D []byte `protobuf:"bytes,4,opt,name=d,proto3" json:"d,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ClientKey) Reset() { *m = ClientKey{} }
-func (m *ClientKey) String() string { return proto.CompactTextString(m) }
-func (*ClientKey) ProtoMessage() {}
-func (*ClientKey) Descriptor() ([]byte, []int) {
- return fileDescriptor_f5f7512e4ab7b58a, []int{2}
-}
-
-func (m *ClientKey) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ClientKey.Unmarshal(m, b)
-}
-func (m *ClientKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ClientKey.Marshal(b, m, deterministic)
-}
-func (m *ClientKey) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClientKey.Merge(m, src)
-}
-func (m *ClientKey) XXX_Size() int {
- return xxx_messageInfo_ClientKey.Size(m)
-}
-func (m *ClientKey) XXX_DiscardUnknown() {
- xxx_messageInfo_ClientKey.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClientKey proto.InternalMessageInfo
-
-func (m *ClientKey) GetType() string {
- if m != nil {
- return m.Type
- }
- return ""
-}
-
-func (m *ClientKey) GetX() []byte {
- if m != nil {
- return m.X
- }
- return nil
-}
-
-func (m *ClientKey) GetY() []byte {
- if m != nil {
- return m.Y
- }
- return nil
-}
-
-func (m *ClientKey) GetD() []byte {
- if m != nil {
- return m.D
- }
- return nil
-}
-
-type PerfStandbyElectionInput struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PerfStandbyElectionInput) Reset() { *m = PerfStandbyElectionInput{} }
-func (m *PerfStandbyElectionInput) String() string { return proto.CompactTextString(m) }
-func (*PerfStandbyElectionInput) ProtoMessage() {}
-func (*PerfStandbyElectionInput) Descriptor() ([]byte, []int) {
- return fileDescriptor_f5f7512e4ab7b58a, []int{3}
-}
-
-func (m *PerfStandbyElectionInput) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PerfStandbyElectionInput.Unmarshal(m, b)
-}
-func (m *PerfStandbyElectionInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PerfStandbyElectionInput.Marshal(b, m, deterministic)
-}
-func (m *PerfStandbyElectionInput) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PerfStandbyElectionInput.Merge(m, src)
-}
-func (m *PerfStandbyElectionInput) XXX_Size() int {
- return xxx_messageInfo_PerfStandbyElectionInput.Size(m)
-}
-func (m *PerfStandbyElectionInput) XXX_DiscardUnknown() {
- xxx_messageInfo_PerfStandbyElectionInput.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PerfStandbyElectionInput proto.InternalMessageInfo
-
-type PerfStandbyElectionResponse struct {
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
- PrimaryClusterAddr string `protobuf:"bytes,3,opt,name=primary_cluster_addr,json=primaryClusterAddr,proto3" json:"primary_cluster_addr,omitempty"`
- CaCert []byte `protobuf:"bytes,4,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
- ClientCert []byte `protobuf:"bytes,5,opt,name=client_cert,json=clientCert,proto3" json:"client_cert,omitempty"`
- ClientKey *ClientKey `protobuf:"bytes,6,opt,name=client_key,json=clientKey,proto3" json:"client_key,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PerfStandbyElectionResponse) Reset() { *m = PerfStandbyElectionResponse{} }
-func (m *PerfStandbyElectionResponse) String() string { return proto.CompactTextString(m) }
-func (*PerfStandbyElectionResponse) ProtoMessage() {}
-func (*PerfStandbyElectionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_f5f7512e4ab7b58a, []int{4}
-}
-
-func (m *PerfStandbyElectionResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PerfStandbyElectionResponse.Unmarshal(m, b)
-}
-func (m *PerfStandbyElectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PerfStandbyElectionResponse.Marshal(b, m, deterministic)
-}
-func (m *PerfStandbyElectionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PerfStandbyElectionResponse.Merge(m, src)
-}
-func (m *PerfStandbyElectionResponse) XXX_Size() int {
- return xxx_messageInfo_PerfStandbyElectionResponse.Size(m)
-}
-func (m *PerfStandbyElectionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_PerfStandbyElectionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PerfStandbyElectionResponse proto.InternalMessageInfo
-
-func (m *PerfStandbyElectionResponse) GetId() string {
- if m != nil {
- return m.Id
- }
- return ""
-}
-
-func (m *PerfStandbyElectionResponse) GetClusterId() string {
- if m != nil {
- return m.ClusterId
- }
- return ""
-}
-
-func (m *PerfStandbyElectionResponse) GetPrimaryClusterAddr() string {
- if m != nil {
- return m.PrimaryClusterAddr
- }
- return ""
-}
-
-func (m *PerfStandbyElectionResponse) GetCaCert() []byte {
- if m != nil {
- return m.CaCert
- }
- return nil
-}
-
-func (m *PerfStandbyElectionResponse) GetClientCert() []byte {
- if m != nil {
- return m.ClientCert
- }
- return nil
-}
-
-func (m *PerfStandbyElectionResponse) GetClientKey() *ClientKey {
- if m != nil {
- return m.ClientKey
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*EchoRequest)(nil), "vault.EchoRequest")
- proto.RegisterType((*EchoReply)(nil), "vault.EchoReply")
- proto.RegisterType((*ClientKey)(nil), "vault.ClientKey")
- proto.RegisterType((*PerfStandbyElectionInput)(nil), "vault.PerfStandbyElectionInput")
- proto.RegisterType((*PerfStandbyElectionResponse)(nil), "vault.PerfStandbyElectionResponse")
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// RequestForwardingClient is the client API for RequestForwarding service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type RequestForwardingClient interface {
- ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error)
- Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoReply, error)
- PerformanceStandbyElectionRequest(ctx context.Context, in *PerfStandbyElectionInput, opts ...grpc.CallOption) (RequestForwarding_PerformanceStandbyElectionRequestClient, error)
-}
-
-type requestForwardingClient struct {
- cc *grpc.ClientConn
-}
-
-func NewRequestForwardingClient(cc *grpc.ClientConn) RequestForwardingClient {
- return &requestForwardingClient{cc}
-}
-
-func (c *requestForwardingClient) ForwardRequest(ctx context.Context, in *forwarding.Request, opts ...grpc.CallOption) (*forwarding.Response, error) {
- out := new(forwarding.Response)
- err := c.cc.Invoke(ctx, "/vault.RequestForwarding/ForwardRequest", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *requestForwardingClient) Echo(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoReply, error) {
- out := new(EchoReply)
- err := c.cc.Invoke(ctx, "/vault.RequestForwarding/Echo", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *requestForwardingClient) PerformanceStandbyElectionRequest(ctx context.Context, in *PerfStandbyElectionInput, opts ...grpc.CallOption) (RequestForwarding_PerformanceStandbyElectionRequestClient, error) {
- stream, err := c.cc.NewStream(ctx, &_RequestForwarding_serviceDesc.Streams[0], "/vault.RequestForwarding/PerformanceStandbyElectionRequest", opts...)
- if err != nil {
- return nil, err
- }
- x := &requestForwardingPerformanceStandbyElectionRequestClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type RequestForwarding_PerformanceStandbyElectionRequestClient interface {
- Recv() (*PerfStandbyElectionResponse, error)
- grpc.ClientStream
-}
-
-type requestForwardingPerformanceStandbyElectionRequestClient struct {
- grpc.ClientStream
-}
-
-func (x *requestForwardingPerformanceStandbyElectionRequestClient) Recv() (*PerfStandbyElectionResponse, error) {
- m := new(PerfStandbyElectionResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// RequestForwardingServer is the server API for RequestForwarding service.
-type RequestForwardingServer interface {
- ForwardRequest(context.Context, *forwarding.Request) (*forwarding.Response, error)
- Echo(context.Context, *EchoRequest) (*EchoReply, error)
- PerformanceStandbyElectionRequest(*PerfStandbyElectionInput, RequestForwarding_PerformanceStandbyElectionRequestServer) error
-}
-
-func RegisterRequestForwardingServer(s *grpc.Server, srv RequestForwardingServer) {
- s.RegisterService(&_RequestForwarding_serviceDesc, srv)
-}
-
-func _RequestForwarding_ForwardRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(forwarding.Request)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(RequestForwardingServer).ForwardRequest(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/vault.RequestForwarding/ForwardRequest",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(RequestForwardingServer).ForwardRequest(ctx, req.(*forwarding.Request))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _RequestForwarding_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(EchoRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(RequestForwardingServer).Echo(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/vault.RequestForwarding/Echo",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(RequestForwardingServer).Echo(ctx, req.(*EchoRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _RequestForwarding_PerformanceStandbyElectionRequest_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(PerfStandbyElectionInput)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(RequestForwardingServer).PerformanceStandbyElectionRequest(m, &requestForwardingPerformanceStandbyElectionRequestServer{stream})
-}
-
-type RequestForwarding_PerformanceStandbyElectionRequestServer interface {
- Send(*PerfStandbyElectionResponse) error
- grpc.ServerStream
-}
-
-type requestForwardingPerformanceStandbyElectionRequestServer struct {
- grpc.ServerStream
-}
-
-func (x *requestForwardingPerformanceStandbyElectionRequestServer) Send(m *PerfStandbyElectionResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-var _RequestForwarding_serviceDesc = grpc.ServiceDesc{
- ServiceName: "vault.RequestForwarding",
- HandlerType: (*RequestForwardingServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "ForwardRequest",
- Handler: _RequestForwarding_ForwardRequest_Handler,
- },
- {
- MethodName: "Echo",
- Handler: _RequestForwarding_Echo_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "PerformanceStandbyElectionRequest",
- Handler: _RequestForwarding_PerformanceStandbyElectionRequest_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "vault/request_forwarding_service.proto",
-}
-
-func init() {
- proto.RegisterFile("vault/request_forwarding_service.proto", fileDescriptor_f5f7512e4ab7b58a)
-}
-
-var fileDescriptor_f5f7512e4ab7b58a = []byte{
- // 493 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x41, 0x6f, 0x1a, 0x3d,
- 0x10, 0x8d, 0x81, 0x10, 0x31, 0x90, 0x88, 0xf8, 0x8b, 0xf4, 0xad, 0xa8, 0xa2, 0x90, 0xad, 0x54,
- 0x21, 0x55, 0xda, 0x8d, 0xd2, 0x73, 0x0f, 0x2d, 0x4a, 0x25, 0xd4, 0x4b, 0xb5, 0xb9, 0xf5, 0xb2,
- 0x32, 0xf6, 0x04, 0xac, 0x2e, 0x6b, 0xd7, 0x36, 0x49, 0xf6, 0x27, 0xf7, 0xd6, 0x9f, 0x50, 0xad,
- 0xd7, 0x04, 0x10, 0x4d, 0x2f, 0x68, 0xe7, 0xcd, 0x63, 0xde, 0xf8, 0xf9, 0x19, 0xde, 0x3d, 0xb2,
- 0x75, 0xe1, 0x52, 0x83, 0x3f, 0xd7, 0x68, 0x5d, 0xfe, 0xa0, 0xcc, 0x13, 0x33, 0x42, 0x96, 0x8b,
- 0xdc, 0xa2, 0x79, 0x94, 0x1c, 0x13, 0x6d, 0x94, 0x53, 0xf4, 0xd8, 0xf3, 0x46, 0x97, 0x4b, 0x2c,
- 0x34, 0x9a, 0x74, 0xcb, 0x4b, 0x5d, 0xa5, 0xd1, 0x36, 0xac, 0x58, 0x41, 0xff, 0x8e, 0x2f, 0x55,
- 0xd6, 0x4c, 0xa3, 0x11, 0x9c, 0xac, 0xd0, 0x5a, 0xb6, 0xc0, 0x88, 0x8c, 0xc9, 0xa4, 0x97, 0x6d,
- 0x4a, 0x7a, 0x0d, 0x03, 0x5e, 0xac, 0xad, 0x43, 0x93, 0x33, 0x21, 0x4c, 0xd4, 0xf2, 0xed, 0x7e,
- 0xc0, 0x3e, 0x09, 0x61, 0xe8, 0x5b, 0x38, 0xdd, 0xa5, 0xd8, 0xa8, 0x3d, 0x6e, 0x4f, 0x7a, 0xd9,
- 0x60, 0x87, 0x63, 0xe3, 0x27, 0xe8, 0x35, 0x82, 0xba, 0xa8, 0xfe, 0x21, 0x77, 0x30, 0xab, 0x75,
- 0x38, 0x8b, 0xbe, 0x87, 0x73, 0x83, 0xba, 0x90, 0x9c, 0x39, 0xa9, 0xca, 0xdc, 0x3a, 0xe6, 0x30,
- 0x6a, 0x8f, 0xc9, 0xe4, 0x34, 0x1b, 0xee, 0x34, 0xee, 0x6b, 0x3c, 0x9e, 0x41, 0x6f, 0x5a, 0x48,
- 0x2c, 0xdd, 0x57, 0xac, 0x28, 0x85, 0x4e, 0xed, 0x42, 0x50, 0xf5, 0xdf, 0x74, 0x00, 0xe4, 0xd9,
- 0x1f, 0x6b, 0x90, 0x91, 0xe7, 0xba, 0xaa, 0xfc, 0xac, 0x41, 0x46, 0xaa, 0xba, 0x12, 0x51, 0xa7,
- 0xa9, 0x44, 0x3c, 0x82, 0xe8, 0x1b, 0x9a, 0x87, 0x7b, 0xc7, 0x4a, 0x31, 0xaf, 0xee, 0x0a, 0xe4,
- 0xb5, 0xcc, 0xac, 0xd4, 0x6b, 0x17, 0xff, 0x22, 0xf0, 0xe6, 0x2f, 0xcd, 0x0c, 0xad, 0x56, 0xa5,
- 0x45, 0x7a, 0x06, 0x2d, 0x29, 0x82, 0x6e, 0x4b, 0x0a, 0x7a, 0x09, 0xb0, 0x39, 0xa8, 0x14, 0xc1,
- 0xd5, 0x5e, 0x40, 0x66, 0x82, 0xde, 0xc0, 0x85, 0x36, 0x72, 0xc5, 0x4c, 0x95, 0xef, 0xd9, 0xdf,
- 0xf6, 0x44, 0x1a, 0x7a, 0xd3, 0x9d, 0x5b, 0xf8, 0x1f, 0x4e, 0x38, 0xcb, 0x39, 0x1a, 0x17, 0x16,
- 0xee, 0x72, 0x36, 0x45, 0xe3, 0xe8, 0x15, 0xf4, 0xb9, 0x37, 0xa0, 0x69, 0x1e, 0xfb, 0x26, 0x34,
- 0x90, 0x27, 0xa4, 0x10, 0xaa, 0xfc, 0x07, 0x56, 0x51, 0x77, 0x4c, 0x26, 0xfd, 0xdb, 0x61, 0xe2,
- 0x63, 0x94, 0xbc, 0x58, 0x57, 0x2f, 0x17, 0x3e, 0x6f, 0x7f, 0x13, 0x38, 0x0f, 0xc9, 0xf9, 0xf2,
- 0x12, 0x2f, 0xfa, 0x11, 0xce, 0x42, 0xb5, 0x49, 0xd5, 0x7f, 0xc9, 0x36, 0x7d, 0x49, 0x00, 0x47,
- 0x17, 0xfb, 0x60, 0x63, 0x4f, 0x7c, 0x44, 0x13, 0xe8, 0xd4, 0x01, 0xa1, 0x34, 0x28, 0xef, 0xc4,
- 0x73, 0x34, 0xdc, 0xc3, 0x74, 0x51, 0xc5, 0x47, 0xb4, 0x80, 0xeb, 0xda, 0x6f, 0x65, 0x56, 0xac,
- 0xe4, 0x78, 0x60, 0x7b, 0xb3, 0xc1, 0x55, 0xf8, 0xe3, 0x6b, 0xd7, 0x36, 0x8a, 0x5f, 0x27, 0x6c,
- 0x77, 0xbb, 0x21, 0x9f, 0xe3, 0xef, 0xe3, 0x85, 0x74, 0xcb, 0xf5, 0x3c, 0xe1, 0x6a, 0x95, 0x2e,
- 0x99, 0x5d, 0x4a, 0xae, 0x8c, 0x4e, 0x9b, 0x47, 0xe9, 0x7f, 0xe7, 0x5d, 0xff, 0xb4, 0x3e, 0xfc,
- 0x09, 0x00, 0x00, 0xff, 0xff, 0x03, 0x94, 0x0a, 0x17, 0xaa, 0x03, 0x00, 0x00,
-}
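
PerformanceStandbyElectionRequest is the one streaming method here, and its generated client exposes only Recv. Consuming such a stream always looks the same: loop on Recv until io.EOF (normal close) or a non-nil error (transport or status failure). A hedged sketch with a stand-in Msg type rather than the generated PerfStandbyElectionResponse:

```go
package streamrecv

import "io"

// Msg stands in for a generated message type such as
// PerfStandbyElectionResponse.
type Msg struct{}

// Consume drains a gRPC server-stream whose generated client exposes
// Recv() (*Msg, error), stopping cleanly at io.EOF -- the standard
// pattern for streams like PerformanceStandbyElectionRequest above.
func Consume(recv func() (*Msg, error), handle func(*Msg)) error {
	for {
		m, err := recv()
		if err == io.EOF {
			return nil // server closed the stream normally
		}
		if err != nil {
			return err // transport or status error
		}
		handle(m)
	}
}
```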
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto b/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto
deleted file mode 100644
index 3429aaf5..00000000
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_service.proto
+++ /dev/null
@@ -1,46 +0,0 @@
-syntax = "proto3";
-
-option go_package = "github.com/hashicorp/vault/vault";
-
-import "helper/forwarding/types.proto";
-
-package vault;
-
-message EchoRequest {
- string message = 1;
- // ClusterAddr is used to send up a standby node's address to the active
- // node upon heartbeat
- string cluster_addr = 2;
- // ClusterAddrs is used to send up a list of cluster addresses to a dr
- // primary from a dr secondary
- repeated string cluster_addrs = 3;
-}
-
-message EchoReply {
- string message = 1;
- repeated string cluster_addrs = 2;
- uint32 replication_state = 3;
-}
-
-message ClientKey {
- string type = 1;
- bytes x = 2;
- bytes y = 3;
- bytes d = 4;
-}
-
-message PerfStandbyElectionInput {}
-message PerfStandbyElectionResponse {
- string id = 1;
- string cluster_id = 2;
- string primary_cluster_addr = 3;
- bytes ca_cert = 4;
- bytes client_cert = 5;
- ClientKey client_key = 6;
-}
-
-service RequestForwarding {
- rpc ForwardRequest(forwarding.Request) returns (forwarding.Response) {}
- rpc Echo(EchoRequest) returns (EchoReply) {}
- rpc PerformanceStandbyElectionRequest(PerfStandbyElectionInput) returns (stream PerfStandbyElectionResponse) {}
-}
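
On the server side, the `stream` keyword in the service definition above turns the handler inside out relative to a unary RPC: instead of returning one value, the implementation repeatedly calls Send on the stream and ends the stream by returning. A small sketch under assumed names (Update, send) rather than the generated RequestForwarding_PerformanceStandbyElectionRequestServer interface:

```go
package streamserve

// Update stands in for a generated response message such as
// PerfStandbyElectionResponse.
type Update struct{}

// Serve pushes updates to the client until the source channel closes or
// Send fails (typically because the client disconnected). Returning nil
// closes the stream cleanly; returning an error surfaces a gRPC status.
func Serve(send func(*Update) error, updates <-chan *Update) error {
	for u := range updates {
		if err := send(u); err != nil {
			return err
		}
	}
	return nil
}
```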
diff --git a/vendor/github.com/hashicorp/vault/vault/request_forwarding_util.go b/vendor/github.com/hashicorp/vault/vault/request_forwarding_util.go
deleted file mode 100644
index 20fae15f..00000000
--- a/vendor/github.com/hashicorp/vault/vault/request_forwarding_util.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
- "crypto/tls"
- "sync"
-
- cache "github.com/patrickmn/go-cache"
- "golang.org/x/net/http2"
- grpc "google.golang.org/grpc"
-)
-
-func perfStandbyRPCServer(*Core, *cache.Cache) *grpc.Server { return nil }
-
-func handleReplicationConn(context.Context, *Core, *sync.WaitGroup, chan struct{}, *http2.Server, *grpc.Server, *cache.Cache, *tls.Conn) {
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling.go b/vendor/github.com/hashicorp/vault/vault/request_handling.go
deleted file mode 100644
index eb432666..00000000
--- a/vendor/github.com/hashicorp/vault/vault/request_handling.go
+++ /dev/null
@@ -1,1141 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "time"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/go-multierror"
- sockaddr "github.com/hashicorp/go-sockaddr"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/errutil"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/helper/wrapping"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
-)
-
-const (
- replTimeout = 10 * time.Second
-)
-
-var (
- // DefaultMaxRequestDuration is the amount of time we'll wait for a request
- // to complete, unless overridden on a per-handler basis
- DefaultMaxRequestDuration = 90 * time.Second
-
- egpDebugLogging bool
-)
-
-// HandlerProperties is used to seed configuration into a vaulthttp.Handler.
-// It's in this package to avoid a circular dependency
-type HandlerProperties struct {
- Core *Core
- MaxRequestSize int64
- MaxRequestDuration time.Duration
- DisablePrintableCheck bool
-}
-
-// fetchEntityAndDerivedPolicies returns the entity object for the given entity
-// ID. If the entity is merged into a different entity object, the entity into
-// which the given entity ID is merged will be returned. This function also
-// returns the cumulative list of policies that the entity is entitled to.
-// This list includes the policies from the entity itself and from all the
-// groups of which the given entity ID is a member.
-func (c *Core) fetchEntityAndDerivedPolicies(ctx context.Context, tokenNS *namespace.Namespace, entityID string) (*identity.Entity, map[string][]string, error) {
- if entityID == "" || c.identityStore == nil {
- return nil, nil, nil
- }
-
- //c.logger.Debug("entity set on the token", "entity_id", te.EntityID)
-
- // Fetch the entity
- entity, err := c.identityStore.MemDBEntityByID(entityID, false)
- if err != nil {
- c.logger.Error("failed to lookup entity using its ID", "error", err)
- return nil, nil, err
- }
-
- if entity == nil {
- // If there was no corresponding entity object found, it is
- // possible that the entity got merged into another entity. Try
- // finding entity based on the merged entity index.
- entity, err = c.identityStore.MemDBEntityByMergedEntityID(entityID, false)
- if err != nil {
- c.logger.Error("failed to lookup entity in merged entity ID index", "error", err)
- return nil, nil, err
- }
- }
-
- policies := make(map[string][]string)
- if entity != nil {
- //c.logger.Debug("entity successfully fetched; adding entity policies to token's policies to create ACL")
-
- // Attach the policies on the entity
- if len(entity.Policies) != 0 {
- policies[entity.NamespaceID] = append(policies[entity.NamespaceID], entity.Policies...)
- }
-
- groupPolicies, err := c.identityStore.groupPoliciesByEntityID(entity.ID)
- if err != nil {
- c.logger.Error("failed to fetch group policies", "error", err)
- return nil, nil, err
- }
-
- // Filter and add the policies to the resultant set
- for nsID, nsPolicies := range groupPolicies {
- ns, err := NamespaceByID(ctx, nsID, c)
- if err != nil {
- return nil, nil, err
- }
- if ns == nil {
- return nil, nil, namespace.ErrNoNamespace
- }
- if tokenNS.Path != ns.Path && !ns.HasParent(tokenNS) {
- continue
- }
- nsPolicies = strutil.RemoveDuplicates(nsPolicies, false)
- if len(nsPolicies) != 0 {
- policies[nsID] = append(policies[nsID], nsPolicies...)
- }
- }
- }
-
- return entity, policies, err
-}
-
-func (c *Core) fetchACLTokenEntryAndEntity(ctx context.Context, req *logical.Request) (*ACL, *logical.TokenEntry, *identity.Entity, map[string][]string, error) {
- defer metrics.MeasureSince([]string{"core", "fetch_acl_and_token"}, time.Now())
-
- // Ensure there is a client token
- if req.ClientToken == "" {
- return nil, nil, nil, nil, fmt.Errorf("missing client token")
- }
-
- if c.tokenStore == nil {
- c.logger.Error("token store is unavailable")
- return nil, nil, nil, nil, ErrInternalError
- }
-
- // Resolve the token policy
- var te *logical.TokenEntry
- switch req.TokenEntry() {
- case nil:
- var err error
- te, err = c.tokenStore.Lookup(ctx, req.ClientToken)
- if err != nil {
- c.logger.Error("failed to lookup token", "error", err)
- return nil, nil, nil, nil, ErrInternalError
- }
- default:
- te = req.TokenEntry()
- }
-
- // Ensure the token is valid
- if te == nil {
- return nil, nil, nil, nil, logical.ErrPermissionDenied
- }
-
- // CIDR checks bind all tokens except non-expiring root tokens
- if te.TTL != 0 && len(te.BoundCIDRs) > 0 {
- var valid bool
- remoteSockAddr, err := sockaddr.NewSockAddr(req.Connection.RemoteAddr)
- if err != nil {
- if c.Logger().IsDebug() {
- c.Logger().Debug("could not parse remote addr into sockaddr", "error", err, "remote_addr", req.Connection.RemoteAddr)
- }
- return nil, nil, nil, nil, logical.ErrPermissionDenied
- }
- for _, cidr := range te.BoundCIDRs {
- if cidr.Contains(remoteSockAddr) {
- valid = true
- break
- }
- }
- if !valid {
- return nil, nil, nil, nil, logical.ErrPermissionDenied
- }
- }
-
- policies := make(map[string][]string)
- // Add the token's policies
- policies[te.NamespaceID] = append(policies[te.NamespaceID], te.Policies...)
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, c)
- if err != nil {
- c.logger.Error("failed to fetch token namespace", "error", err)
- return nil, nil, nil, nil, ErrInternalError
- }
- if tokenNS == nil {
- c.logger.Error("failed to fetch token namespace", "error", namespace.ErrNoNamespace)
- return nil, nil, nil, nil, ErrInternalError
- }
-
- // Add identity policies from all the namespaces
- entity, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, tokenNS, te.EntityID)
- if err != nil {
- return nil, nil, nil, nil, ErrInternalError
- }
- for nsID, nsPolicies := range identityPolicies {
- policies[nsID] = append(policies[nsID], nsPolicies...)
- }
-
- // Attach token's namespace information to the context. Wrapping tokens
- // should be able to be used anywhere, so we also special case behavior.
- var tokenCtx context.Context
- if len(policies) == 1 &&
- len(policies[te.NamespaceID]) == 1 &&
- (policies[te.NamespaceID][0] == responseWrappingPolicyName ||
- policies[te.NamespaceID][0] == controlGroupPolicyName) &&
- (strings.HasSuffix(req.Path, "sys/wrapping/unwrap") ||
- strings.HasSuffix(req.Path, "sys/wrapping/lookup") ||
- strings.HasSuffix(req.Path, "sys/wrapping/rewrap")) {
- // Use the request namespace; will find the copy of the policy for the
- // local namespace
- tokenCtx = ctx
- } else {
- // Use the token's namespace for looking up policy
- tokenCtx = namespace.ContextWithNamespace(ctx, tokenNS)
- }
-
- // Construct the corresponding ACL object. ACL construction should be
- // performed on the token's namespace.
- acl, err := c.policyStore.ACL(tokenCtx, entity, policies)
- if err != nil {
- if errwrap.ContainsType(err, new(TemplateError)) {
- return nil, nil, nil, nil, err
- }
- c.logger.Error("failed to construct ACL", "error", err)
- return nil, nil, nil, nil, ErrInternalError
- }
-
- return acl, te, entity, identityPolicies, nil
-}
-
-func (c *Core) checkToken(ctx context.Context, req *logical.Request, unauth bool) (*logical.Auth, *logical.TokenEntry, error) {
- defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now())
-
- var acl *ACL
- var te *logical.TokenEntry
- var entity *identity.Entity
- var identityPolicies map[string][]string
- var err error
-
- // Even if unauth, if a token is provided, there's little reason not to
- // gather as much info as possible for the audit log and to e.g. control
- // trace mode for EGPs.
- if !unauth || (unauth && req.ClientToken != "") {
- acl, te, entity, identityPolicies, err = c.fetchACLTokenEntryAndEntity(ctx, req)
- // In the unauth case we don't want to fail the command, since it's
- // unauth, we just have no information to attach to the request, so
- // ignore errors...this was best-effort anyways
- if err != nil && !unauth {
- if errwrap.ContainsType(err, new(TemplateError)) {
- c.logger.Warn("permission denied due to a templated policy being invalid or containing directives not satisfied by the requestor")
- err = logical.ErrPermissionDenied
- }
- return nil, te, err
- }
- }
-
- if entity != nil && entity.Disabled {
- c.logger.Warn("permission denied as the entity on the token is disabled")
- return nil, te, logical.ErrPermissionDenied
- }
- if te != nil && te.EntityID != "" && entity == nil {
- c.logger.Warn("permission denied as the entity on the token is invalid")
- return nil, te, logical.ErrPermissionDenied
- }
-
- // Check if this is a root protected path
- rootPath := c.router.RootPath(ctx, req.Path)
-
- if rootPath && unauth {
- return nil, nil, errors.New("cannot access root path in unauthenticated request")
- }
-
- // When we receive a write of either type, rather than require clients to
- // PUT/POST and trust the operation, we ask the backend to give us the real
- // skinny -- if the backend implements an existence check, it can tell us
- // whether a particular resource exists. Then we can mark it as an update
- // or creation as appropriate.
- if req.Operation == logical.CreateOperation || req.Operation == logical.UpdateOperation {
- existsResp, checkExists, resourceExists, err := c.router.RouteExistenceCheck(ctx, req)
- switch err {
- case logical.ErrUnsupportedPath:
- // fail later via bad path to avoid confusing items in the log
- checkExists = false
- case nil:
- if existsResp != nil && existsResp.IsError() {
- return nil, te, existsResp.Error()
- }
- // Otherwise, continue on
- default:
- c.logger.Error("failed to run existence check", "error", err)
- if _, ok := err.(errutil.UserError); ok {
- return nil, te, err
- } else {
- return nil, te, ErrInternalError
- }
- }
-
- switch {
- case !checkExists:
- // No existence check, so always treat it as an update operation, which is how it was pre-0.5
- req.Operation = logical.UpdateOperation
- case resourceExists:
- // It exists, so force an update operation
- req.Operation = logical.UpdateOperation
- case !resourceExists:
- // It doesn't exist, so force a create operation
- req.Operation = logical.CreateOperation
- default:
- panic("unreachable code")
- }
- }
- // Create the auth response
- auth := &logical.Auth{
- ClientToken: req.ClientToken,
- Accessor: req.ClientTokenAccessor,
- }
-
- if te != nil {
- auth.IdentityPolicies = identityPolicies[te.NamespaceID]
- auth.TokenPolicies = te.Policies
- auth.Policies = append(te.Policies, identityPolicies[te.NamespaceID]...)
- auth.Metadata = te.Meta
- auth.DisplayName = te.DisplayName
- auth.EntityID = te.EntityID
- delete(identityPolicies, te.NamespaceID)
- auth.ExternalNamespacePolicies = identityPolicies
- // Store the entity ID in the request object
- req.EntityID = te.EntityID
- auth.TokenType = te.Type
- }
-
- // Check the standard non-root ACLs. Return the token entry if it's not
- // allowed so we can decrement the use count.
- authResults := c.performPolicyChecks(ctx, acl, te, req, entity, &PolicyCheckOpts{
- Unauth: unauth,
- RootPrivsRequired: rootPath,
- })
-
- if !authResults.Allowed {
- retErr := authResults.Error
- if authResults.Error.ErrorOrNil() == nil || authResults.DeniedError {
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- }
- return auth, te, retErr
- }
-
- return auth, te, nil
-}
-
-// HandleRequest is used to handle a new incoming request
-func (c *Core) HandleRequest(httpCtx context.Context, req *logical.Request) (resp *logical.Response, err error) {
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.Sealed() {
- return nil, consts.ErrSealed
- }
- if c.standby && !c.perfStandby {
- return nil, consts.ErrStandby
- }
-
- ctx, cancel := context.WithCancel(c.activeContext)
- defer cancel()
-
- go func(ctx context.Context, httpCtx context.Context) {
- select {
- case <-ctx.Done():
- case <-httpCtx.Done():
- cancel()
- }
- }(ctx, httpCtx)
-
- // Allowing writing to a path ending in / makes it extremely difficult to
- // understand user intent for the filesystem-like backends (kv,
- // cubbyhole) -- did they want a key named foo/ or did they want to write
- // to a directory foo/ with no (or forgotten) key, or...? It also affects
- // lookup, because paths ending in / are considered prefixes by some
- // backends. Basically, it's all just terrible, so don't allow it.
- if strings.HasSuffix(req.Path, "/") &&
- (req.Operation == logical.UpdateOperation ||
- req.Operation == logical.CreateOperation) {
- return logical.ErrorResponse("cannot write to a path ending in '/'"), nil
- }
-
- err = waitForReplicationState(ctx, c, req)
- if err != nil {
- return nil, err
- }
-
- ns, err := namespace.FromContext(httpCtx)
- if err != nil {
- return nil, errwrap.Wrapf("could not parse namespace from http context: {{err}}", err)
- }
- ctx = namespace.ContextWithNamespace(ctx, ns)
-
- if !hasNamespaces(c) && ns.Path != "" {
- return nil, logical.CodedError(403, "namespaces feature not enabled")
- }
-
- var auth *logical.Auth
- if c.router.LoginPath(ctx, req.Path) {
- resp, auth, err = c.handleLoginRequest(ctx, req)
- } else {
- resp, auth, err = c.handleRequest(ctx, req)
- }
-
- // Ensure we don't leak internal data
- if resp != nil {
- if resp.Secret != nil {
- resp.Secret.InternalData = nil
- }
- if resp.Auth != nil {
- resp.Auth.InternalData = nil
- }
- }
-
- // We are wrapping if there is anything to wrap (not a nil response) and a
- // TTL was specified for the token. Errors on a call should be returned to
- // the caller, so wrapping is turned off if an error is hit and the error
- // is logged to the audit log.
- wrapping := resp != nil &&
- err == nil &&
- !resp.IsError() &&
- resp.WrapInfo != nil &&
- resp.WrapInfo.TTL != 0 &&
- resp.WrapInfo.Token == ""
-
- if wrapping {
- cubbyResp, cubbyErr := c.wrapInCubbyhole(ctx, req, resp, auth)
- // If not successful, returns either an error response from the
- // cubbyhole backend or an error; if either is set, set resp and err to
- // those and continue so that that's what we audit log. Otherwise
- // finish the wrapping and audit log that.
- if cubbyResp != nil || cubbyErr != nil {
- resp = cubbyResp
- err = cubbyErr
- } else {
- wrappingResp := &logical.Response{
- WrapInfo: resp.WrapInfo,
- Warnings: resp.Warnings,
- }
- resp = wrappingResp
- }
- }
-
- auditResp := resp
- // When unwrapping we want to log the actual response that will be written
- // out. We still want to return the raw value to avoid any automatic
- // updating of it.
- if req.Path == "sys/wrapping/unwrap" &&
- resp != nil &&
- resp.Data != nil &&
- resp.Data[logical.HTTPRawBody] != nil {
-
- // Decode the JSON
- if resp.Data[logical.HTTPRawBodyAlreadyJSONDecoded] != nil {
- delete(resp.Data, logical.HTTPRawBodyAlreadyJSONDecoded)
- } else {
- httpResp := &logical.HTTPResponse{}
- err := jsonutil.DecodeJSON(resp.Data[logical.HTTPRawBody].([]byte), httpResp)
- if err != nil {
- c.logger.Error("failed to unmarshal wrapped HTTP response for audit logging", "error", err)
- return nil, ErrInternalError
- }
-
- auditResp = logical.HTTPResponseToLogicalResponse(httpResp)
- }
- }
-
- var nonHMACReqDataKeys []string
- var nonHMACRespDataKeys []string
- entry := c.router.MatchingMountEntry(ctx, req.Path)
- if entry != nil {
- // Get and set ignored HMAC'd values. Reset those back to empty afterwards.
- if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok {
- nonHMACReqDataKeys = rawVals.([]string)
- }
-
-		// Get the response data keys that are exempt from HMAC'ing in the audit log.
- if auditResp != nil {
- if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_response_keys"); ok {
- nonHMACRespDataKeys = rawVals.([]string)
- }
- }
- }
-
- // Create an audit trail of the response
- if !isControlGroupRun(req) {
- logInput := &audit.LogInput{
- Auth: auth,
- Request: req,
- Response: auditResp,
- OuterErr: err,
- NonHMACReqDataKeys: nonHMACReqDataKeys,
- NonHMACRespDataKeys: nonHMACRespDataKeys,
- }
- if auditErr := c.auditBroker.LogResponse(ctx, logInput, c.auditedHeaders); auditErr != nil {
- c.logger.Error("failed to audit response", "request_path", req.Path, "error", auditErr)
- return nil, ErrInternalError
- }
- }
-
- return
-}
-
-func isControlGroupRun(req *logical.Request) bool {
- return req.ControlGroup != nil
-}
-
-func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) {
- defer metrics.MeasureSince([]string{"core", "handle_request"}, time.Now())
-
- var nonHMACReqDataKeys []string
- entry := c.router.MatchingMountEntry(ctx, req.Path)
- if entry != nil {
- // Get and set ignored HMAC'd value.
- if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok {
- nonHMACReqDataKeys = rawVals.([]string)
- }
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- c.logger.Error("failed to get namespace from context", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- return
- }
-
- // Validate the token
- auth, te, ctErr := c.checkToken(ctx, req, false)
- // We run this logic first because we want to decrement the use count even in the case of an error
- if te != nil && !isControlGroupRun(req) {
- // Attempt to use the token (decrement NumUses)
- var err error
- te, err = c.tokenStore.UseToken(ctx, te)
- if err != nil {
- c.logger.Error("failed to use token", "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- return nil, nil, retErr
- }
- if te == nil {
- // Token has been revoked by this point
- retErr = multierror.Append(retErr, logical.ErrPermissionDenied)
- return nil, nil, retErr
- }
- if te.NumUses == tokenRevocationPending {
- // We defer a revocation until after logic has run, since this is a
- // valid request (this is the token's final use). We pass the ID in
- // directly just to be safe in case something else modifies te later.
- defer func(id string) {
- nsActiveCtx := namespace.ContextWithNamespace(c.activeContext, ns)
- leaseID, err := c.expiration.CreateOrFetchRevocationLeaseByToken(nsActiveCtx, te)
- if err == nil {
- err = c.expiration.LazyRevoke(ctx, leaseID)
- }
- if err != nil {
- c.logger.Error("failed to revoke token", "error", err)
- retResp = nil
- retAuth = nil
- retErr = multierror.Append(retErr, ErrInternalError)
- }
- if retResp != nil && retResp.Secret != nil &&
- // Some backends return a TTL even without a Lease ID
- retResp.Secret.LeaseID != "" {
- retResp = logical.ErrorResponse("Secret cannot be returned; token had one use left, so leased credentials were immediately revoked.")
- return
- }
- }(te.ID)
- }
- }
-
- if ctErr != nil {
- newCtErr, cgResp, cgAuth, cgRetErr := checkNeedsCG(ctx, c, req, auth, ctErr, nonHMACReqDataKeys)
- switch {
- case newCtErr != nil:
-			ctErr = newCtErr
- case cgResp != nil || cgAuth != nil:
- if cgRetErr != nil {
- retErr = multierror.Append(retErr, cgRetErr)
- }
- return cgResp, cgAuth, retErr
- }
-
-		// If it is an internal error we return that; otherwise we return
-		// invalid request so that the status codes can be correct.
- switch {
- case ctErr == ErrInternalError,
- errwrap.Contains(ctErr, ErrInternalError.Error()),
- ctErr == logical.ErrPermissionDenied,
- errwrap.Contains(ctErr, logical.ErrPermissionDenied.Error()):
- switch ctErr.(type) {
- case *multierror.Error:
- retErr = ctErr
- default:
- retErr = multierror.Append(retErr, ctErr)
- }
- default:
- retErr = multierror.Append(retErr, logical.ErrInvalidRequest)
- }
-
- if !isControlGroupRun(req) {
- logInput := &audit.LogInput{
- Auth: auth,
- Request: req,
- OuterErr: ctErr,
- NonHMACReqDataKeys: nonHMACReqDataKeys,
- }
- if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
- c.logger.Error("failed to audit request", "path", req.Path, "error", err)
- }
- }
-
- if errwrap.Contains(retErr, ErrInternalError.Error()) {
- return nil, auth, retErr
- }
- return logical.ErrorResponse(ctErr.Error()), auth, retErr
- }
-
- // Attach the display name
- req.DisplayName = auth.DisplayName
-
- // Create an audit trail of the request
- if !isControlGroupRun(req) {
- logInput := &audit.LogInput{
- Auth: auth,
- Request: req,
- NonHMACReqDataKeys: nonHMACReqDataKeys,
- }
- if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
- c.logger.Error("failed to audit request", "path", req.Path, "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- return nil, auth, retErr
- }
- }
-
- // Route the request
- resp, routeErr := c.router.Route(ctx, req)
-	// If we're replicating and we get a read-only error from a backend, we need to forward the request to the primary.
- if routeErr != nil {
- resp, routeErr = possiblyForward(ctx, c, req, resp, routeErr)
- }
- if resp != nil {
-		// If wrapping is used, use the shortest TTL between the request and the response.
- var wrapTTL time.Duration
- var wrapFormat, creationPath string
- var sealWrap bool
-
-		// Ensure no wrap info is set other than, possibly, the TTL.
- if resp.WrapInfo != nil {
- if resp.WrapInfo.TTL > 0 {
- wrapTTL = resp.WrapInfo.TTL
- }
- wrapFormat = resp.WrapInfo.Format
- creationPath = resp.WrapInfo.CreationPath
- sealWrap = resp.WrapInfo.SealWrap
- resp.WrapInfo = nil
- }
-
- if req.WrapInfo != nil {
- if req.WrapInfo.TTL > 0 {
- switch {
- case wrapTTL == 0:
- wrapTTL = req.WrapInfo.TTL
- case req.WrapInfo.TTL < wrapTTL:
- wrapTTL = req.WrapInfo.TTL
- }
- }
- // If the wrap format hasn't been set by the response, set it to
- // the request format
- if req.WrapInfo.Format != "" && wrapFormat == "" {
- wrapFormat = req.WrapInfo.Format
- }
- }
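-		// Net effect of the merge above: the effective wrap TTL is the
-		// shorter of the TTLs that were set (e.g. a response TTL of 5m and
-		// a request TTL of 2m yield a 2m wrapTTL), and a response-supplied
-		// format takes precedence over the request's.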
-
- if wrapTTL > 0 {
- resp.WrapInfo = &wrapping.ResponseWrapInfo{
- TTL: wrapTTL,
- Format: wrapFormat,
- CreationPath: creationPath,
- SealWrap: sealWrap,
- }
- }
- }
-
- // If there is a secret, we must register it with the expiration manager.
- // We exclude renewal of a lease, since it does not need to be re-registered
- if resp != nil && resp.Secret != nil && !strings.HasPrefix(req.Path, "sys/renew") &&
- !strings.HasPrefix(req.Path, "sys/leases/renew") {
-		// KV mounts should return the TTL but not register a lease, since
-		// registering one per request would cause a massive slowdown.
- registerLease := true
-
- matchingMountEntry := c.router.MatchingMountEntry(ctx, req.Path)
- if matchingMountEntry == nil {
- c.logger.Error("unable to retrieve kv mount entry from router")
- retErr = multierror.Append(retErr, ErrInternalError)
- return nil, auth, retErr
- }
-
- switch matchingMountEntry.Type {
- case "kv", "generic":
- // If we are kv type, first see if we are an older passthrough
- // backend, and otherwise check the mount entry options.
- matchingBackend := c.router.MatchingBackend(ctx, req.Path)
- if matchingBackend == nil {
- c.logger.Error("unable to retrieve kv backend from router")
- retErr = multierror.Append(retErr, ErrInternalError)
- return nil, auth, retErr
- }
-
- if ptbe, ok := matchingBackend.(*PassthroughBackend); ok {
- if !ptbe.GeneratesLeases() {
- registerLease = false
- resp.Secret.Renewable = false
- }
- } else if matchingMountEntry.Options == nil || matchingMountEntry.Options["leased_passthrough"] != "true" {
- registerLease = false
- resp.Secret.Renewable = false
- }
-
- case "plugin":
-		// If we are a plugin type and the plugin name is "kv", check the
-		// mount entry options.
- if matchingMountEntry.Config.PluginName == "kv" && (matchingMountEntry.Options == nil || matchingMountEntry.Options["leased_passthrough"] != "true") {
- registerLease = false
- resp.Secret.Renewable = false
- }
- }
-
- if registerLease {
- sysView := c.router.MatchingSystemView(ctx, req.Path)
- if sysView == nil {
- c.logger.Error("unable to look up sys view for login path", "request_path", req.Path)
- return nil, nil, ErrInternalError
- }
-
- ttl, warnings, err := framework.CalculateTTL(sysView, 0, resp.Secret.TTL, 0, resp.Secret.MaxTTL, 0, time.Time{})
- if err != nil {
- return nil, nil, err
- }
- for _, warning := range warnings {
- resp.AddWarning(warning)
- }
- resp.Secret.TTL = ttl
-
- registerFunc, funcGetErr := getLeaseRegisterFunc(c)
- if funcGetErr != nil {
- retErr = multierror.Append(retErr, funcGetErr)
- return nil, auth, retErr
- }
-
- leaseID, err := registerFunc(ctx, req, resp)
- if err != nil {
- c.logger.Error("failed to register lease", "request_path", req.Path, "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- return nil, auth, retErr
- }
- resp.Secret.LeaseID = leaseID
-
- // Get the actual time of the lease
- le, err := c.expiration.FetchLeaseTimes(ctx, leaseID)
- if err != nil {
- c.logger.Error("failed to fetch updated lease time", "request_path", req.Path, "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- return nil, auth, retErr
- }
-			// We round here because the clock will have already started
-			// ticking; without rounding we would always return 299 instead
-			// of 300, or 26399 instead of 26400, say, even if only a few
-			// microseconds have elapsed. Rounding provides a nicer UX.
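-			// For example, a lease issued with a 300s TTL that has already
-			// ticked a few hundred microseconds reports 300s after rounding
-			// rather than truncating down to 299s.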
- resp.Secret.TTL = le.ExpireTime.Sub(time.Now()).Round(time.Second)
- }
- }
-
-	// Only the token store is allowed to return an auth block; for any
-	// other request this is an internal error. We exclude renewal of a
-	// token, since it does not need to be re-registered.
- if resp != nil && resp.Auth != nil && !strings.HasPrefix(req.Path, "auth/token/renew") {
- if !strings.HasPrefix(req.Path, "auth/token/") {
- c.logger.Error("unexpected Auth response for non-token backend", "request_path", req.Path)
- retErr = multierror.Append(retErr, ErrInternalError)
- return nil, auth, retErr
- }
-
- // Fetch the namespace to which the token belongs
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, c)
- if err != nil {
- c.logger.Error("failed to fetch token's namespace", "error", err)
- retErr = multierror.Append(retErr, err)
- return nil, auth, retErr
- }
- if tokenNS == nil {
- c.logger.Error(namespace.ErrNoNamespace.Error())
- retErr = multierror.Append(retErr, namespace.ErrNoNamespace)
- return nil, auth, retErr
- }
-
- _, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, tokenNS, resp.Auth.EntityID)
- if err != nil {
- c.tokenStore.revokeOrphan(ctx, te.ID)
- return nil, nil, ErrInternalError
- }
-
- resp.Auth.TokenPolicies = policyutil.SanitizePolicies(resp.Auth.Policies, policyutil.DoNotAddDefaultPolicy)
- switch resp.Auth.TokenType {
- case logical.TokenTypeBatch:
- case logical.TokenTypeService:
- if err := c.expiration.RegisterAuth(ctx, &logical.TokenEntry{
- Path: resp.Auth.CreationPath,
- NamespaceID: ns.ID,
- }, resp.Auth); err != nil {
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to register token lease", "request_path", req.Path, "error", err)
- retErr = multierror.Append(retErr, ErrInternalError)
- return nil, auth, retErr
- }
- }
-
- // We do these later since it's not meaningful for backends/expmgr to
- // have what is purely a snapshot of current identity policies, and
- // plugins can be confused if they are checking contents of
- // Auth.Policies instead of Auth.TokenPolicies
- resp.Auth.Policies = policyutil.SanitizePolicies(append(resp.Auth.Policies, identityPolicies[te.NamespaceID]...), policyutil.DoNotAddDefaultPolicy)
- resp.Auth.IdentityPolicies = policyutil.SanitizePolicies(identityPolicies[te.NamespaceID], policyutil.DoNotAddDefaultPolicy)
- delete(identityPolicies, te.NamespaceID)
- resp.Auth.ExternalNamespacePolicies = identityPolicies
- }
-
- if resp != nil &&
- req.Path == "cubbyhole/response" &&
- len(te.Policies) == 1 &&
- te.Policies[0] == responseWrappingPolicyName {
- resp.AddWarning("Reading from 'cubbyhole/response' is deprecated. Please use sys/wrapping/unwrap to unwrap responses, as it provides additional security checks and other benefits.")
- }
-
- // Return the response and error
- if routeErr != nil {
- retErr = multierror.Append(retErr, routeErr)
- }
-
- return resp, auth, retErr
-}
-
-// handleLoginRequest is used to handle a login request, which is an
-// unauthenticated request to the backend.
-func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) {
- defer metrics.MeasureSince([]string{"core", "handle_login_request"}, time.Now())
-
- req.Unauthenticated = true
-
- var auth *logical.Auth
-
- // Do an unauth check. This will cause EGP policies to be checked
- var ctErr error
- auth, _, ctErr = c.checkToken(ctx, req, true)
- if ctErr != nil {
-		// If it is an internal error we return that; otherwise we return
-		// invalid request so that the status codes can be correct.
- var errType error
- switch ctErr {
- case ErrInternalError, logical.ErrPermissionDenied:
- errType = ctErr
- default:
- errType = logical.ErrInvalidRequest
- }
-
- var nonHMACReqDataKeys []string
- entry := c.router.MatchingMountEntry(ctx, req.Path)
- if entry != nil {
- // Get and set ignored HMAC'd value.
- if rawVals, ok := entry.synthesizedConfigCache.Load("audit_non_hmac_request_keys"); ok {
- nonHMACReqDataKeys = rawVals.([]string)
- }
- }
-
- logInput := &audit.LogInput{
- Auth: auth,
- Request: req,
- OuterErr: ctErr,
- NonHMACReqDataKeys: nonHMACReqDataKeys,
- }
- if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
- c.logger.Error("failed to audit request", "path", req.Path, "error", err)
- return nil, nil, ErrInternalError
- }
-
- if errType != nil {
- retErr = multierror.Append(retErr, errType)
- }
- if ctErr == ErrInternalError {
- return nil, auth, retErr
- }
- return logical.ErrorResponse(ctErr.Error()), auth, retErr
- }
-
- // Create an audit trail of the request. Attach auth if it was returned,
- // e.g. if a token was provided.
- logInput := &audit.LogInput{
- Auth: auth,
- Request: req,
- }
- if err := c.auditBroker.LogRequest(ctx, logInput, c.auditedHeaders); err != nil {
- c.logger.Error("failed to audit request", "path", req.Path, "error", err)
- return nil, nil, ErrInternalError
- }
-
- // The token store uses authentication even when creating a new token,
- // so it's handled in handleRequest. It should not be reached here.
- if strings.HasPrefix(req.Path, "auth/token/") {
- c.logger.Error("unexpected login request for token backend", "request_path", req.Path)
- return nil, nil, ErrInternalError
- }
-
- // Route the request
- resp, routeErr := c.router.Route(ctx, req)
-	// If we're replicating and we get a read-only error from a backend, we need to forward the request to the primary.
- if routeErr != nil {
- resp, routeErr = possiblyForward(ctx, c, req, resp, routeErr)
- }
- if resp != nil {
-		// If wrapping is used, use the shortest TTL between the request and the response.
- var wrapTTL time.Duration
- var wrapFormat, creationPath string
- var sealWrap bool
-
-		// Ensure no wrap info is set other than, possibly, the TTL.
- if resp.WrapInfo != nil {
- if resp.WrapInfo.TTL > 0 {
- wrapTTL = resp.WrapInfo.TTL
- }
- wrapFormat = resp.WrapInfo.Format
- creationPath = resp.WrapInfo.CreationPath
- sealWrap = resp.WrapInfo.SealWrap
- resp.WrapInfo = nil
- }
-
- if req.WrapInfo != nil {
- if req.WrapInfo.TTL > 0 {
- switch {
- case wrapTTL == 0:
- wrapTTL = req.WrapInfo.TTL
- case req.WrapInfo.TTL < wrapTTL:
- wrapTTL = req.WrapInfo.TTL
- }
- }
- if req.WrapInfo.Format != "" && wrapFormat == "" {
- wrapFormat = req.WrapInfo.Format
- }
- }
-
- if wrapTTL > 0 {
- resp.WrapInfo = &wrapping.ResponseWrapInfo{
- TTL: wrapTTL,
- Format: wrapFormat,
- CreationPath: creationPath,
- SealWrap: sealWrap,
- }
- }
- }
-
- // A login request should never return a secret!
- if resp != nil && resp.Secret != nil {
- c.logger.Error("unexpected Secret response for login path", "request_path", req.Path)
- return nil, nil, ErrInternalError
- }
-
- // If the response generated an authentication, then generate the token
- if resp != nil && resp.Auth != nil {
-
- var entity *identity.Entity
- auth = resp.Auth
-
- mEntry := c.router.MatchingMountEntry(ctx, req.Path)
-
- if auth.Alias != nil &&
- mEntry != nil &&
- !mEntry.Local &&
- c.identityStore != nil {
- // Overwrite the mount type and mount path in the alias
- // information
- auth.Alias.MountType = req.MountType
- auth.Alias.MountAccessor = req.MountAccessor
-
- if auth.Alias.Name == "" {
- return nil, nil, fmt.Errorf("missing name in alias")
- }
-
- var err error
-
- // Fetch the entity for the alias, or create an entity if one
- // doesn't exist.
- entity, err = c.identityStore.CreateOrFetchEntity(ctx, auth.Alias)
- if err != nil {
- entity, err = possiblyForwardAliasCreation(ctx, c, err, auth, entity)
- }
- if err != nil {
- return nil, nil, err
- }
- if entity == nil {
- return nil, nil, fmt.Errorf("failed to create an entity for the authenticated alias")
- }
-
- if entity.Disabled {
- return nil, nil, logical.ErrPermissionDenied
- }
-
- auth.EntityID = entity.ID
- if auth.GroupAliases != nil {
- validAliases, err := c.identityStore.refreshExternalGroupMembershipsByEntityID(auth.EntityID, auth.GroupAliases)
- if err != nil {
- return nil, nil, err
- }
- auth.GroupAliases = validAliases
- }
- }
-
- // Determine the source of the login
- source := c.router.MatchingMount(ctx, req.Path)
- source = strings.TrimPrefix(source, credentialRoutePrefix)
- source = strings.Replace(source, "/", "-", -1)
-
- // Prepend the source to the display name
- auth.DisplayName = strings.TrimSuffix(source+auth.DisplayName, "-")
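-		// For example, an auth method mounted at "auth/github/" (a
-		// hypothetical path) yields source "github-", so a backend display
-		// name of "octocat" becomes "github-octocat"; the TrimSuffix
-		// handles an empty backend display name ("github-" -> "github").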
-
- sysView := c.router.MatchingSystemView(ctx, req.Path)
- if sysView == nil {
- c.logger.Error("unable to look up sys view for login path", "request_path", req.Path)
- return nil, nil, ErrInternalError
- }
-
- tokenTTL, warnings, err := framework.CalculateTTL(sysView, 0, auth.TTL, auth.Period, auth.MaxTTL, auth.ExplicitMaxTTL, time.Time{})
- if err != nil {
- return nil, nil, err
- }
- for _, warning := range warnings {
- resp.AddWarning(warning)
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, nil, err
- }
- _, identityPolicies, err := c.fetchEntityAndDerivedPolicies(ctx, ns, auth.EntityID)
- if err != nil {
- return nil, nil, ErrInternalError
- }
-
- auth.TokenPolicies = policyutil.SanitizePolicies(auth.Policies, policyutil.AddDefaultPolicy)
- allPolicies := policyutil.SanitizePolicies(append(auth.TokenPolicies, identityPolicies[ns.ID]...), policyutil.DoNotAddDefaultPolicy)
-
-		// Prevent internal policies from being assigned to tokens. We check
-		// auth.Policies, including the policies derived from Identity,
-		// before actually making the token.
- for _, policy := range allPolicies {
- if policy == "root" {
- return logical.ErrorResponse("auth methods cannot create root tokens"), nil, logical.ErrInvalidRequest
- }
- if strutil.StrListContains(nonAssignablePolicies, policy) {
- return logical.ErrorResponse(fmt.Sprintf("cannot assign policy %q", policy)), nil, logical.ErrInvalidRequest
- }
- }
-
- var registerFunc RegisterAuthFunc
- var funcGetErr error
-		// Batch tokens should not be forwarded to a performance standby.
- if auth.TokenType == logical.TokenTypeBatch {
- registerFunc = c.RegisterAuth
- } else {
- registerFunc, funcGetErr = getAuthRegisterFunc(c)
- }
- if funcGetErr != nil {
- retErr = multierror.Append(retErr, funcGetErr)
- return nil, auth, retErr
- }
-
- err = registerFunc(ctx, tokenTTL, req.Path, auth)
- switch {
- case err == nil:
- case err == ErrInternalError:
- return nil, auth, err
- default:
- return logical.ErrorResponse(err.Error()), auth, logical.ErrInvalidRequest
- }
-
- auth.IdentityPolicies = policyutil.SanitizePolicies(identityPolicies[ns.ID], policyutil.DoNotAddDefaultPolicy)
- delete(identityPolicies, ns.ID)
- auth.ExternalNamespacePolicies = identityPolicies
- auth.Policies = allPolicies
-
- // Attach the display name, might be used by audit backends
- req.DisplayName = auth.DisplayName
-
- }
-
- return resp, auth, routeErr
-}
-
-func (c *Core) RegisterAuth(ctx context.Context, tokenTTL time.Duration, path string, auth *logical.Auth) error {
- // We first assign token policies to what was returned from the backend
- // via auth.Policies. Then, we get the full set of policies into
- // auth.Policies from the backend + entity information -- this is not
- // stored in the token, but we perform sanity checks on it and return
- // that information to the user.
-
- // Generate a token
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- te := logical.TokenEntry{
- Path: path,
- Meta: auth.Metadata,
- DisplayName: auth.DisplayName,
- CreationTime: time.Now().Unix(),
- TTL: tokenTTL,
- NumUses: auth.NumUses,
- EntityID: auth.EntityID,
- BoundCIDRs: auth.BoundCIDRs,
- Policies: auth.TokenPolicies,
- NamespaceID: ns.ID,
- ExplicitMaxTTL: auth.ExplicitMaxTTL,
- Type: auth.TokenType,
- }
-
- if err := c.tokenStore.create(ctx, &te); err != nil {
- c.logger.Error("failed to create token", "error", err)
- return ErrInternalError
- }
-
- // Populate the client token, accessor, and TTL
- auth.ClientToken = te.ID
- auth.Accessor = te.Accessor
- auth.TTL = te.TTL
-
- switch auth.TokenType {
- case logical.TokenTypeBatch:
- // Ensure it's not marked renewable since it isn't
- auth.Renewable = false
- case logical.TokenTypeService:
- // Register with the expiration manager
- if err := c.expiration.RegisterAuth(ctx, &te, auth); err != nil {
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to register token lease", "request_path", path, "error", err)
- return ErrInternalError
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/request_handling_util.go b/vendor/github.com/hashicorp/vault/vault/request_handling_util.go
deleted file mode 100644
index ffcc419a..00000000
--- a/vendor/github.com/hashicorp/vault/vault/request_handling_util.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
-
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/logical"
-)
-
-func waitForReplicationState(context.Context, *Core, *logical.Request) error { return nil }
-
-func checkNeedsCG(context.Context, *Core, *logical.Request, *logical.Auth, error, []string) (error, *logical.Response, *logical.Auth, error) {
- return nil, nil, nil, nil
-}
-
-func possiblyForward(ctx context.Context, c *Core, req *logical.Request, resp *logical.Response, routeErr error) (*logical.Response, error) {
- return resp, routeErr
-}
-
-func getLeaseRegisterFunc(c *Core) (func(context.Context, *logical.Request, *logical.Response) (string, error), error) {
- return c.expiration.Register, nil
-}
-
-func getAuthRegisterFunc(c *Core) (RegisterAuthFunc, error) {
- return c.RegisterAuth, nil
-}
-
-func possiblyForwardAliasCreation(ctx context.Context, c *Core, inErr error, auth *logical.Auth, entity *identity.Entity) (*identity.Entity, error) {
- return entity, inErr
-}
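-
-// Note: these no-op stubs compile only for open-source builds (see the
-// "!enterprise" build tag above); an enterprise build is expected to
-// supply real implementations with the same signatures, e.g. forwarding
-// requests that hit read-only storage to the performance primary.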
diff --git a/vendor/github.com/hashicorp/vault/vault/rollback.go b/vendor/github.com/hashicorp/vault/vault/rollback.go
deleted file mode 100644
index f9b49539..00000000
--- a/vendor/github.com/hashicorp/vault/vault/rollback.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package vault
-
-import (
- "context"
- "errors"
- "strings"
- "sync"
- "time"
-
- log "github.com/hashicorp/go-hclog"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // rollbackPeriod is how often we attempt rollbacks for all the backends
- rollbackPeriod = time.Minute
-)
-
-// RollbackManager is responsible for performing rollbacks of partial
-// secrets within logical backends.
-//
-// During normal operations, it is possible for logical backends to
-// error partially through an operation. These are called "partial secrets":
-// they are never sent back to a user, but they do need to be cleaned up.
-// This manager handles that by periodically (on a timer) requesting that the
-// backends clean up.
-//
-// The RollbackManager periodically initiates a logical.RollbackOperation
-// on every mounted logical backend. It ensures that only one rollback operation
-// is in-flight at any given time within a single seal/unseal phase.
-type RollbackManager struct {
- logger log.Logger
-
-	// backends returns the current mount entries of both the logical and
-	// credential backend tables.
- backends func() []*MountEntry
-
- router *Router
- period time.Duration
-
- inflightAll sync.WaitGroup
- inflight map[string]*rollbackState
- inflightLock sync.RWMutex
-
- doneCh chan struct{}
- shutdown bool
- shutdownCh chan struct{}
- shutdownLock sync.Mutex
- quitContext context.Context
-
- core *Core
-}
-
-// rollbackState is used to track the state of a single rollback attempt
-type rollbackState struct {
- lastError error
- sync.WaitGroup
-}
-
-// NewRollbackManager is used to create a new rollback manager
-func NewRollbackManager(ctx context.Context, logger log.Logger, backendsFunc func() []*MountEntry, router *Router, core *Core) *RollbackManager {
- r := &RollbackManager{
- logger: logger,
- backends: backendsFunc,
- router: router,
- period: rollbackPeriod,
- inflight: make(map[string]*rollbackState),
- doneCh: make(chan struct{}),
- shutdownCh: make(chan struct{}),
- quitContext: ctx,
- core: core,
- }
- return r
-}
-
-// Start starts the rollback manager
-func (m *RollbackManager) Start() {
- go m.run()
-}
-
-// Stop stops the running manager. This will wait for any in-flight
-// rollbacks to complete.
-func (m *RollbackManager) Stop() {
- m.shutdownLock.Lock()
- defer m.shutdownLock.Unlock()
- if !m.shutdown {
- m.shutdown = true
- close(m.shutdownCh)
- <-m.doneCh
- }
- m.inflightAll.Wait()
-}
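-
-// Typical lifecycle, mirroring startRollback/stopRollback at the bottom
-// of this file:
-//
-//	m := NewRollbackManager(ctx, logger, backendsFunc, router, core)
-//	m.Start()  // begin ticking every rollbackPeriod (one minute)
-//	...
-//	m.Stop()   // at seal time; blocks until in-flight rollbacks drain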
-
-// run is a long running routine to periodically invoke rollback
-func (m *RollbackManager) run() {
- m.logger.Info("starting rollback manager")
- tick := time.NewTicker(m.period)
- defer tick.Stop()
- defer close(m.doneCh)
- for {
- select {
- case <-tick.C:
- m.triggerRollbacks()
-
- case <-m.shutdownCh:
- m.logger.Info("stopping rollback manager")
- return
- }
- }
-}
-
-// triggerRollbacks is used to trigger the rollbacks across all the backends
-func (m *RollbackManager) triggerRollbacks() {
-
- backends := m.backends()
-
- for _, e := range backends {
- path := e.Path
- if e.Table == credentialTableType {
- path = credentialRoutePrefix + path
- }
-
- // When the mount is filtered, the backend will be nil
- ctx := namespace.ContextWithNamespace(m.quitContext, e.namespace)
- backend := m.router.MatchingBackend(ctx, path)
- if backend == nil {
- continue
- }
- fullPath := e.namespace.Path + path
-
- m.inflightLock.RLock()
- _, ok := m.inflight[fullPath]
- m.inflightLock.RUnlock()
- if !ok {
- m.startRollback(ctx, fullPath, true)
- }
- }
-}
-
-// startRollback is used to start an async rollback attempt. It registers
-// the attempt in the inflight map under inflightLock before spawning the
-// rollback goroutine.
-func (m *RollbackManager) startRollback(ctx context.Context, fullPath string, grabStatelock bool) *rollbackState {
- rs := &rollbackState{}
- rs.Add(1)
- m.inflightAll.Add(1)
- m.inflightLock.Lock()
- m.inflight[fullPath] = rs
- m.inflightLock.Unlock()
- go m.attemptRollback(ctx, fullPath, rs, grabStatelock)
- return rs
-}
-
-// attemptRollback invokes a RollbackOperation for the given path
-func (m *RollbackManager) attemptRollback(ctx context.Context, fullPath string, rs *rollbackState, grabStatelock bool) (err error) {
- defer metrics.MeasureSince([]string{"rollback", "attempt", strings.Replace(fullPath, "/", "-", -1)}, time.Now())
- if m.logger.IsDebug() {
- m.logger.Debug("attempting rollback", "path", fullPath)
- }
-
- defer func() {
- rs.lastError = err
- rs.Done()
- m.inflightAll.Done()
- m.inflightLock.Lock()
- delete(m.inflight, fullPath)
- m.inflightLock.Unlock()
- }()
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- if ns == nil {
- return namespace.ErrNoNamespace
- }
-
- // Invoke a RollbackOperation
- req := &logical.Request{
- Operation: logical.RollbackOperation,
- Path: ns.TrimmedPath(fullPath),
- }
-
- if grabStatelock {
- // Grab the statelock or stop
- if stopped := grabLockOrStop(m.core.stateLock.RLock, m.core.stateLock.RUnlock, m.shutdownCh); stopped {
- return errors.New("rollback shutting down")
- }
- }
-
- var cancelFunc context.CancelFunc
- ctx, cancelFunc = context.WithTimeout(ctx, DefaultMaxRequestDuration)
- _, err = m.router.Route(ctx, req)
- if grabStatelock {
- m.core.stateLock.RUnlock()
- }
- cancelFunc()
-
-	// If the error is an unsupported operation, then it doesn't matter;
-	// the backend simply doesn't support rollback.
- if err == logical.ErrUnsupportedOperation {
- err = nil
- }
- // If we failed due to read-only storage, we can't do anything; ignore
- if err != nil && strings.Contains(err.Error(), logical.ErrReadOnly.Error()) {
- err = nil
- }
- if err != nil {
- m.logger.Error("error rolling back", "path", fullPath, "error", err)
- }
- return
-}
-
-// Rollback is used to trigger an immediate rollback of the path, or to
-// join an existing rollback operation if one is in flight. The caller
-// should hold core's statelock.
-func (m *RollbackManager) Rollback(ctx context.Context, path string) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- fullPath := ns.Path + path
-
- // Check for an existing attempt and start one if none
- m.inflightLock.RLock()
- rs, ok := m.inflight[fullPath]
- m.inflightLock.RUnlock()
- if !ok {
- rs = m.startRollback(ctx, fullPath, false)
- }
-
- // Wait for the attempt to finish
- rs.Wait()
-
- // Return the last error
- return rs.lastError
-}
-
-// The methods below are the hooks from core that are called pre/post seal.
-
-// startRollback is used to start the rollback manager after unsealing
-func (c *Core) startRollback() error {
- backendsFunc := func() []*MountEntry {
- ret := []*MountEntry{}
- c.mountsLock.RLock()
- defer c.mountsLock.RUnlock()
-		// During teardown/setup after a leader change or unseal there could
-		// be a race here, so make sure the table isn't nil.
- if c.mounts != nil {
- for _, entry := range c.mounts.Entries {
- ret = append(ret, entry)
- }
- }
- c.authLock.RLock()
- defer c.authLock.RUnlock()
-		// During teardown/setup after a leader change or unseal there could
-		// be a race here, so make sure the table isn't nil.
- if c.auth != nil {
- for _, entry := range c.auth.Entries {
- ret = append(ret, entry)
- }
- }
- return ret
- }
- rollbackLogger := c.baseLogger.Named("rollback")
- c.AddLogger(rollbackLogger)
- c.rollback = NewRollbackManager(c.activeContext, rollbackLogger, backendsFunc, c.router, c)
- c.rollback.Start()
- return nil
-}
-
-// stopRollback is used to stop running the rollback manager before sealing
-func (c *Core) stopRollback() error {
- if c.rollback != nil {
- c.rollback.Stop()
- c.rollback = nil
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/router.go b/vendor/github.com/hashicorp/vault/vault/router.go
deleted file mode 100644
index 66c1ef23..00000000
--- a/vendor/github.com/hashicorp/vault/vault/router.go
+++ /dev/null
@@ -1,823 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/armon/go-metrics"
- "github.com/armon/go-radix"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
-)
-
-var (
- denylistHeaders = []string{
- "Authorization",
- consts.AuthHeaderName,
- }
-)
-
-// Router is used to do prefix based routing of a request to a logical backend
-type Router struct {
- l sync.RWMutex
- root *radix.Tree
- mountUUIDCache *radix.Tree
- mountAccessorCache *radix.Tree
- tokenStoreSaltFunc func(context.Context) (*salt.Salt, error)
-	// storagePrefix maps the prefix used for storage (as in the
-	// BarrierView) to the backend. This is used to map a key back into the
-	// backend that owns it.
- storagePrefix *radix.Tree
-}
-
-// NewRouter returns a new router
-func NewRouter() *Router {
- r := &Router{
- root: radix.New(),
- storagePrefix: radix.New(),
- mountUUIDCache: radix.New(),
- mountAccessorCache: radix.New(),
- }
- return r
-}
-
-// routeEntry is used to represent a mount point in the router
-type routeEntry struct {
- tainted bool
- backend logical.Backend
- mountEntry *MountEntry
- storageView logical.Storage
- storagePrefix string
- rootPaths atomic.Value
- loginPaths atomic.Value
- l sync.RWMutex
-}
-
-type validateMountResponse struct {
- MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"`
- MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor"`
- MountPath string `json:"mount_path" structs:"mount_path" mapstructure:"mount_path"`
- MountLocal bool `json:"mount_local" structs:"mount_local" mapstructure:"mount_local"`
-}
-
-// validateMountByAccessor returns the mount type, accessor, path, and
-// locality for a given mount accessor.
-func (r *Router) validateMountByAccessor(accessor string) *validateMountResponse {
- if accessor == "" {
- return nil
- }
-
- mountEntry := r.MatchingMountByAccessor(accessor)
- if mountEntry == nil {
- return nil
- }
-
- mountPath := mountEntry.Path
- if mountEntry.Table == credentialTableType {
- mountPath = credentialRoutePrefix + mountPath
- }
-
- return &validateMountResponse{
- MountAccessor: mountEntry.Accessor,
- MountType: mountEntry.Type,
- MountPath: mountPath,
- MountLocal: mountEntry.Local,
- }
-}
-
-// SaltID is used to apply a salt and hash to an ID to make sure it's not reversible.
-func (re *routeEntry) SaltID(id string) string {
- return salt.SaltID(re.mountEntry.UUID, id, salt.SHA1Hash)
-}
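-
-// Because the mount entry's UUID serves as the salt, the same ID salted
-// under two different mounts yields unrelated values, so salted IDs are
-// not comparable across mounts.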
-
-// Mount is used to expose a logical backend at a given prefix, using a unique salt,
-// and the barrier view for that path.
-func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *MountEntry, storageView *BarrierView) error {
- r.l.Lock()
- defer r.l.Unlock()
-
- // prepend namespace
- prefix = mountEntry.Namespace().Path + prefix
-
- // Check if this is a nested mount
- if existing, _, ok := r.root.LongestPrefix(prefix); ok && existing != "" {
- return fmt.Errorf("cannot mount under existing mount %q", existing)
- }
-
- // Build the paths
- paths := new(logical.Paths)
- if backend != nil {
- specialPaths := backend.SpecialPaths()
- if specialPaths != nil {
- paths = specialPaths
- }
- }
-
- // Create a mount entry
- re := &routeEntry{
- tainted: false,
- backend: backend,
- mountEntry: mountEntry,
- storagePrefix: storageView.prefix,
- storageView: storageView,
- }
- re.rootPaths.Store(pathsToRadix(paths.Root))
- re.loginPaths.Store(pathsToRadix(paths.Unauthenticated))
-
- switch {
- case prefix == "":
- return fmt.Errorf("missing prefix to be used for router entry; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
- case re.storagePrefix == "":
- return fmt.Errorf("missing storage view prefix; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
- case re.mountEntry.UUID == "":
- return fmt.Errorf("missing mount identifier; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
- case re.mountEntry.Accessor == "":
- return fmt.Errorf("missing mount accessor; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
- }
-
- r.root.Insert(prefix, re)
- r.storagePrefix.Insert(re.storagePrefix, re)
- r.mountUUIDCache.Insert(re.mountEntry.UUID, re.mountEntry)
- r.mountAccessorCache.Insert(re.mountEntry.Accessor, re.mountEntry)
-
- return nil
-}
-
-// Unmount is used to remove a logical backend from a given prefix
-func (r *Router) Unmount(ctx context.Context, prefix string) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- prefix = ns.Path + prefix
-
- r.l.Lock()
- defer r.l.Unlock()
-
- // Fast-path out if the backend doesn't exist
- raw, ok := r.root.Get(prefix)
- if !ok {
- return nil
- }
-
- // Call backend's Cleanup routine
- re := raw.(*routeEntry)
- if re.backend != nil {
- re.backend.Cleanup(ctx)
- }
-
- // Purge from the radix trees
- r.root.Delete(prefix)
- r.storagePrefix.Delete(re.storagePrefix)
- r.mountUUIDCache.Delete(re.mountEntry.UUID)
- r.mountAccessorCache.Delete(re.mountEntry.Accessor)
-
- return nil
-}
-
-// Remount is used to change the mount location of a logical backend
-func (r *Router) Remount(ctx context.Context, src, dst string) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- src = ns.Path + src
- dst = ns.Path + dst
-
- r.l.Lock()
- defer r.l.Unlock()
-
- // Check for existing mount
- raw, ok := r.root.Get(src)
- if !ok {
- return fmt.Errorf("no mount at %q", src)
- }
-
- // Update the mount point
- r.root.Delete(src)
- r.root.Insert(dst, raw)
- return nil
-}
-
-// Taint is used to mark a path as tainted. This means only
-// RollbackOperation and RevokeOperation requests are allowed to proceed.
-func (r *Router) Taint(ctx context.Context, path string) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- path = ns.Path + path
-
- r.l.Lock()
- defer r.l.Unlock()
- _, raw, ok := r.root.LongestPrefix(path)
- if ok {
- raw.(*routeEntry).tainted = true
- }
- return nil
-}
-
-// Untaint is used to unmark a path as tainted.
-func (r *Router) Untaint(ctx context.Context, path string) error {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- path = ns.Path + path
-
- r.l.Lock()
- defer r.l.Unlock()
- _, raw, ok := r.root.LongestPrefix(path)
- if ok {
- raw.(*routeEntry).tainted = false
- }
- return nil
-}
-
-func (r *Router) MatchingMountByUUID(mountID string) *MountEntry {
- if mountID == "" {
- return nil
- }
-
- r.l.RLock()
-
- _, raw, ok := r.mountUUIDCache.LongestPrefix(mountID)
- if !ok {
- r.l.RUnlock()
- return nil
- }
-
- r.l.RUnlock()
- return raw.(*MountEntry)
-}
-
-// MatchingMountByAccessor returns the MountEntry by accessor lookup
-func (r *Router) MatchingMountByAccessor(mountAccessor string) *MountEntry {
- if mountAccessor == "" {
- return nil
- }
-
- r.l.RLock()
-
- _, raw, ok := r.mountAccessorCache.LongestPrefix(mountAccessor)
- if !ok {
- r.l.RUnlock()
- return nil
- }
-
- r.l.RUnlock()
- return raw.(*MountEntry)
-}
-
-// MatchingMount returns the mount prefix that would be used for a path
-func (r *Router) MatchingMount(ctx context.Context, path string) string {
- r.l.RLock()
- mount := r.matchingMountInternal(ctx, path)
- r.l.RUnlock()
- return mount
-}
-
-func (r *Router) matchingMountInternal(ctx context.Context, path string) string {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return ""
- }
- path = ns.Path + path
-
- mount, _, ok := r.root.LongestPrefix(path)
- if !ok {
- return ""
- }
- return mount
-}
-
-// matchingPrefixInternal returns a mount prefix that a path may be a part of
-func (r *Router) matchingPrefixInternal(ctx context.Context, path string) string {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return ""
- }
- path = ns.Path + path
-
- var existing string
- fn := func(existingPath string, v interface{}) bool {
- if strings.HasPrefix(existingPath, path) {
- existing = existingPath
- return true
- }
- return false
- }
- r.root.WalkPrefix(path, fn)
- return existing
-}
-
-// MountConflict determines if there are potential path conflicts
-func (r *Router) MountConflict(ctx context.Context, path string) string {
- r.l.RLock()
- defer r.l.RUnlock()
- if exactMatch := r.matchingMountInternal(ctx, path); exactMatch != "" {
- return exactMatch
- }
- if prefixMatch := r.matchingPrefixInternal(ctx, path); prefixMatch != "" {
- return prefixMatch
- }
- return ""
-}
-
-// MatchingStorageByAPIPath/StoragePath returns the storage used for
-// API/Storage paths respectively
-func (r *Router) MatchingStorageByAPIPath(ctx context.Context, path string) logical.Storage {
- return r.matchingStorage(ctx, path, true)
-}
-func (r *Router) MatchingStorageByStoragePath(ctx context.Context, path string) logical.Storage {
- return r.matchingStorage(ctx, path, false)
-}
-func (r *Router) matchingStorage(ctx context.Context, path string, apiPath bool) logical.Storage {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil
- }
- path = ns.Path + path
-
- var raw interface{}
- var ok bool
- r.l.RLock()
- if apiPath {
- _, raw, ok = r.root.LongestPrefix(path)
- } else {
- _, raw, ok = r.storagePrefix.LongestPrefix(path)
- }
- r.l.RUnlock()
- if !ok {
- return nil
- }
- return raw.(*routeEntry).storageView
-}
-
-// MatchingMountEntry returns the MountEntry used for a path
-func (r *Router) MatchingMountEntry(ctx context.Context, path string) *MountEntry {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil
- }
- path = ns.Path + path
-
- r.l.RLock()
- _, raw, ok := r.root.LongestPrefix(path)
- r.l.RUnlock()
- if !ok {
- return nil
- }
- return raw.(*routeEntry).mountEntry
-}
-
-// MatchingBackend returns the backend used for a path
-func (r *Router) MatchingBackend(ctx context.Context, path string) logical.Backend {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil
- }
- path = ns.Path + path
-
- r.l.RLock()
- _, raw, ok := r.root.LongestPrefix(path)
- r.l.RUnlock()
- if !ok {
- return nil
- }
- return raw.(*routeEntry).backend
-}
-
-// MatchingSystemView returns the SystemView used for a path
-func (r *Router) MatchingSystemView(ctx context.Context, path string) logical.SystemView {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil
- }
- path = ns.Path + path
-
- r.l.RLock()
- _, raw, ok := r.root.LongestPrefix(path)
- r.l.RUnlock()
- if !ok {
- return nil
- }
- return raw.(*routeEntry).backend.System()
-}
-
-// MatchingStoragePrefixByAPIPath returns the storage prefix for the given API path.
-func (r *Router) MatchingStoragePrefixByAPIPath(ctx context.Context, path string) (string, bool) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return "", false
- }
- path = ns.Path + path
-
- _, prefix, found := r.matchingMountEntryByPath(ctx, path, true)
- return prefix, found
-}
-
-// MatchingAPIPrefixByStoragePath returns the API path information for the given storage path.
-func (r *Router) MatchingAPIPrefixByStoragePath(ctx context.Context, path string) (*namespace.Namespace, string, string, bool) {
- me, prefix, found := r.matchingMountEntryByPath(ctx, path, false)
- if !found {
- return nil, "", "", found
- }
-
- mountPath := me.Path
- // Add back the prefix for credential backends
- if strings.HasPrefix(path, credentialBarrierPrefix) {
- mountPath = credentialRoutePrefix + mountPath
- }
-
- return me.Namespace(), mountPath, prefix, found
-}
-
-func (r *Router) matchingMountEntryByPath(ctx context.Context, path string, apiPath bool) (*MountEntry, string, bool) {
- var raw interface{}
- var ok bool
- r.l.RLock()
- if apiPath {
- _, raw, ok = r.root.LongestPrefix(path)
- } else {
- _, raw, ok = r.storagePrefix.LongestPrefix(path)
- }
- r.l.RUnlock()
- if !ok {
- return nil, "", false
- }
-
- // Extract the mount path and storage prefix
- re := raw.(*routeEntry)
- prefix := re.storagePrefix
-
- return re.mountEntry, prefix, true
-}
-
-// Route is used to route a given request
-func (r *Router) Route(ctx context.Context, req *logical.Request) (*logical.Response, error) {
- resp, _, _, err := r.routeCommon(ctx, req, false)
- return resp, err
-}
-
-// RouteExistenceCheck is used to route a given existence check request
-func (r *Router) RouteExistenceCheck(ctx context.Context, req *logical.Request) (*logical.Response, bool, bool, error) {
- resp, ok, exists, err := r.routeCommon(ctx, req, true)
- return resp, ok, exists, err
-}
-
-func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenceCheck bool) (*logical.Response, bool, bool, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, false, false, err
- }
-
- // Find the mount point
- r.l.RLock()
- adjustedPath := req.Path
- mount, raw, ok := r.root.LongestPrefix(ns.Path + adjustedPath)
- if !ok && !strings.HasSuffix(adjustedPath, "/") {
-		// Re-check for a backend by appending a slash. This lets "foo" mean
-		// "foo/" at the root level, which is almost always what we want.
- adjustedPath += "/"
- mount, raw, ok = r.root.LongestPrefix(ns.Path + adjustedPath)
- }
- r.l.RUnlock()
- if !ok {
- return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath
- }
- req.Path = adjustedPath
- defer metrics.MeasureSince([]string{"route", string(req.Operation),
- strings.Replace(mount, "/", "-", -1)}, time.Now())
- re := raw.(*routeEntry)
-
- // Grab a read lock on the route entry, this protects against the backend
- // being reloaded during a request.
- re.l.RLock()
- defer re.l.RUnlock()
-
- // Filtered mounts will have a nil backend
- if re.backend == nil {
- return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath
- }
-
- // If the path is tainted, we reject any operation except for
- // Rollback and Revoke
- if re.tainted {
- switch req.Operation {
- case logical.RevokeOperation, logical.RollbackOperation:
- default:
- return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath
- }
- }
-
- // Adjust the path to exclude the routing prefix
- originalPath := req.Path
- req.Path = strings.TrimPrefix(ns.Path+req.Path, mount)
- req.MountPoint = mount
- req.MountType = re.mountEntry.Type
- if req.Path == "/" {
- req.Path = ""
- }
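-	// For example, with a mount at "secret/" in the root namespace, a
-	// request for "secret/foo" reaches the backend with req.Path == "foo"
-	// and req.MountPoint == "secret/".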
-
- originalEntReq := req.EntReq()
-
- // Attach the storage view for the request
- req.Storage = re.storageView
-
- originalEntityID := req.EntityID
-
- // Hash the request token unless the request is being routed to the token
- // or system backend.
- clientToken := req.ClientToken
- switch {
- case strings.HasPrefix(originalPath, "auth/token/"):
- case strings.HasPrefix(originalPath, "sys/"):
- case strings.HasPrefix(originalPath, cubbyholeMountPath):
- if req.Operation == logical.RollbackOperation {
-			// The backend doesn't support this, and it can't properly look
-			// up a cubbyhole ID, so just return here.
- return nil, false, false, nil
- }
-
- te := req.TokenEntry()
-
- if te == nil {
- return nil, false, false, fmt.Errorf("nil token entry")
- }
-
- if te.Type != logical.TokenTypeService {
- return logical.ErrorResponse(`cubbyhole operations are only supported by "service" type tokens`), false, false, nil
- }
-
- switch {
- case te.NamespaceID == namespace.RootNamespaceID && !strings.HasPrefix(req.ClientToken, "s."):
- // In order for the token store to revoke later, we need to have the same
- // salted ID, so we double-salt what's going to the cubbyhole backend
- salt, err := r.tokenStoreSaltFunc(ctx)
- if err != nil {
- return nil, false, false, err
- }
- req.ClientToken = re.SaltID(salt.SaltID(req.ClientToken))
-
- default:
- if te.CubbyholeID == "" {
- return nil, false, false, fmt.Errorf("empty cubbyhole id")
- }
- req.ClientToken = te.CubbyholeID
- }
-
- default:
- req.ClientToken = re.SaltID(req.ClientToken)
- }
-
- // Cache the pointer to the original connection object
- originalConn := req.Connection
-
- // Cache the identifier of the request
- originalReqID := req.ID
-
- // Cache the client token's number of uses in the request
- originalClientTokenRemainingUses := req.ClientTokenRemainingUses
- req.ClientTokenRemainingUses = 0
-
- origMFACreds := req.MFACreds
- req.MFACreds = nil
-
- // Cache the headers
- headers := req.Headers
-
- // Filter and add passthrough headers to the backend
- var passthroughRequestHeaders []string
- if rawVal, ok := re.mountEntry.synthesizedConfigCache.Load("passthrough_request_headers"); ok {
- passthroughRequestHeaders = rawVal.([]string)
- }
- req.Headers = filteredPassthroughHeaders(headers, passthroughRequestHeaders)
-
- // Cache the wrap info of the request
- var wrapInfo *logical.RequestWrapInfo
- if req.WrapInfo != nil {
- wrapInfo = &logical.RequestWrapInfo{
- TTL: req.WrapInfo.TTL,
- Format: req.WrapInfo.Format,
- SealWrap: req.WrapInfo.SealWrap,
- }
- }
-
- originalPolicyOverride := req.PolicyOverride
- reqTokenEntry := req.TokenEntry()
- req.SetTokenEntry(nil)
-
- // Reset the request before returning
- defer func() {
- req.Path = originalPath
- req.MountPoint = mount
- req.MountType = re.mountEntry.Type
- req.Connection = originalConn
- req.ID = originalReqID
- req.Storage = nil
- req.ClientToken = clientToken
- req.ClientTokenRemainingUses = originalClientTokenRemainingUses
- req.WrapInfo = wrapInfo
- req.Headers = headers
- req.PolicyOverride = originalPolicyOverride
- // This is only set in one place, after routing, so should never be set
- // by a backend
- req.SetLastRemoteWAL(0)
-
- // This will be used for attaching the mount accessor for the identities
- // returned by the authentication backends
- req.MountAccessor = re.mountEntry.Accessor
-
- req.EntityID = originalEntityID
-
- req.MFACreds = origMFACreds
-
- req.SetTokenEntry(reqTokenEntry)
- req.SetEntReq(originalEntReq)
- }()
-
- // Invoke the backend
- if existenceCheck {
- ok, exists, err := re.backend.HandleExistenceCheck(ctx, req)
- return nil, ok, exists, err
- } else {
- resp, err := re.backend.HandleRequest(ctx, req)
- if resp != nil &&
- resp.Auth != nil {
-			// When a token gets renewed, the request hits this path and
-			// reaches the token store. The token store delegates the renewal
-			// to the expiration manager, which in turn creates a different
-			// logical request and forwards it to the auth backend that had
-			// initially authenticated the login request. That forwarding
-			// makes this code path run a second time for the same renewal
-			// request. The accessors in the Alias structs should be those of
-			// the auth backend, not of the token store; therefore we avoid
-			// overwriting the accessors by checking whether the path prefix
-			// is "renew". This applies to both "renew" and "renew-self"
-			// requests.
- if !strings.HasPrefix(req.Path, "renew") {
- if resp.Auth.Alias != nil {
- resp.Auth.Alias.MountAccessor = re.mountEntry.Accessor
- }
- for _, alias := range resp.Auth.GroupAliases {
- alias.MountAccessor = re.mountEntry.Accessor
- }
- }
-
- switch re.mountEntry.Type {
- case "token", "ns_token":
- // Nothing; we respect what the token store is telling us and
- // we don't allow tuning
- default:
- switch re.mountEntry.Config.TokenType {
- case logical.TokenTypeService, logical.TokenTypeBatch:
- resp.Auth.TokenType = re.mountEntry.Config.TokenType
- case logical.TokenTypeDefault, logical.TokenTypeDefaultService:
- if resp.Auth.TokenType == logical.TokenTypeDefault {
- resp.Auth.TokenType = logical.TokenTypeService
- }
- case logical.TokenTypeDefaultBatch:
- if resp.Auth.TokenType == logical.TokenTypeDefault {
- resp.Auth.TokenType = logical.TokenTypeBatch
- }
- }
- }
- }
-
- return resp, false, false, err
- }
-}
-
-// RootPath checks if the given path requires root privileges
-func (r *Router) RootPath(ctx context.Context, path string) bool {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return false
- }
-
- adjustedPath := ns.Path + path
-
- r.l.RLock()
- mount, raw, ok := r.root.LongestPrefix(adjustedPath)
- r.l.RUnlock()
- if !ok {
- return false
- }
- re := raw.(*routeEntry)
-
- // Trim to get remaining path
- remain := strings.TrimPrefix(adjustedPath, mount)
-
- // Check the rootPaths of this backend
- rootPaths := re.rootPaths.Load().(*radix.Tree)
- match, raw, ok := rootPaths.LongestPrefix(remain)
- if !ok {
- return false
- }
- prefixMatch := raw.(bool)
-
- // Handle the prefix match case
- if prefixMatch {
- return strings.HasPrefix(remain, match)
- }
-
- // Handle the exact match case
- return match == remain
-}
-
-// LoginPath checks if the given path is used for logins
-func (r *Router) LoginPath(ctx context.Context, path string) bool {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return false
- }
-
- adjustedPath := ns.Path + path
-
- r.l.RLock()
- mount, raw, ok := r.root.LongestPrefix(adjustedPath)
- r.l.RUnlock()
- if !ok {
- return false
- }
- re := raw.(*routeEntry)
-
- // Trim to get remaining path
- remain := strings.TrimPrefix(adjustedPath, mount)
-
- // Check the loginPaths of this backend
- loginPaths := re.loginPaths.Load().(*radix.Tree)
- match, raw, ok := loginPaths.LongestPrefix(remain)
- if !ok {
- return false
- }
- prefixMatch := raw.(bool)
-
- // Handle the prefix match case
- if prefixMatch {
- return strings.HasPrefix(remain, match)
- }
-
- // Handle the exact match case
- return match == remain
-}
-
-// pathsToRadix converts a list of special paths into a radix tree,
-// marking each entry as an exact or prefix ('*'-suffixed) match.
-func pathsToRadix(paths []string) *radix.Tree {
- tree := radix.New()
- for _, path := range paths {
- // Check if this is a prefix or exact match
- prefixMatch := len(path) >= 1 && path[len(path)-1] == '*'
- if prefixMatch {
- path = path[:len(path)-1]
- }
-
- tree.Insert(path, prefixMatch)
- }
-
- return tree
-}
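-
-// Illustrative behavior, with assumed inputs: given special paths
-// []string{"login", "oidc/*"}, the tree stores "login" -> false (exact
-// match) and "oidc/" -> true (prefix match), so a lookup matches "login"
-// exactly and matches anything under "oidc/".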
-
-// filteredPassthroughHeaders returns a headers map[string][]string that
-// contains the filtered values named in passthroughHeaders. Filtering of
-// passthroughHeaders against the origHeaders is done in a case-insensitive
-// manner. Headers that match values from denylistHeaders will be ignored.
-func filteredPassthroughHeaders(origHeaders map[string][]string, passthroughHeaders []string) map[string][]string {
- retHeaders := make(map[string][]string)
-
- // Short-circuit if there's nothing to filter
- if len(passthroughHeaders) == 0 {
- return retHeaders
- }
-
-	// Filter the passthroughHeaders values through denylistHeaders first.
-	// This returns the lowercased complement set.
- passthroughHeadersSubset := strutil.Difference(passthroughHeaders, denylistHeaders, true)
-
-	// Create a map that uses lowercased header names as keys and the
-	// original header naming as values, for the comparison down below.
- lowerHeadersRef := make(map[string]string, len(origHeaders))
- for key := range origHeaders {
- lowerHeadersRef[strings.ToLower(key)] = key
- }
-
- // Case-insensitive compare of passthrough headers against originating
- // headers. The returned headers will be the same casing as the originating
- // header name.
- for _, ph := range passthroughHeadersSubset {
- if header, ok := lowerHeadersRef[ph]; ok {
- retHeaders[header] = origHeaders[header]
- }
- }
-
- return retHeaders
-}
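-
-// Illustrative usage, with assumed inputs:
-//
-//	orig := map[string][]string{
-//		"X-Request-Id":  {"abc123"},
-//		"Authorization": {"Bearer secret"},
-//	}
-//	filteredPassthroughHeaders(orig, []string{"x-request-id", "authorization"})
-//	// => map[X-Request-Id:[abc123]]: "Authorization" is dropped by the
-//	// denylist, and header-name matching is case-insensitive.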
diff --git a/vendor/github.com/hashicorp/vault/vault/router_access.go b/vendor/github.com/hashicorp/vault/vault/router_access.go
deleted file mode 100644
index 90335d7a..00000000
--- a/vendor/github.com/hashicorp/vault/vault/router_access.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package vault
-
-import "context"
-
-// RouterAccess provides access into some things necessary for testing
-type RouterAccess struct {
- c *Core
-}
-
-func NewRouterAccess(c *Core) *RouterAccess {
- return &RouterAccess{c: c}
-}
-
-func (r *RouterAccess) StoragePrefixByAPIPath(ctx context.Context, path string) (string, bool) {
- return r.c.router.MatchingStoragePrefixByAPIPath(ctx, path)
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal.go b/vendor/github.com/hashicorp/vault/vault/seal.go
deleted file mode 100644
index 08249a0a..00000000
--- a/vendor/github.com/hashicorp/vault/vault/seal.go
+++ /dev/null
@@ -1,376 +0,0 @@
-package vault
-
-import (
- "bytes"
- "context"
- "crypto/subtle"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "sync/atomic"
-
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/vault/seal"
-
- "github.com/keybase/go-crypto/openpgp"
- "github.com/keybase/go-crypto/openpgp/packet"
-)
-
-const (
- // barrierSealConfigPath is the path used to store our seal configuration.
-	// This value is stored in plaintext, since we must be able to read it
-	// even while Vault is sealed. This is required so that we know how many
-	// secret parts must be used to reconstruct the master key.
- barrierSealConfigPath = "core/seal-config"
-
- // recoverySealConfigPath is the path to the recovery key seal
- // configuration. It lives inside the barrier.
- // DEPRECATED: Use recoverySealConfigPlaintextPath instead.
- recoverySealConfigPath = "core/recovery-seal-config"
-
- // recoverySealConfigPlaintextPath is the path to the recovery key seal
- // configuration. This is stored in plaintext so that we can perform
- // auto-unseal.
- recoverySealConfigPlaintextPath = "core/recovery-config"
-
- // recoveryKeyPath is the path to the recovery key
- recoveryKeyPath = "core/recovery-key"
-
- // StoredBarrierKeysPath is the path used for storing HSM-encrypted unseal keys
- StoredBarrierKeysPath = "core/hsm/barrier-unseal-keys"
-
- // hsmStoredIVPath is the path to the initialization vector for stored keys
- hsmStoredIVPath = "core/hsm/iv"
-)
-
-const (
- RecoveryTypeUnsupported = "unsupported"
- RecoveryTypeShamir = "shamir"
-)
-
-type Seal interface {
- SetCore(*Core)
- Init(context.Context) error
- Finalize(context.Context) error
-
- StoredKeysSupported() bool
- SetStoredKeys(context.Context, [][]byte) error
- GetStoredKeys(context.Context) ([][]byte, error)
-
- BarrierType() string
- BarrierConfig(context.Context) (*SealConfig, error)
- SetBarrierConfig(context.Context, *SealConfig) error
- SetCachedBarrierConfig(*SealConfig)
-
- RecoveryKeySupported() bool
- RecoveryType() string
- RecoveryConfig(context.Context) (*SealConfig, error)
- SetRecoveryConfig(context.Context, *SealConfig) error
- SetCachedRecoveryConfig(*SealConfig)
- SetRecoveryKey(context.Context, []byte) error
- VerifyRecoveryKey(context.Context, []byte) error
-}
-
-type defaultSeal struct {
- config atomic.Value
- core *Core
- PretendToAllowStoredShares bool
- PretendToAllowRecoveryKeys bool
- PretendRecoveryKey []byte
-}
-
-func NewDefaultSeal() Seal {
- ret := &defaultSeal{}
- ret.config.Store((*SealConfig)(nil))
- return ret
-}
-
-func (d *defaultSeal) checkCore() error {
- if d.core == nil {
- return fmt.Errorf("seal does not have a core set")
- }
- return nil
-}
-
-func (d *defaultSeal) SetCore(core *Core) {
- d.core = core
-}
-
-func (d *defaultSeal) Init(ctx context.Context) error {
- return nil
-}
-
-func (d *defaultSeal) Finalize(ctx context.Context) error {
- return nil
-}
-
-func (d *defaultSeal) BarrierType() string {
- return seal.Shamir
-}
-
-func (d *defaultSeal) StoredKeysSupported() bool {
- return d.PretendToAllowStoredShares
-}
-
-func (d *defaultSeal) RecoveryKeySupported() bool {
- return d.PretendToAllowRecoveryKeys
-}
-
-func (d *defaultSeal) SetStoredKeys(ctx context.Context, keys [][]byte) error {
- return fmt.Errorf("stored keys are not supported")
-}
-
-func (d *defaultSeal) GetStoredKeys(ctx context.Context) ([][]byte, error) {
- return nil, fmt.Errorf("stored keys are not supported")
-}
-
-func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) {
- if d.config.Load().(*SealConfig) != nil {
- return d.config.Load().(*SealConfig).Clone(), nil
- }
-
- if err := d.checkCore(); err != nil {
- return nil, err
- }
-
- // Fetch the core configuration
- pe, err := d.core.physical.Get(ctx, barrierSealConfigPath)
- if err != nil {
- d.core.logger.Error("failed to read seal configuration", "error", err)
- return nil, errwrap.Wrapf("failed to check seal configuration: {{err}}", err)
- }
-
- // If the seal configuration is missing, we are not initialized
- if pe == nil {
- d.core.logger.Info("seal configuration missing, not initialized")
- return nil, nil
- }
-
- var conf SealConfig
-
- // Decode the barrier entry
- if err := jsonutil.DecodeJSON(pe.Value, &conf); err != nil {
- d.core.logger.Error("failed to decode seal configuration", "error", err)
- return nil, errwrap.Wrapf("failed to decode seal configuration: {{err}}", err)
- }
-
- switch conf.Type {
-	// An empty type is the legacy default; any other value must match the barrier type.
- case "":
- conf.Type = d.BarrierType()
- case d.BarrierType():
- default:
- d.core.logger.Error("barrier seal type does not match expected type", "barrier_seal_type", conf.Type, "loaded_seal_type", d.BarrierType())
- return nil, fmt.Errorf("barrier seal type of %q does not match expected type of %q", conf.Type, d.BarrierType())
- }
-
- // Check for a valid seal configuration
- if err := conf.Validate(); err != nil {
- d.core.logger.Error("invalid seal configuration", "error", err)
- return nil, errwrap.Wrapf("seal validation failed: {{err}}", err)
- }
-
- d.config.Store(&conf)
- return conf.Clone(), nil
-}
-
-func (d *defaultSeal) SetBarrierConfig(ctx context.Context, config *SealConfig) error {
- if err := d.checkCore(); err != nil {
- return err
- }
-
- // Provide a way to wipe out the cached value (also prevents actually
- // saving a nil config)
- if config == nil {
- d.config.Store((*SealConfig)(nil))
- return nil
- }
-
- config.Type = d.BarrierType()
-
- // Encode the seal configuration
- buf, err := json.Marshal(config)
- if err != nil {
- return errwrap.Wrapf("failed to encode seal configuration: {{err}}", err)
- }
-
- // Store the seal configuration
- pe := &physical.Entry{
- Key: barrierSealConfigPath,
- Value: buf,
- }
-
- if err := d.core.physical.Put(ctx, pe); err != nil {
- d.core.logger.Error("failed to write seal configuration", "error", err)
- return errwrap.Wrapf("failed to write seal configuration: {{err}}", err)
- }
-
- d.config.Store(config.Clone())
-
- return nil
-}
-
-func (d *defaultSeal) SetCachedBarrierConfig(config *SealConfig) {
- d.config.Store(config)
-}
-
-func (d *defaultSeal) RecoveryType() string {
- if d.PretendToAllowRecoveryKeys {
- return RecoveryTypeShamir
- }
- return RecoveryTypeUnsupported
-}
-
-func (d *defaultSeal) RecoveryConfig(ctx context.Context) (*SealConfig, error) {
- if d.PretendToAllowRecoveryKeys {
- return &SealConfig{
- SecretShares: 5,
- SecretThreshold: 3,
- }, nil
- }
- return nil, fmt.Errorf("recovery not supported")
-}
-
-func (d *defaultSeal) SetRecoveryConfig(ctx context.Context, config *SealConfig) error {
- if d.PretendToAllowRecoveryKeys {
- return nil
- }
- return fmt.Errorf("recovery not supported")
-}
-
-func (d *defaultSeal) SetCachedRecoveryConfig(config *SealConfig) {
-}
-
-func (d *defaultSeal) VerifyRecoveryKey(ctx context.Context, key []byte) error {
- if d.PretendToAllowRecoveryKeys {
- if subtle.ConstantTimeCompare(key, d.PretendRecoveryKey) == 1 {
- return nil
- }
- return fmt.Errorf("mismatch")
- }
- return fmt.Errorf("recovery not supported")
-}
-
-func (d *defaultSeal) SetRecoveryKey(ctx context.Context, key []byte) error {
- if d.PretendToAllowRecoveryKeys {
- d.PretendRecoveryKey = key
- return nil
- }
- return fmt.Errorf("recovery not supported")
-}
-
-// SealConfig is used to describe the seal configuration
-type SealConfig struct {
- // The type, for sanity checking
- Type string `json:"type"`
-
- // SecretShares is the number of shares the secret is split into. This is
- // the N value of Shamir.
- SecretShares int `json:"secret_shares"`
-
- // SecretThreshold is the number of parts required to open the vault. This
- // is the T value of Shamir.
- SecretThreshold int `json:"secret_threshold"`
-
- // PGPKeys is the array of public PGP keys used, if requested, to encrypt
- // the output unseal tokens. If provided, it sets the value of
- // SecretShares. Ordering is important.
- PGPKeys []string `json:"pgp_keys"`
-
- // Nonce is a nonce generated by Vault used to ensure that when unseal keys
- // are submitted for a rekey operation, the rekey operation itself is the
- // one intended. This prevents hijacking of the rekey operation, since it
- // is unauthenticated.
- Nonce string `json:"nonce"`
-
- // Backup indicates whether or not a backup of PGP-encrypted unseal keys
- // should be stored at coreUnsealKeysBackupPath after successful rekeying.
- Backup bool `json:"backup"`
-
- // How many keys to store, for seals that support storage.
- StoredShares int `json:"stored_shares"`
-
- // Stores the progress of the rekey operation (key shares)
- RekeyProgress [][]byte `json:"-"`
-
-	// VerificationRequired indicates that, after a rekey, validation must be
-	// performed (by providing shares from the new key) before the new key is
-	// actually installed. This is omitted from JSON as we don't persist the
-	// new key; it lives only in memory.
- VerificationRequired bool `json:"-"`
-
- // VerificationKey is the new key that we will roll to after successful
- // validation
- VerificationKey []byte `json:"-"`
-
- // VerificationNonce stores the current operation nonce for verification
- VerificationNonce string `json:"-"`
-
- // Stores the progress of the verification operation (key shares)
- VerificationProgress [][]byte `json:"-"`
-}
-
-// Validate is used to sanity check the seal configuration
-func (s *SealConfig) Validate() error {
- if s.SecretShares < 1 {
- return fmt.Errorf("shares must be at least one")
- }
- if s.SecretThreshold < 1 {
- return fmt.Errorf("threshold must be at least one")
- }
- if s.SecretShares > 1 && s.SecretThreshold == 1 {
- return fmt.Errorf("threshold must be greater than one for multiple shares")
- }
- if s.SecretShares > 255 {
- return fmt.Errorf("shares must be less than 256")
- }
- if s.SecretThreshold > 255 {
- return fmt.Errorf("threshold must be less than 256")
- }
- if s.SecretThreshold > s.SecretShares {
- return fmt.Errorf("threshold cannot be larger than shares")
- }
- if s.StoredShares > s.SecretShares {
- return fmt.Errorf("stored keys cannot be larger than shares")
- }
- if len(s.PGPKeys) > 0 && len(s.PGPKeys) != s.SecretShares-s.StoredShares {
- return fmt.Errorf("count mismatch between number of provided PGP keys and number of shares")
- }
- if len(s.PGPKeys) > 0 {
- for _, keystring := range s.PGPKeys {
- data, err := base64.StdEncoding.DecodeString(keystring)
- if err != nil {
- return errwrap.Wrapf("error decoding given PGP key: {{err}}", err)
- }
- _, err = openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
- if err != nil {
- return errwrap.Wrapf("error parsing given PGP key: {{err}}", err)
- }
- }
- }
- return nil
-}
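To make the constraints above concrete, a 3-of-5 Shamir configuration passes Validate, while a threshold greater than the share count fails — a minimal sketch:

    good := &SealConfig{SecretShares: 5, SecretThreshold: 3}
    err := good.Validate() // nil: 3-of-5 satisfies every check above

    bad := &SealConfig{SecretShares: 3, SecretThreshold: 5}
    err = bad.Validate() // "threshold cannot be larger than shares"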
-
-func (s *SealConfig) Clone() *SealConfig {
- ret := &SealConfig{
- Type: s.Type,
- SecretShares: s.SecretShares,
- SecretThreshold: s.SecretThreshold,
- Nonce: s.Nonce,
- Backup: s.Backup,
- StoredShares: s.StoredShares,
- VerificationRequired: s.VerificationRequired,
- VerificationNonce: s.VerificationNonce,
- }
- if len(s.PGPKeys) > 0 {
- ret.PGPKeys = make([]string, len(s.PGPKeys))
- copy(ret.PGPKeys, s.PGPKeys)
- }
- if len(s.VerificationKey) > 0 {
- ret.VerificationKey = make([]byte, len(s.VerificationKey))
- copy(ret.VerificationKey, s.VerificationKey)
- }
- return ret
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal/envelope.go b/vendor/github.com/hashicorp/vault/vault/seal/envelope.go
deleted file mode 100644
index cdd6fcb8..00000000
--- a/vendor/github.com/hashicorp/vault/vault/seal/envelope.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package seal
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "errors"
- "time"
-
- metrics "github.com/armon/go-metrics"
- "github.com/hashicorp/errwrap"
- uuid "github.com/hashicorp/go-uuid"
-)
-
-type Envelope struct{}
-
-type EnvelopeInfo struct {
- Ciphertext []byte
- Key []byte
- IV []byte
-}
-
-func NewEnvelope() *Envelope {
- return &Envelope{}
-}
-
-func (e *Envelope) Encrypt(plaintext []byte) (*EnvelopeInfo, error) {
- defer metrics.MeasureSince([]string{"seal", "envelope", "encrypt"}, time.Now())
-
- // Generate DEK
- key, err := uuid.GenerateRandomBytes(32)
- if err != nil {
- return nil, err
- }
- iv, err := uuid.GenerateRandomBytes(12)
- if err != nil {
- return nil, err
- }
- aead, err := e.aeadEncrypter(key)
- if err != nil {
- return nil, err
- }
- return &EnvelopeInfo{
- Ciphertext: aead.Seal(nil, iv, plaintext, nil),
- Key: key,
- IV: iv,
- }, nil
-}
-
-func (e *Envelope) Decrypt(data *EnvelopeInfo) ([]byte, error) {
- defer metrics.MeasureSince([]string{"seal", "envelope", "decrypt"}, time.Now())
-
- aead, err := e.aeadEncrypter(data.Key)
- if err != nil {
- return nil, err
- }
- return aead.Open(nil, data.IV, data.Ciphertext, nil)
-}
-
-func (e *Envelope) aeadEncrypter(key []byte) (cipher.AEAD, error) {
- aesCipher, err := aes.NewCipher(key)
- if err != nil {
- return nil, errwrap.Wrapf("failed to create cipher: {{err}}", err)
- }
-
- // Create the GCM mode AEAD
- gcm, err := cipher.NewGCM(aesCipher)
- if err != nil {
- return nil, errors.New("failed to initialize GCM mode")
- }
-
- return gcm, nil
-}
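A round-trip sketch of the envelope: each Encrypt call generates a fresh 32-byte DEK and 12-byte IV, and Decrypt reverses the AES-GCM operation using the key and IV carried in the EnvelopeInfo:

    env := seal.NewEnvelope()
    info, err := env.Encrypt([]byte("barrier key material"))
    if err != nil {
        // handle error
    }
    pt, err := env.Decrypt(info)
    if err != nil {
        // handle error
    }
    // pt == []byte("barrier key material")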
diff --git a/vendor/github.com/hashicorp/vault/vault/seal/seal.go b/vendor/github.com/hashicorp/vault/vault/seal/seal.go
deleted file mode 100644
index b80217a0..00000000
--- a/vendor/github.com/hashicorp/vault/vault/seal/seal.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package seal
-
-import (
- "context"
-
- "github.com/hashicorp/vault/physical"
-)
-
-const (
- Shamir = "shamir"
- PKCS11 = "pkcs11"
- AliCloudKMS = "alicloudkms"
- AWSKMS = "awskms"
- GCPCKMS = "gcpckms"
- AzureKeyVault = "azurekeyvault"
- Test = "test-auto"
-
-	// HSMAutoDeprecated is a deprecated seal type from before 0.9.0.
-	// It is still referenced in certain code paths for upgrade purposes.
- HSMAutoDeprecated = "hsm-auto"
-)
-
-// Access is the interface embedded by autoSeal; it contains the logic
-// specific to encrypting and decrypting data, or in this case keys.
-type Access interface {
- SealType() string
- KeyID() string
-
- Init(context.Context) error
- Finalize(context.Context) error
-
- Encrypt(context.Context, []byte) (*physical.EncryptedBlobInfo, error)
- Decrypt(context.Context, *physical.EncryptedBlobInfo) ([]byte, error)
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal/seal_testing.go b/vendor/github.com/hashicorp/vault/vault/seal/seal_testing.go
deleted file mode 100644
index 6ce03b42..00000000
--- a/vendor/github.com/hashicorp/vault/vault/seal/seal_testing.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package seal
-
-import (
- "context"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/vault/physical"
-)
-
-type TestSeal struct {
- logger log.Logger
-}
-
-var _ Access = (*TestSeal)(nil)
-
-func NewTestSeal(logger log.Logger) *TestSeal {
- return &TestSeal{
- logger: logger,
- }
-}
-
-func (s *TestSeal) Init(_ context.Context) error {
- return nil
-}
-
-func (t *TestSeal) Finalize(_ context.Context) error {
- return nil
-}
-
-func (t *TestSeal) SealType() string {
- return Test
-}
-
-func (t *TestSeal) KeyID() string {
- return "static-key"
-}
-
-func (t *TestSeal) Encrypt(_ context.Context, plaintext []byte) (*physical.EncryptedBlobInfo, error) {
- return &physical.EncryptedBlobInfo{
- Ciphertext: ReverseBytes(plaintext),
- }, nil
-}
-
-func (t *TestSeal) Decrypt(_ context.Context, dwi *physical.EncryptedBlobInfo) ([]byte, error) {
- return ReverseBytes(dwi.Ciphertext), nil
-}
-
-// ReverseBytes is a helper to simulate "encryption/decryption"
-// on protected values.
-func ReverseBytes(in []byte) []byte {
- out := make([]byte, len(in))
- for i := 0; i < len(in); i++ {
- out[i] = in[len(in)-1-i]
- }
- return out
-}
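Since byte reversal is its own inverse, the TestSeal round-trips trivially — a small sketch (passing a nil logger, which NewTestSeal merely stores):

    ts := seal.NewTestSeal(nil)
    blob, _ := ts.Encrypt(context.Background(), []byte("abc"))
    // blob.Ciphertext == []byte("cba")
    pt, _ := ts.Decrypt(context.Background(), blob)
    // pt == []byte("abc")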
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_access.go b/vendor/github.com/hashicorp/vault/vault/seal_access.go
deleted file mode 100644
index f4a31dc9..00000000
--- a/vendor/github.com/hashicorp/vault/vault/seal_access.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package vault
-
-import (
- "context"
- "fmt"
-)
-
-// SealAccess is a wrapper around Seal that exposes accessor methods
-// through Core.SealAccess() while restricting the ability to modify
-// Core.seal itself.
-type SealAccess struct {
- seal Seal
-}
-
-func NewSealAccess(seal Seal) *SealAccess {
- return &SealAccess{seal: seal}
-}
-
-func (s *SealAccess) StoredKeysSupported() bool {
- return s.seal.StoredKeysSupported()
-}
-
-func (s *SealAccess) BarrierType() string {
- return s.seal.BarrierType()
-}
-
-func (s *SealAccess) BarrierConfig(ctx context.Context) (*SealConfig, error) {
- return s.seal.BarrierConfig(ctx)
-}
-
-func (s *SealAccess) RecoveryKeySupported() bool {
- return s.seal.RecoveryKeySupported()
-}
-
-func (s *SealAccess) RecoveryConfig(ctx context.Context) (*SealConfig, error) {
- return s.seal.RecoveryConfig(ctx)
-}
-
-func (s *SealAccess) VerifyRecoveryKey(ctx context.Context, key []byte) error {
- return s.seal.VerifyRecoveryKey(ctx, key)
-}
-
-func (s *SealAccess) ClearCaches(ctx context.Context) {
- s.seal.SetBarrierConfig(ctx, nil)
- if s.RecoveryKeySupported() {
- s.seal.SetRecoveryConfig(ctx, nil)
- }
-}
-
-type SealAccessTestingParams struct {
- PretendToAllowStoredShares bool
- PretendToAllowRecoveryKeys bool
- PretendRecoveryKey []byte
-}
-
-func (s *SealAccess) SetTestingParams(params *SealAccessTestingParams) error {
- d, ok := s.seal.(*defaultSeal)
- if !ok {
- return fmt.Errorf("not a defaultseal")
- }
- d.PretendToAllowRecoveryKeys = params.PretendToAllowRecoveryKeys
- d.PretendToAllowStoredShares = params.PretendToAllowStoredShares
- if params.PretendRecoveryKey != nil {
- d.PretendRecoveryKey = params.PretendRecoveryKey
- }
- return nil
-}
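In a test, flipping a defaultSeal into pretending to support recovery keys might look like this — a sketch assuming the caller imports the vault package:

    sa := vault.NewSealAccess(vault.NewDefaultSeal())
    err := sa.SetTestingParams(&vault.SealAccessTestingParams{
        PretendToAllowRecoveryKeys: true,
        PretendRecoveryKey:         []byte("test-recovery-key"),
    })
    // err is non-nil only when the wrapped seal is not a defaultSeal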
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_autoseal.go b/vendor/github.com/hashicorp/vault/vault/seal_autoseal.go
deleted file mode 100644
index 1a46d263..00000000
--- a/vendor/github.com/hashicorp/vault/vault/seal_autoseal.go
+++ /dev/null
@@ -1,467 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/subtle"
- "encoding/json"
- "fmt"
- "sync/atomic"
-
- proto "github.com/golang/protobuf/proto"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/physical"
- "github.com/hashicorp/vault/vault/seal"
-)
-
-// barrierTypeUpgradeCheck checks for backwards compatibility on the barrier
-// type; it is not applicable on the OSS side.
-var barrierTypeUpgradeCheck = func(_ string, _ *SealConfig) {}
-
-// autoSeal is a Seal implementation that contains logic for encrypting and
-// decrypting stored keys via an underlying seal.Access implementation, as
-// well as logic related to recovery keys and barrier config.
-type autoSeal struct {
- seal.Access
-
- barrierConfig atomic.Value
- recoveryConfig atomic.Value
- core *Core
-}
-
-// Ensure we are implementing the Seal interface
-var _ Seal = (*autoSeal)(nil)
-
-func NewAutoSeal(lowLevel seal.Access) Seal {
- ret := &autoSeal{
- Access: lowLevel,
- }
- ret.barrierConfig.Store((*SealConfig)(nil))
- ret.recoveryConfig.Store((*SealConfig)(nil))
- return ret
-}
-
-func (d *autoSeal) checkCore() error {
- if d.core == nil {
- return fmt.Errorf("seal does not have a core set")
- }
- return nil
-}
-
-func (d *autoSeal) SetCore(core *Core) {
- d.core = core
-}
-
-func (d *autoSeal) Init(ctx context.Context) error {
- return d.Access.Init(ctx)
-}
-
-func (d *autoSeal) Finalize(ctx context.Context) error {
- return d.Access.Finalize(ctx)
-}
-
-func (d *autoSeal) BarrierType() string {
- return d.SealType()
-}
-
-func (d *autoSeal) StoredKeysSupported() bool {
- return true
-}
-
-func (d *autoSeal) RecoveryKeySupported() bool {
- return true
-}
-
-// SetStoredKeys uses the autoSeal.Access.Encrypt method to wrap the keys. The stored entry
-// does not need to be seal wrapped in this case.
-func (d *autoSeal) SetStoredKeys(ctx context.Context, keys [][]byte) error {
- if keys == nil {
- return fmt.Errorf("keys were nil")
- }
- if len(keys) == 0 {
- return fmt.Errorf("no keys provided")
- }
-
- buf, err := json.Marshal(keys)
- if err != nil {
- return errwrap.Wrapf("failed to encode keys for storage: {{err}}", err)
- }
-
- // Encrypt and marshal the keys
- blobInfo, err := d.Encrypt(ctx, buf)
- if err != nil {
- return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)
- }
-
- value, err := proto.Marshal(blobInfo)
- if err != nil {
- return errwrap.Wrapf("failed to marshal value for storage: {{err}}", err)
- }
-
- // Store the seal configuration.
- pe := &physical.Entry{
- Key: StoredBarrierKeysPath,
- Value: value,
- }
-
- if err := d.core.physical.Put(ctx, pe); err != nil {
- return errwrap.Wrapf("failed to write keys to storage: {{err}}", err)
- }
-
- return nil
-}
-
-// GetStoredKeys retrieves the key shares by unwrapping the encrypted key using the
-// autoseal.
-func (d *autoSeal) GetStoredKeys(ctx context.Context) ([][]byte, error) {
- pe, err := d.core.physical.Get(ctx, StoredBarrierKeysPath)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch stored keys: {{err}}", err)
- }
-
- // This is not strictly an error; we may not have any stored keys, for
- // instance, if we're not initialized
- if pe == nil {
- return nil, nil
- }
-
- blobInfo := &physical.EncryptedBlobInfo{}
- if err := proto.Unmarshal(pe.Value, blobInfo); err != nil {
- return nil, errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err)
- }
-
- pt, err := d.Decrypt(ctx, blobInfo)
- if err != nil {
- return nil, errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err)
- }
-
- // Decode the barrier entry
- var keys [][]byte
- if err := json.Unmarshal(pt, &keys); err != nil {
-		return nil, fmt.Errorf("failed to decode stored keys: %v, stored value was %q", err, pe.Value)
- }
-
- return keys, nil
-}
-
-func (d *autoSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) {
- if d.barrierConfig.Load().(*SealConfig) != nil {
- return d.barrierConfig.Load().(*SealConfig).Clone(), nil
- }
-
- if err := d.checkCore(); err != nil {
- return nil, err
- }
-
- sealType := "barrier"
-
- entry, err := d.core.physical.Get(ctx, barrierSealConfigPath)
- if err != nil {
- d.core.logger.Error("autoseal: failed to read seal configuration", "seal_type", sealType, "error", err)
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to read %q seal configuration: {{err}}", sealType), err)
- }
-
- // If the seal configuration is missing, we are not initialized
- if entry == nil {
- if d.core.logger.IsInfo() {
- d.core.logger.Info("autoseal: seal configuration missing, not initialized", "seal_type", sealType)
- }
- return nil, nil
- }
-
- conf := &SealConfig{}
- err = json.Unmarshal(entry.Value, conf)
- if err != nil {
- d.core.logger.Error("autoseal: failed to decode seal configuration", "seal_type", sealType, "error", err)
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode %q seal configuration: {{err}}", sealType), err)
- }
-
- // Check for a valid seal configuration
- if err := conf.Validate(); err != nil {
- d.core.logger.Error("autoseal: invalid seal configuration", "seal_type", sealType, "error", err)
- return nil, errwrap.Wrapf(fmt.Sprintf("%q seal validation failed: {{err}}", sealType), err)
- }
-
- barrierTypeUpgradeCheck(d.BarrierType(), conf)
-
- if conf.Type != d.BarrierType() {
- d.core.logger.Error("autoseal: barrier seal type does not match loaded type", "seal_type", conf.Type, "loaded_type", d.BarrierType())
- return nil, fmt.Errorf("barrier seal type of %q does not match loaded type of %q", conf.Type, d.BarrierType())
- }
-
- d.barrierConfig.Store(conf)
- return conf.Clone(), nil
-}
-
-func (d *autoSeal) SetBarrierConfig(ctx context.Context, conf *SealConfig) error {
- if err := d.checkCore(); err != nil {
- return err
- }
-
- if conf == nil {
- d.barrierConfig.Store((*SealConfig)(nil))
- return nil
- }
-
- conf.Type = d.BarrierType()
-
- // Encode the seal configuration
- buf, err := json.Marshal(conf)
- if err != nil {
- return errwrap.Wrapf("failed to encode barrier seal configuration: {{err}}", err)
- }
-
- // Store the seal configuration
- pe := &physical.Entry{
- Key: barrierSealConfigPath,
- Value: buf,
- }
-
- if err := d.core.physical.Put(ctx, pe); err != nil {
- d.core.logger.Error("autoseal: failed to write barrier seal configuration", "error", err)
- return errwrap.Wrapf("failed to write barrier seal configuration: {{err}}", err)
- }
-
- d.barrierConfig.Store(conf.Clone())
-
- return nil
-}
-
-func (d *autoSeal) SetCachedBarrierConfig(config *SealConfig) {
- d.barrierConfig.Store(config)
-}
-
-func (d *autoSeal) RecoveryType() string {
- return RecoveryTypeShamir
-}
-
-// RecoveryConfig returns the recovery config stored at recoverySealConfigPlaintextPath.
-func (d *autoSeal) RecoveryConfig(ctx context.Context) (*SealConfig, error) {
- if d.recoveryConfig.Load().(*SealConfig) != nil {
- return d.recoveryConfig.Load().(*SealConfig).Clone(), nil
- }
-
- if err := d.checkCore(); err != nil {
- return nil, err
- }
-
- sealType := "recovery"
-
- var entry *physical.Entry
- var err error
- entry, err = d.core.physical.Get(ctx, recoverySealConfigPlaintextPath)
- if err != nil {
- d.core.logger.Error("autoseal: failed to read seal configuration", "seal_type", sealType, "error", err)
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to read %q seal configuration: {{err}}", sealType), err)
- }
-
- if entry == nil {
- if d.core.Sealed() {
- d.core.logger.Info("autoseal: seal configuration missing, but cannot check old path as core is sealed", "seal_type", sealType)
- return nil, nil
- }
-
- // Check the old recovery seal config path so an upgraded standby will
- // return the correct seal config
- be, err := d.core.barrier.Get(ctx, recoverySealConfigPath)
- if err != nil {
- return nil, errwrap.Wrapf("failed to read old recovery seal configuration: {{err}}", err)
- }
-
- // If the seal configuration is missing, then we are not initialized.
- if be == nil {
- if d.core.logger.IsInfo() {
- d.core.logger.Info("autoseal: seal configuration missing, not initialized", "seal_type", sealType)
- }
- return nil, nil
- }
-
- // Reconstruct the physical entry
- entry = &physical.Entry{
- Key: be.Key,
- Value: be.Value,
- }
- }
-
- conf := &SealConfig{}
- if err := json.Unmarshal(entry.Value, conf); err != nil {
- d.core.logger.Error("autoseal: failed to decode seal configuration", "seal_type", sealType, "error", err)
- return nil, errwrap.Wrapf(fmt.Sprintf("failed to decode %q seal configuration: {{err}}", sealType), err)
- }
-
- // Check for a valid seal configuration
- if err := conf.Validate(); err != nil {
- d.core.logger.Error("autoseal: invalid seal configuration", "seal_type", sealType, "error", err)
- return nil, errwrap.Wrapf(fmt.Sprintf("%q seal validation failed: {{err}}", sealType), err)
- }
-
- if conf.Type != d.RecoveryType() {
- d.core.logger.Error("autoseal: recovery seal type does not match loaded type", "seal_type", conf.Type, "loaded_type", d.RecoveryType())
- return nil, fmt.Errorf("recovery seal type of %q does not match loaded type of %q", conf.Type, d.RecoveryType())
- }
-
- d.recoveryConfig.Store(conf)
- return conf.Clone(), nil
-}
-
-// SetRecoveryConfig writes the recovery configuration to the physical storage
-// and sets it as the seal's recoveryConfig.
-func (d *autoSeal) SetRecoveryConfig(ctx context.Context, conf *SealConfig) error {
- if err := d.checkCore(); err != nil {
- return err
- }
-
- // Perform migration if applicable
- if err := d.migrateRecoveryConfig(ctx); err != nil {
- return err
- }
-
- if conf == nil {
- d.recoveryConfig.Store((*SealConfig)(nil))
- return nil
- }
-
- conf.Type = d.RecoveryType()
-
- // Encode the seal configuration
- buf, err := json.Marshal(conf)
- if err != nil {
- return errwrap.Wrapf("failed to encode recovery seal configuration: {{err}}", err)
- }
-
- // Store the seal configuration directly in the physical storage
- pe := &physical.Entry{
- Key: recoverySealConfigPlaintextPath,
- Value: buf,
- }
-
- if err := d.core.physical.Put(ctx, pe); err != nil {
- d.core.logger.Error("autoseal: failed to write recovery seal configuration", "error", err)
- return errwrap.Wrapf("failed to write recovery seal configuration: {{err}}", err)
- }
-
- d.recoveryConfig.Store(conf.Clone())
-
- return nil
-}
-
-func (d *autoSeal) SetCachedRecoveryConfig(config *SealConfig) {
- d.recoveryConfig.Store(config)
-}
-
-func (d *autoSeal) VerifyRecoveryKey(ctx context.Context, key []byte) error {
- if key == nil {
- return fmt.Errorf("recovery key to verify is nil")
- }
-
- pe, err := d.core.physical.Get(ctx, recoveryKeyPath)
- if err != nil {
- d.core.logger.Error("autoseal: failed to read recovery key", "error", err)
- return errwrap.Wrapf("failed to read recovery key: {{err}}", err)
- }
- if pe == nil {
- d.core.logger.Warn("autoseal: no recovery key found")
- return fmt.Errorf("no recovery key found")
- }
-
- blobInfo := &physical.EncryptedBlobInfo{}
- if err := proto.Unmarshal(pe.Value, blobInfo); err != nil {
- return errwrap.Wrapf("failed to proto decode stored keys: {{err}}", err)
- }
-
- pt, err := d.Decrypt(ctx, blobInfo)
- if err != nil {
- return errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err)
- }
-
- // Check if provided key is same as the decrypted key
- if subtle.ConstantTimeCompare(key, pt) != 1 {
- // We may need to upgrade if the key is barrier-wrapped, so check
- barrierDec, err := d.core.BarrierEncryptorAccess().Decrypt(ctx, recoveryKeyPath, pt)
- if err == nil {
- // If we hit this, it got barrier-wrapped, so we need to re-set the
- // recovery key after unwrapping
- err := d.SetRecoveryKey(ctx, barrierDec)
- if err != nil {
- return err
- }
- }
- // Set pt to barrierDec for re-checking
- pt = barrierDec
- }
-
- if subtle.ConstantTimeCompare(key, pt) != 1 {
- return fmt.Errorf("recovery key does not match submitted values")
- }
-
- return nil
-}
-
-func (d *autoSeal) SetRecoveryKey(ctx context.Context, key []byte) error {
- if err := d.checkCore(); err != nil {
- return err
- }
-
- if key == nil {
- return fmt.Errorf("recovery key to store is nil")
- }
-
- // Encrypt and marshal the keys
- blobInfo, err := d.Encrypt(ctx, key)
- if err != nil {
- return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)
- }
-
- value, err := proto.Marshal(blobInfo)
- if err != nil {
- return errwrap.Wrapf("failed to marshal value for storage: {{err}}", err)
- }
-
- be := &physical.Entry{
- Key: recoveryKeyPath,
- Value: value,
- }
-
- if err := d.core.physical.Put(ctx, be); err != nil {
- d.core.logger.Error("autoseal: failed to write recovery key", "error", err)
- return errwrap.Wrapf("failed to write recovery key: {{err}}", err)
- }
-
- return nil
-}
-
-// migrateRecoveryConfig is a helper func to migrate the recovery config to
-// live outside the barrier. It is called from SetRecoveryConfig, which is
-// always called with the stateLock held.
-func (d *autoSeal) migrateRecoveryConfig(ctx context.Context) error {
- // Get config from the old recoverySealConfigPath path
- be, err := d.core.barrier.Get(ctx, recoverySealConfigPath)
- if err != nil {
- return errwrap.Wrapf("failed to read old recovery seal configuration during migration: {{err}}", err)
- }
-
- // If this entry is nil, then skip migration
- if be == nil {
- return nil
- }
-
- // Only log if we are performing the migration
- d.core.logger.Debug("migrating recovery seal configuration")
- defer d.core.logger.Debug("done migrating recovery seal configuration")
-
- // Perform migration
- pe := &physical.Entry{
- Key: recoverySealConfigPlaintextPath,
- Value: be.Value,
- }
-
- if err := d.core.physical.Put(ctx, pe); err != nil {
- return errwrap.Wrapf("failed to write recovery seal configuration during migration: {{err}}", err)
- }
-
- // Perform deletion of the old entry
- if err := d.core.barrier.Delete(ctx, recoverySealConfigPath); err != nil {
- return errwrap.Wrapf("failed to delete old recovery seal configuration during migration: {{err}}", err)
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_testing.go b/vendor/github.com/hashicorp/vault/vault/seal_testing.go
deleted file mode 100644
index d97281b3..00000000
--- a/vendor/github.com/hashicorp/vault/vault/seal_testing.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package vault
-
-import (
- "context"
-
- "github.com/mitchellh/go-testing-interface"
-)
-
-var (
- TestCoreUnsealedWithConfigs = testCoreUnsealedWithConfigs
- TestSealDefConfigs = testSealDefConfigs
-)
-
-type TestSealOpts struct {
- StoredKeysDisabled bool
- RecoveryKeysDisabled bool
-}
-
-func testCoreUnsealedWithConfigs(t testing.T, barrierConf, recoveryConf *SealConfig) (*Core, [][]byte, [][]byte, string) {
- t.Helper()
- var opts *TestSealOpts
- if recoveryConf == nil {
- opts = &TestSealOpts{
- StoredKeysDisabled: true,
- RecoveryKeysDisabled: true,
- }
- }
- seal := NewTestSeal(t, opts)
- core := TestCoreWithSeal(t, seal, false)
- result, err := core.Initialize(context.Background(), &InitParams{
- BarrierConfig: barrierConf,
- RecoveryConfig: recoveryConf,
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- err = core.UnsealWithStoredKeys(context.Background())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if core.Sealed() {
- for _, key := range result.SecretShares {
- if _, err := core.Unseal(TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- if core.Sealed() {
- t.Fatal("should not be sealed")
- }
- }
-
- return core, result.SecretShares, result.RecoveryShares, result.RootToken
-}
-
-func testSealDefConfigs() (*SealConfig, *SealConfig) {
- return &SealConfig{
- SecretShares: 5,
- SecretThreshold: 3,
- }, nil
-}
-
-func TestCoreUnsealedWithConfigSealOpts(t testing.T, barrierConf, recoveryConf *SealConfig, sealOpts *TestSealOpts) (*Core, [][]byte, [][]byte, string) {
- seal := NewTestSeal(t, sealOpts)
- core := TestCoreWithSeal(t, seal, false)
- result, err := core.Initialize(context.Background(), &InitParams{
- BarrierConfig: barrierConf,
- RecoveryConfig: recoveryConf,
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- err = core.UnsealWithStoredKeys(context.Background())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- if core.Sealed() {
- for _, key := range result.SecretShares {
- if _, err := core.Unseal(TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- if core.Sealed() {
- t.Fatal("should not be sealed")
- }
- }
-
- return core, result.SecretShares, result.RecoveryShares, result.RootToken
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/seal_testing_util.go b/vendor/github.com/hashicorp/vault/vault/seal_testing_util.go
deleted file mode 100644
index 76568fad..00000000
--- a/vendor/github.com/hashicorp/vault/vault/seal_testing_util.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import "github.com/mitchellh/go-testing-interface"
-
-func NewTestSeal(testing.T, *TestSealOpts) Seal {
- return NewDefaultSeal()
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/sealunwrapper.go b/vendor/github.com/hashicorp/vault/vault/sealunwrapper.go
deleted file mode 100644
index c249fd77..00000000
--- a/vendor/github.com/hashicorp/vault/vault/sealunwrapper.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
- "fmt"
- "sync/atomic"
-
- proto "github.com/golang/protobuf/proto"
- log "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/physical"
-)
-
-// NewSealUnwrapper creates a new seal unwrapper
-func NewSealUnwrapper(underlying physical.Backend, logger log.Logger) physical.Backend {
- ret := &sealUnwrapper{
- underlying: underlying,
- logger: logger,
- locks: locksutil.CreateLocks(),
- allowUnwraps: new(uint32),
- }
-
- if underTxn, ok := underlying.(physical.Transactional); ok {
- return &transactionalSealUnwrapper{
- sealUnwrapper: ret,
- Transactional: underTxn,
- }
- }
-
- return ret
-}
-
-var _ physical.Backend = (*sealUnwrapper)(nil)
-var _ physical.Transactional = (*transactionalSealUnwrapper)(nil)
-
-type sealUnwrapper struct {
- underlying physical.Backend
- logger log.Logger
- locks []*locksutil.LockEntry
- allowUnwraps *uint32
-}
-
-// transactionalSealUnwrapper is a seal unwrapper that wraps a transactional physical backend
-type transactionalSealUnwrapper struct {
- *sealUnwrapper
- physical.Transactional
-}
-
-func (d *sealUnwrapper) Put(ctx context.Context, entry *physical.Entry) error {
- if entry == nil {
- return nil
- }
-
- locksutil.LockForKey(d.locks, entry.Key).Lock()
- defer locksutil.LockForKey(d.locks, entry.Key).Unlock()
-
- return d.underlying.Put(ctx, entry)
-}
-
-func (d *sealUnwrapper) Get(ctx context.Context, key string) (*physical.Entry, error) {
- entry, err := d.underlying.Get(ctx, key)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var performUnwrap bool
- se := &physical.EncryptedBlobInfo{}
- // If the value ends in our canary value, try to decode the bytes.
- eLen := len(entry.Value)
- if eLen > 0 && entry.Value[eLen-1] == 's' {
- if err := proto.Unmarshal(entry.Value[:eLen-1], se); err == nil {
- // We unmarshaled successfully which means we need to store it as a
- // non-proto message
- performUnwrap = true
- }
- }
- if !performUnwrap {
- return entry, nil
- }
- // It's actually encrypted and we can't read it
- if se.Wrapped {
- return nil, fmt.Errorf("cannot decode sealwrapped storage entry %q", entry.Key)
- }
- if atomic.LoadUint32(d.allowUnwraps) != 1 {
- return &physical.Entry{
- Key: entry.Key,
- Value: se.Ciphertext,
- }, nil
- }
-
- locksutil.LockForKey(d.locks, key).Lock()
- defer locksutil.LockForKey(d.locks, key).Unlock()
-
- // At this point we need to re-read and re-check
- entry, err = d.underlying.Get(ctx, key)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- performUnwrap = false
- se = &physical.EncryptedBlobInfo{}
- // If the value ends in our canary value, try to decode the bytes.
- eLen = len(entry.Value)
- if eLen > 0 && entry.Value[eLen-1] == 's' {
- // We ignore an error because the canary is not a guarantee; if it
- // doesn't decode, proceed normally
- if err := proto.Unmarshal(entry.Value[:eLen-1], se); err == nil {
- // We unmarshaled successfully which means we need to store it as a
- // non-proto message
- performUnwrap = true
- }
- }
- if !performUnwrap {
- return entry, nil
- }
- if se.Wrapped {
- return nil, fmt.Errorf("cannot decode sealwrapped storage entry %q", entry.Key)
- }
-
- entry = &physical.Entry{
- Key: entry.Key,
- Value: se.Ciphertext,
- }
-
- if atomic.LoadUint32(d.allowUnwraps) != 1 {
- return entry, nil
- }
- return entry, d.underlying.Put(ctx, entry)
-}
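The canary check in Get implies a write-side encoding along these lines; the writer itself lives in the seal-wrap code that is not part of this file, so this is a hypothetical sketch, not the actual implementation:

    // encodeSealWrapped marshals the blob info and appends the 's' canary
    // byte that Get looks for before attempting a proto.Unmarshal.
    func encodeSealWrapped(se *physical.EncryptedBlobInfo) ([]byte, error) {
        buf, err := proto.Marshal(se)
        if err != nil {
            return nil, err
        }
        return append(buf, 's'), nil
    }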
-
-func (d *sealUnwrapper) Delete(ctx context.Context, key string) error {
- locksutil.LockForKey(d.locks, key).Lock()
- defer locksutil.LockForKey(d.locks, key).Unlock()
-
- return d.underlying.Delete(ctx, key)
-}
-
-func (d *sealUnwrapper) List(ctx context.Context, prefix string) ([]string, error) {
- return d.underlying.List(ctx, prefix)
-}
-
-func (d *transactionalSealUnwrapper) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
- // Collect keys that need to be locked
- var keys []string
- for _, curr := range txns {
- keys = append(keys, curr.Entry.Key)
- }
- // Lock the keys
- for _, l := range locksutil.LocksForKeys(d.locks, keys) {
- l.Lock()
- defer l.Unlock()
- }
-
- if err := d.Transactional.Transaction(ctx, txns); err != nil {
- return err
- }
-
- return nil
-}
-
-// This should only run during preSeal, which ensures that it can't be run
-// concurrently and that it will be run only by the active node.
-func (d *sealUnwrapper) stopUnwraps() {
- atomic.StoreUint32(d.allowUnwraps, 0)
-}
-
-func (d *sealUnwrapper) runUnwraps() {
-	// Allow key unwraps on key gets. This gets set only when running on the
-	// active node to prevent standbys from changing data underneath the
-	// primary.
- atomic.StoreUint32(d.allowUnwraps, 1)
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/testing.go b/vendor/github.com/hashicorp/vault/vault/testing.go
deleted file mode 100644
index 91aa3802..00000000
--- a/vendor/github.com/hashicorp/vault/vault/testing.go
+++ /dev/null
@@ -1,1550 +0,0 @@
-package vault
-
-import (
- "bytes"
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/sha256"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/base64"
- "encoding/pem"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math/big"
- mathrand "math/rand"
- "net"
- "net/http"
- "os"
- "os/exec"
- "path/filepath"
- "sync"
- "sync/atomic"
- "time"
-
- log "github.com/hashicorp/go-hclog"
- "github.com/mitchellh/copystructure"
-
- "golang.org/x/crypto/ssh"
- "golang.org/x/net/http2"
-
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/vault/api"
- "github.com/hashicorp/vault/audit"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/logging"
- "github.com/hashicorp/vault/helper/reload"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/hashicorp/vault/physical"
- dbMysql "github.com/hashicorp/vault/plugins/database/mysql"
- dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql"
- "github.com/mitchellh/go-testing-interface"
-
- physInmem "github.com/hashicorp/vault/physical/inmem"
-)
-
-// This file contains a number of methods that are useful for unit
-// tests within other packages.
-
-const (
- testSharedPublicKey = `
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT
-`
- testSharedPrivateKey = `
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG
-A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A
-/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE
-mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+
-GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe
-nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x
-+aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+
-MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8
-Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h
-4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal
-oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+
-Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y
-6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G
-IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ
-CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2
-AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM
-kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe
-rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy
-AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9
-cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X
-5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D
-My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t
-O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi
-oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
-+B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs=
------END RSA PRIVATE KEY-----
-`
-)
-
-// TestCore returns a pure in-memory, uninitialized core for testing.
-func TestCore(t testing.T) *Core {
- return TestCoreWithSeal(t, nil, false)
-}
-
-// TestCoreRaw returns a pure in-memory, uninitialized core for testing. The raw
-// storage endpoints are enabled with this core.
-func TestCoreRaw(t testing.T) *Core {
- return TestCoreWithSeal(t, nil, true)
-}
-
-// TestCoreNewSeal returns a pure in-memory, uninitialized core with
-// the new seal configuration.
-func TestCoreNewSeal(t testing.T) *Core {
- seal := NewTestSeal(t, nil)
- return TestCoreWithSeal(t, seal, false)
-}
-
-// TestCoreWithConfig returns a pure in-memory, uninitialized core with the
-// specified core configurations overridden for testing.
-func TestCoreWithConfig(t testing.T, conf *CoreConfig) *Core {
- return TestCoreWithSealAndUI(t, conf)
-}
-
-// TestCoreWithSeal returns a pure in-memory, uninitialized core with the
-// specified seal for testing.
-func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core {
- conf := &CoreConfig{
- Seal: testSeal,
- EnableUI: false,
- EnableRaw: enableRaw,
- BuiltinRegistry: NewMockBuiltinRegistry(),
- }
- return TestCoreWithSealAndUI(t, conf)
-}
-
-func TestCoreUI(t testing.T, enableUI bool) *Core {
- conf := &CoreConfig{
- EnableUI: enableUI,
- EnableRaw: true,
- BuiltinRegistry: NewMockBuiltinRegistry(),
- }
- return TestCoreWithSealAndUI(t, conf)
-}
-
-func TestCoreWithSealAndUI(t testing.T, opts *CoreConfig) *Core {
- logger := logging.NewVaultLogger(log.Trace)
- physicalBackend, err := physInmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
-
- // Start off with base test core config
- conf := testCoreConfig(t, physicalBackend, logger)
-
- // Override config values with ones that gets passed in
- conf.EnableUI = opts.EnableUI
- conf.EnableRaw = opts.EnableRaw
- conf.Seal = opts.Seal
- conf.LicensingConfig = opts.LicensingConfig
- conf.DisableKeyEncodingChecks = opts.DisableKeyEncodingChecks
-
- c, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- return c
-}
-
-func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
- t.Helper()
- noopAudits := map[string]audit.Factory{
- "noop": func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) {
- view := &logical.InmemStorage{}
- view.Put(context.Background(), &logical.StorageEntry{
- Key: "salt",
- Value: []byte("foo"),
- })
- config.SaltConfig = &salt.Config{
- HMAC: sha256.New,
- HMACType: "hmac-sha256",
- }
- config.SaltView = view
- return &noopAudit{
- Config: config,
- }, nil
- },
- }
-
- noopBackends := make(map[string]logical.Factory)
- noopBackends["noop"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
- b := new(framework.Backend)
- b.Setup(ctx, config)
- b.BackendType = logical.TypeCredential
- return b, nil
- }
- noopBackends["http"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
- return new(rawHTTP), nil
- }
-
- credentialBackends := make(map[string]logical.Factory)
- for backendName, backendFactory := range noopBackends {
- credentialBackends[backendName] = backendFactory
- }
- for backendName, backendFactory := range testCredentialBackends {
- credentialBackends[backendName] = backendFactory
- }
-
- logicalBackends := make(map[string]logical.Factory)
- for backendName, backendFactory := range noopBackends {
- logicalBackends[backendName] = backendFactory
- }
-
- logicalBackends["kv"] = LeasedPassthroughBackendFactory
- for backendName, backendFactory := range testLogicalBackends {
- logicalBackends[backendName] = backendFactory
- }
-
- conf := &CoreConfig{
- Physical: physicalBackend,
- AuditBackends: noopAudits,
- LogicalBackends: logicalBackends,
- CredentialBackends: credentialBackends,
- DisableMlock: true,
- Logger: logger,
- BuiltinRegistry: NewMockBuiltinRegistry(),
- }
-
- return conf
-}
-
-// TestCoreInit initializes the core with a single key, and returns
-// the key that must be used to unseal the core and a root token.
-func TestCoreInit(t testing.T, core *Core) ([][]byte, string) {
- t.Helper()
- secretShares, _, root := TestCoreInitClusterWrapperSetup(t, core, nil, nil)
- return secretShares, root
-}
-
-func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, clusterAddrs []*net.TCPAddr, handler http.Handler) ([][]byte, [][]byte, string) {
- t.Helper()
- core.SetClusterListenerAddrs(clusterAddrs)
- core.SetClusterHandler(handler)
-
- barrierConfig := &SealConfig{
- SecretShares: 3,
- SecretThreshold: 3,
- }
-
- // If we support storing barrier keys, then set that to equal the min threshold to unseal
- if core.seal.StoredKeysSupported() {
- barrierConfig.StoredShares = barrierConfig.SecretThreshold
- }
-
- recoveryConfig := &SealConfig{
- SecretShares: 3,
- SecretThreshold: 3,
- }
-
- result, err := core.Initialize(context.Background(), &InitParams{
- BarrierConfig: barrierConfig,
- RecoveryConfig: recoveryConfig,
- })
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- return result.SecretShares, result.RecoveryShares, result.RootToken
-}
-
-func TestCoreUnseal(core *Core, key []byte) (bool, error) {
- return core.Unseal(key)
-}
-
-func TestCoreUnsealWithRecoveryKeys(core *Core, key []byte) (bool, error) {
- return core.UnsealWithRecoveryKeys(key)
-}
-
-// TestCoreUnsealed returns a pure in-memory core that is already
-// initialized and unsealed.
-func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
- t.Helper()
- core := TestCore(t)
- return testCoreUnsealed(t, core)
-}
-
-// TestCoreUnsealedRaw returns a pure in-memory core that is already
-// initialized, unsealed, and with raw endpoints enabled.
-func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) {
- t.Helper()
- core := TestCoreRaw(t)
- return testCoreUnsealed(t, core)
-}
-
-// TestCoreUnsealedWithConfig returns a pure in-memory core that is already
-// initialized and unsealed, with any provided core config values overridden.
-func TestCoreUnsealedWithConfig(t testing.T, conf *CoreConfig) (*Core, [][]byte, string) {
- t.Helper()
- core := TestCoreWithConfig(t, conf)
- return testCoreUnsealed(t, core)
-}
-
-func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
- t.Helper()
- keys, token := TestCoreInit(t, core)
- for _, key := range keys {
- if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- if core.Sealed() {
- t.Fatal("should not be sealed")
- }
-
- return core, keys, token
-}
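A hypothetical caller in another package's unit test, assuming Core.HandleRequest takes a context in this snapshot of the API:

    func TestMyFeature(t *testing.T) {
        core, _, rootToken := vault.TestCoreUnsealed(t)

        req := &logical.Request{
            Operation:   logical.ReadOperation,
            Path:        "sys/mounts",
            ClientToken: rootToken,
        }
        if _, err := core.HandleRequest(context.Background(), req); err != nil {
            t.Fatalf("err: %v", err)
        }
    }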
-
-func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) {
- t.Helper()
- logger := logging.NewVaultLogger(log.Trace)
- conf := testCoreConfig(t, backend, logger)
- conf.Seal = NewTestSeal(t, nil)
-
- core, err := NewCore(conf)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- keys, token := TestCoreInit(t, core)
- for _, key := range keys {
- if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
- t.Fatal(err)
- }
-
- if core.Sealed() {
- t.Fatal("should not be sealed")
- }
-
- return core, keys, token
-}
-
-// TestKeyCopy is a helper that copies the key so the copy can be
-// passed to Unseal without affecting the caller's slice.
-func TestKeyCopy(key []byte) []byte {
- result := make([]byte, len(key))
- copy(result, key)
- return result
-}
-
-func TestDynamicSystemView(c *Core) *dynamicSystemView {
- me := &MountEntry{
- Config: MountConfig{
- DefaultLeaseTTL: 24 * time.Hour,
- MaxLeaseTTL: 2 * 24 * time.Hour,
- },
- }
-
- return &dynamicSystemView{c, me}
-}
-
-// TestAddTestPlugin registers testFunc as a plugin command in the plugin
-// catalog. If provided, tempDir is used as the plugin directory.
-func TestAddTestPlugin(t testing.T, c *Core, name string, pluginType consts.PluginType, testFunc string, env []string, tempDir string) {
- file, err := os.Open(os.Args[0])
- if err != nil {
- t.Fatal(err)
- }
- defer file.Close()
-
- dirPath := filepath.Dir(os.Args[0])
- fileName := filepath.Base(os.Args[0])
-
- if tempDir != "" {
- fi, err := file.Stat()
- if err != nil {
- t.Fatal(err)
- }
-
- // Copy over the file to the temp dir
- dst := filepath.Join(tempDir, fileName)
- out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
- if err != nil {
- t.Fatal(err)
- }
- defer out.Close()
-
- if _, err = io.Copy(out, file); err != nil {
- t.Fatal(err)
- }
- err = out.Sync()
- if err != nil {
- t.Fatal(err)
- }
-
- dirPath = tempDir
- }
-
- // Determine plugin directory full path, evaluating potential symlink path
- fullPath, err := filepath.EvalSymlinks(dirPath)
- if err != nil {
- t.Fatal(err)
- }
-
- reader, err := os.Open(filepath.Join(fullPath, fileName))
- if err != nil {
- t.Fatal(err)
- }
- defer reader.Close()
-
- // Find out the sha256
- hash := sha256.New()
-
- _, err = io.Copy(hash, reader)
- if err != nil {
- t.Fatal(err)
- }
-
- sum := hash.Sum(nil)
-
- // Set core's plugin directory and plugin catalog directory
- c.pluginDirectory = fullPath
- c.pluginCatalog.directory = fullPath
-
- args := []string{fmt.Sprintf("--test.run=%s", testFunc)}
- err = c.pluginCatalog.Set(context.Background(), name, pluginType, fileName, args, env, sum)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-var testLogicalBackends = map[string]logical.Factory{}
-var testCredentialBackends = map[string]logical.Factory{}
-
-// StartSSHHostTestServer starts a test server that responds to SSH
-// authentication requests. Used to test the SSH secret backend.
-func StartSSHHostTestServer() (string, error) {
- pubKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(testSharedPublicKey))
- if err != nil {
- return "", fmt.Errorf("error parsing public key")
- }
- serverConfig := &ssh.ServerConfig{
- PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
-			if bytes.Equal(pubKey.Marshal(), key.Marshal()) {
- return &ssh.Permissions{}, nil
- } else {
- return nil, fmt.Errorf("key does not match")
- }
- },
- }
- signer, err := ssh.ParsePrivateKey([]byte(testSharedPrivateKey))
- if err != nil {
- panic("Error parsing private key")
- }
- serverConfig.AddHostKey(signer)
-
- soc, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- return "", fmt.Errorf("error listening to connection")
- }
-
- go func() {
- for {
- conn, err := soc.Accept()
- if err != nil {
- panic(fmt.Sprintf("Error accepting incoming connection: %s", err))
- }
- defer conn.Close()
- sshConn, chanReqs, _, err := ssh.NewServerConn(conn, serverConfig)
- if err != nil {
- panic(fmt.Sprintf("Handshaking error: %v", err))
- }
-
- go func() {
- for chanReq := range chanReqs {
- go func(chanReq ssh.NewChannel) {
- if chanReq.ChannelType() != "session" {
- chanReq.Reject(ssh.UnknownChannelType, "unknown channel type")
- return
- }
-
- ch, requests, err := chanReq.Accept()
- if err != nil {
- panic(fmt.Sprintf("Error accepting channel: %s", err))
- }
-
- go func(ch ssh.Channel, in <-chan *ssh.Request) {
- for req := range in {
- executeServerCommand(ch, req)
- }
- }(ch, requests)
- }(chanReq)
- }
- sshConn.Close()
- }()
- }
- }()
- return soc.Addr().String(), nil
-}
-
-// executeServerCommand executes the commands the client requests to be run
-// on the server. Used to test the SSH secret backend.
-func executeServerCommand(ch ssh.Channel, req *ssh.Request) {
- command := string(req.Payload[4:])
- cmd := exec.Command("/bin/bash", []string{"-c", command}...)
- req.Reply(true, nil)
-
- cmd.Stdout = ch
- cmd.Stderr = ch
- cmd.Stdin = ch
-
- err := cmd.Start()
- if err != nil {
- panic(fmt.Sprintf("Error starting the command: '%s'", err))
- }
-
- go func() {
- _, err := cmd.Process.Wait()
- if err != nil {
- panic(fmt.Sprintf("Error while waiting for command to finish:'%s'", err))
- }
- ch.Close()
- }()
-}
-
-// AddTestCredentialBackend adds a credential backend for the test core. It
-// must be invoked before the test core is created.
-func AddTestCredentialBackend(name string, factory logical.Factory) error {
- if name == "" {
- return fmt.Errorf("missing backend name")
- }
- if factory == nil {
- return fmt.Errorf("missing backend factory function")
- }
- testCredentialBackends[name] = factory
- return nil
-}
-
-// AddTestLogicalBackend adds a logical backend for the test core. It must
-// be invoked before the test core is created.
-func AddTestLogicalBackend(name string, factory logical.Factory) error {
- if name == "" {
- return fmt.Errorf("missing backend name")
- }
- if factory == nil {
- return fmt.Errorf("missing backend factory function")
- }
- testLogicalBackends[name] = factory
- return nil
-}
-
-type noopAudit struct {
- Config *audit.BackendConfig
- salt *salt.Salt
- saltMutex sync.RWMutex
-}
-
-func (n *noopAudit) GetHash(ctx context.Context, data string) (string, error) {
- salt, err := n.Salt(ctx)
- if err != nil {
- return "", err
- }
- return salt.GetIdentifiedHMAC(data), nil
-}
-
-func (n *noopAudit) LogRequest(_ context.Context, _ *audit.LogInput) error {
- return nil
-}
-
-func (n *noopAudit) LogResponse(_ context.Context, _ *audit.LogInput) error {
- return nil
-}
-
-func (n *noopAudit) Reload(_ context.Context) error {
- return nil
-}
-
-func (n *noopAudit) Invalidate(_ context.Context) {
- n.saltMutex.Lock()
- defer n.saltMutex.Unlock()
- n.salt = nil
-}
-
-func (n *noopAudit) Salt(ctx context.Context) (*salt.Salt, error) {
- n.saltMutex.RLock()
- if n.salt != nil {
- defer n.saltMutex.RUnlock()
- return n.salt, nil
- }
- n.saltMutex.RUnlock()
- n.saltMutex.Lock()
- defer n.saltMutex.Unlock()
- if n.salt != nil {
- return n.salt, nil
- }
- salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig)
- if err != nil {
- return nil, err
- }
- n.salt = salt
- return salt, nil
-}
-
-type rawHTTP struct{}
-
-func (n *rawHTTP) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
- return &logical.Response{
- Data: map[string]interface{}{
- logical.HTTPStatusCode: 200,
- logical.HTTPContentType: "plain/text",
- logical.HTTPRawBody: []byte("hello world"),
- },
- }, nil
-}
-
-func (n *rawHTTP) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) {
- return false, false, nil
-}
-
-func (n *rawHTTP) SpecialPaths() *logical.Paths {
- return &logical.Paths{Unauthenticated: []string{"*"}}
-}
-
-func (n *rawHTTP) System() logical.SystemView {
- return logical.StaticSystemView{
- DefaultLeaseTTLVal: time.Hour * 24,
- MaxLeaseTTLVal: time.Hour * 24 * 32,
- }
-}
-
-func (n *rawHTTP) Logger() log.Logger {
- return logging.NewVaultLogger(log.Trace)
-}
-
-func (n *rawHTTP) Cleanup(ctx context.Context) {
- // noop
-}
-
-func (n *rawHTTP) Initialize(ctx context.Context) error {
- // noop
- return nil
-}
-
-func (n *rawHTTP) InvalidateKey(context.Context, string) {
- // noop
-}
-
-func (n *rawHTTP) Setup(ctx context.Context, config *logical.BackendConfig) error {
- // noop
- return nil
-}
-
-func (n *rawHTTP) Type() logical.BackendType {
- return logical.TypeLogical
-}
-
-func GenerateRandBytes(length int) ([]byte, error) {
- if length < 0 {
- return nil, fmt.Errorf("length must be >= 0")
- }
-
- buf := make([]byte, length)
- if length == 0 {
- return buf, nil
- }
-
- n, err := rand.Read(buf)
- if err != nil {
- return nil, err
- }
- if n != length {
- return nil, fmt.Errorf("unable to read %d bytes; only read %d", length, n)
- }
-
- return buf, nil
-}
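
A typical use, sketched under the assumption of this file's go-testing-interface import plus encoding/hex; the helper name is invented here:

```go
// randomTestName builds a unique, human-readable test resource name.
func randomTestName(t testing.T) string {
	t.Helper()
	buf, err := GenerateRandBytes(16)
	if err != nil {
		t.Fatal(err)
	}
	return "test-" + hex.EncodeToString(buf) // 32 hex characters
}
```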
-
-func TestWaitActive(t testing.T, core *Core) {
- t.Helper()
- if err := TestWaitActiveWithError(core); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestWaitActiveWithError(core *Core) error {
- start := time.Now()
- var standby bool
- var err error
-	for time.Since(start) < time.Second {
- standby, err = core.Standby()
- if err != nil {
- return err
- }
- if !standby {
- break
- }
- }
- if standby {
- return errors.New("should not be in standby mode")
- }
- return nil
-}
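
Note that the loop above polls Standby() in a tight loop for up to one second. A gentler variant, shown only as a sketch under the same package assumptions, sleeps between attempts:

```go
func waitActivePolite(core *Core, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		standby, err := core.Standby()
		if err != nil {
			return err
		}
		if !standby {
			return nil // active
		}
		time.Sleep(10 * time.Millisecond) // avoid pegging a CPU core
	}
	return errors.New("timed out waiting for core to become active")
}
```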
-
-type TestCluster struct {
- BarrierKeys [][]byte
- RecoveryKeys [][]byte
- CACert *x509.Certificate
- CACertBytes []byte
- CACertPEM []byte
- CACertPEMFile string
- CAKey *ecdsa.PrivateKey
- CAKeyPEM []byte
- Cores []*TestClusterCore
- ID string
- RootToken string
- RootCAs *x509.CertPool
- TempDir string
-}
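
The CA fields above are what tests typically consume to trust the cluster's TLS endpoints. A small sketch using only standard-library calls already imported in this file (the helper name is invented):

```go
// caPoolFromFile loads the PEM written to CACertPEMFile into a cert pool,
// e.g. for a custom http.Transport's TLSClientConfig.RootCAs.
func caPoolFromFile(path string) (*x509.CertPool, error) {
	pemBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		return nil, fmt.Errorf("no CA certificates parsed from %s", path)
	}
	return pool, nil
}
```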
-
-func (c *TestCluster) Start() {
- for _, core := range c.Cores {
- if core.Server != nil {
- for _, ln := range core.Listeners {
- go core.Server.Serve(ln)
- }
- }
- }
-}
-
-// UnsealCores uses the cluster barrier keys to unseal the test cluster cores
-func (c *TestCluster) UnsealCores(t testing.T) {
- if err := c.UnsealCoresWithError(); err != nil {
- t.Fatal(err)
- }
-}
-
-func (c *TestCluster) UnsealCoresWithError() error {
- numCores := len(c.Cores)
-
- // Unseal first core
- for _, key := range c.BarrierKeys {
- if _, err := c.Cores[0].Unseal(TestKeyCopy(key)); err != nil {
- return fmt.Errorf("unseal err: %s", err)
- }
- }
-
- // Verify unsealed
- if c.Cores[0].Sealed() {
- return fmt.Errorf("should not be sealed")
- }
-
- if err := TestWaitActiveWithError(c.Cores[0].Core); err != nil {
- return err
- }
-
- // Unseal other cores
- for i := 1; i < numCores; i++ {
- for _, key := range c.BarrierKeys {
- if _, err := c.Cores[i].Core.Unseal(TestKeyCopy(key)); err != nil {
- return fmt.Errorf("unseal err: %s", err)
- }
- }
- }
-
- // Let them come fully up to standby
- time.Sleep(2 * time.Second)
-
- // Ensure cluster connection info is populated.
- // Other cores should not come up as leaders.
- for i := 1; i < numCores; i++ {
- isLeader, _, _, err := c.Cores[i].Leader()
- if err != nil {
- return err
- }
- if isLeader {
- return fmt.Errorf("core[%d] should not be leader", i)
- }
- }
-
- return nil
-}
-
-func (c *TestCluster) EnsureCoresSealed(t testing.T) {
- t.Helper()
- if err := c.ensureCoresSealed(); err != nil {
- t.Fatal(err)
- }
-}
-
-func CleanupClusters(clusters []*TestCluster) {
- wg := &sync.WaitGroup{}
- for _, cluster := range clusters {
- wg.Add(1)
- lc := cluster
- go func() {
- defer wg.Done()
- lc.Cleanup()
- }()
- }
- wg.Wait()
-}
-
-func (c *TestCluster) Cleanup() {
- // Close listeners
- wg := &sync.WaitGroup{}
- for _, core := range c.Cores {
- wg.Add(1)
- lc := core
-
- go func() {
- defer wg.Done()
- if lc.Listeners != nil {
- for _, ln := range lc.Listeners {
- ln.Close()
- }
- }
- if lc.licensingStopCh != nil {
- close(lc.licensingStopCh)
- lc.licensingStopCh = nil
- }
-
- if err := lc.Shutdown(); err != nil {
- lc.Logger().Error("error during shutdown; abandoning sealing", "error", err)
- } else {
- timeout := time.Now().Add(60 * time.Second)
- for {
- if time.Now().After(timeout) {
- lc.Logger().Error("timeout waiting for core to seal")
- }
- if lc.Sealed() {
- break
- }
- time.Sleep(250 * time.Millisecond)
- }
- }
- }()
- }
-
- wg.Wait()
-
- // Remove any temp dir that exists
- if c.TempDir != "" {
- os.RemoveAll(c.TempDir)
- }
-
- // Give time to actually shut down/clean up before the next test
- time.Sleep(time.Second)
-}
-
-func (c *TestCluster) ensureCoresSealed() error {
- for _, core := range c.Cores {
- if err := core.Shutdown(); err != nil {
- return err
- }
- timeout := time.Now().Add(60 * time.Second)
- for {
- if time.Now().After(timeout) {
- return fmt.Errorf("timeout waiting for core to seal")
- }
- if core.Sealed() {
- break
- }
- time.Sleep(250 * time.Millisecond)
- }
- }
- return nil
-}
-
-// UnsealWithStoredKeys uses stored keys to unseal the test cluster cores
-func (c *TestCluster) UnsealWithStoredKeys(t testing.T) error {
- for _, core := range c.Cores {
- if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
- return err
- }
- timeout := time.Now().Add(60 * time.Second)
- for {
- if time.Now().After(timeout) {
- return fmt.Errorf("timeout waiting for core to unseal")
- }
- if !core.Sealed() {
- break
- }
- time.Sleep(250 * time.Millisecond)
- }
- }
- return nil
-}
-
-func SetReplicationFailureMode(core *TestClusterCore, mode uint32) {
- atomic.StoreUint32(core.Core.replicationFailure, mode)
-}
-
-type TestListener struct {
- net.Listener
- Address *net.TCPAddr
-}
-
-type TestClusterCore struct {
- *Core
- CoreConfig *CoreConfig
- Client *api.Client
- Handler http.Handler
- Listeners []*TestListener
- ReloadFuncs *map[string][]reload.ReloadFunc
- ReloadFuncsLock *sync.RWMutex
- Server *http.Server
- ServerCert *x509.Certificate
- ServerCertBytes []byte
- ServerCertPEM []byte
- ServerKey *ecdsa.PrivateKey
- ServerKeyPEM []byte
- TLSConfig *tls.Config
- UnderlyingStorage physical.Backend
-}
-
-type TestClusterOptions struct {
- KeepStandbysSealed bool
- SkipInit bool
- HandlerFunc func(*HandlerProperties) http.Handler
- BaseListenAddress string
- NumCores int
- SealFunc func() Seal
- Logger log.Logger
- TempDir string
- CACert []byte
- CAKey *ecdsa.PrivateKey
-}
-
-var DefaultNumCores = 3
-
-type certInfo struct {
- cert *x509.Certificate
- certPEM []byte
- certBytes []byte
- key *ecdsa.PrivateKey
- keyPEM []byte
-}
-
-// NewTestCluster creates a new test cluster based on the provided core config
-// and test cluster options.
-//
-// N.B. Even though a single base CoreConfig is provided, NewTestCluster will instantiate a
-// core config for each core it creates. If a separate seal per core is desired, opts.SealFunc
-// can be provided to generate a seal for each one. Otherwise, the provided base.Seal will be
-// shared among cores. NewCore's default behavior is to generate a new DefaultSeal if the
-// provided Seal in coreConfig (i.e. base.Seal) is nil.
-func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
- var err error
-
- var numCores int
- if opts == nil || opts.NumCores == 0 {
- numCores = DefaultNumCores
- } else {
- numCores = opts.NumCores
- }
-
- certIPs := []net.IP{
- net.IPv6loopback,
- net.ParseIP("127.0.0.1"),
- }
- var baseAddr *net.TCPAddr
- if opts != nil && opts.BaseListenAddress != "" {
- baseAddr, err = net.ResolveTCPAddr("tcp", opts.BaseListenAddress)
- if err != nil {
- t.Fatal("could not parse given base IP")
- }
- certIPs = append(certIPs, baseAddr.IP)
- }
-
- var testCluster TestCluster
- if opts != nil && opts.TempDir != "" {
- if _, err := os.Stat(opts.TempDir); os.IsNotExist(err) {
- if err := os.MkdirAll(opts.TempDir, 0700); err != nil {
- t.Fatal(err)
- }
- }
- testCluster.TempDir = opts.TempDir
- } else {
- tempDir, err := ioutil.TempDir("", "vault-test-cluster-")
- if err != nil {
- t.Fatal(err)
- }
- testCluster.TempDir = tempDir
- }
-
- var caKey *ecdsa.PrivateKey
- if opts != nil && opts.CAKey != nil {
- caKey = opts.CAKey
- } else {
- caKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- if err != nil {
- t.Fatal(err)
- }
- }
- testCluster.CAKey = caKey
- var caBytes []byte
- if opts != nil && len(opts.CACert) > 0 {
- caBytes = opts.CACert
- } else {
- caCertTemplate := &x509.Certificate{
- Subject: pkix.Name{
- CommonName: "localhost",
- },
- DNSNames: []string{"localhost"},
- IPAddresses: certIPs,
-			KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
- SerialNumber: big.NewInt(mathrand.Int63()),
- NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: time.Now().Add(262980 * time.Hour),
- BasicConstraintsValid: true,
- IsCA: true,
- }
- caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey)
- if err != nil {
- t.Fatal(err)
- }
- }
- caCert, err := x509.ParseCertificate(caBytes)
- if err != nil {
- t.Fatal(err)
- }
- testCluster.CACert = caCert
- testCluster.CACertBytes = caBytes
- testCluster.RootCAs = x509.NewCertPool()
- testCluster.RootCAs.AddCert(caCert)
- caCertPEMBlock := &pem.Block{
- Type: "CERTIFICATE",
- Bytes: caBytes,
- }
- testCluster.CACertPEM = pem.EncodeToMemory(caCertPEMBlock)
- testCluster.CACertPEMFile = filepath.Join(testCluster.TempDir, "ca_cert.pem")
- err = ioutil.WriteFile(testCluster.CACertPEMFile, testCluster.CACertPEM, 0755)
- if err != nil {
- t.Fatal(err)
- }
- marshaledCAKey, err := x509.MarshalECPrivateKey(caKey)
- if err != nil {
- t.Fatal(err)
- }
- caKeyPEMBlock := &pem.Block{
- Type: "EC PRIVATE KEY",
- Bytes: marshaledCAKey,
- }
- testCluster.CAKeyPEM = pem.EncodeToMemory(caKeyPEMBlock)
- err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0755)
- if err != nil {
- t.Fatal(err)
- }
-
- var certInfoSlice []*certInfo
-
- //
- // Certs generation
- //
- for i := 0; i < numCores; i++ {
- key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
- if err != nil {
- t.Fatal(err)
- }
- certTemplate := &x509.Certificate{
- Subject: pkix.Name{
- CommonName: "localhost",
- },
- DNSNames: []string{"localhost"},
- IPAddresses: certIPs,
- ExtKeyUsage: []x509.ExtKeyUsage{
- x509.ExtKeyUsageServerAuth,
- x509.ExtKeyUsageClientAuth,
- },
- KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
- SerialNumber: big.NewInt(mathrand.Int63()),
- NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: time.Now().Add(262980 * time.Hour),
- }
- certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, caCert, key.Public(), caKey)
- if err != nil {
- t.Fatal(err)
- }
- cert, err := x509.ParseCertificate(certBytes)
- if err != nil {
- t.Fatal(err)
- }
- certPEMBlock := &pem.Block{
- Type: "CERTIFICATE",
- Bytes: certBytes,
- }
- certPEM := pem.EncodeToMemory(certPEMBlock)
- marshaledKey, err := x509.MarshalECPrivateKey(key)
- if err != nil {
- t.Fatal(err)
- }
- keyPEMBlock := &pem.Block{
- Type: "EC PRIVATE KEY",
- Bytes: marshaledKey,
- }
- keyPEM := pem.EncodeToMemory(keyPEMBlock)
-
- certInfoSlice = append(certInfoSlice, &certInfo{
- cert: cert,
- certPEM: certPEM,
- certBytes: certBytes,
- key: key,
- keyPEM: keyPEM,
- })
- }
-
- //
- // Listener setup
- //
- logger := logging.NewVaultLogger(log.Trace)
- ports := make([]int, numCores)
- if baseAddr != nil {
- for i := 0; i < numCores; i++ {
- ports[i] = baseAddr.Port + i
- }
- } else {
- baseAddr = &net.TCPAddr{
- IP: net.ParseIP("127.0.0.1"),
- Port: 0,
- }
- }
-
- listeners := [][]*TestListener{}
- servers := []*http.Server{}
- handlers := []http.Handler{}
- tlsConfigs := []*tls.Config{}
- certGetters := []*reload.CertificateGetter{}
- for i := 0; i < numCores; i++ {
- baseAddr.Port = ports[i]
- ln, err := net.ListenTCP("tcp", baseAddr)
- if err != nil {
- t.Fatal(err)
- }
- certFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_cert.pem", i+1, ln.Addr().(*net.TCPAddr).Port))
- keyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node%d_port_%d_key.pem", i+1, ln.Addr().(*net.TCPAddr).Port))
- err = ioutil.WriteFile(certFile, certInfoSlice[i].certPEM, 0755)
- if err != nil {
- t.Fatal(err)
- }
- err = ioutil.WriteFile(keyFile, certInfoSlice[i].keyPEM, 0755)
- if err != nil {
- t.Fatal(err)
- }
- tlsCert, err := tls.X509KeyPair(certInfoSlice[i].certPEM, certInfoSlice[i].keyPEM)
- if err != nil {
- t.Fatal(err)
- }
- certGetter := reload.NewCertificateGetter(certFile, keyFile, "")
- certGetters = append(certGetters, certGetter)
- tlsConfig := &tls.Config{
- Certificates: []tls.Certificate{tlsCert},
- RootCAs: testCluster.RootCAs,
- ClientCAs: testCluster.RootCAs,
- ClientAuth: tls.RequestClientCert,
- NextProtos: []string{"h2", "http/1.1"},
- GetCertificate: certGetter.GetCertificate,
- }
- tlsConfig.BuildNameToCertificate()
- tlsConfigs = append(tlsConfigs, tlsConfig)
-		lns := []*TestListener{{
-			Listener: tls.NewListener(ln, tlsConfig),
-			Address:  ln.Addr().(*net.TCPAddr),
-		}}
- listeners = append(listeners, lns)
- var handler http.Handler = http.NewServeMux()
- handlers = append(handlers, handler)
- server := &http.Server{
- Handler: handler,
- ErrorLog: logger.StandardLogger(nil),
- }
- servers = append(servers, server)
- }
-
- // Create three cores with the same physical and different redirect/cluster
- // addrs.
- // N.B.: On OSX, instead of random ports, it assigns new ports to new
- // listeners sequentially. Aside from being a bad idea in a security sense,
- // it also broke tests that assumed it was OK to just use the port above
- // the redirect addr. This has now been changed to 105 ports above, but if
- // we ever do more than three nodes in a cluster it may need to be bumped.
- // Note: it's 105 so that we don't conflict with a running Consul by
- // default.
- coreConfig := &CoreConfig{
- LogicalBackends: make(map[string]logical.Factory),
- CredentialBackends: make(map[string]logical.Factory),
- AuditBackends: make(map[string]audit.Factory),
- RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port),
- ClusterAddr: fmt.Sprintf("https://127.0.0.1:%d", listeners[0][0].Address.Port+105),
- DisableMlock: true,
- EnableUI: true,
- EnableRaw: true,
- BuiltinRegistry: NewMockBuiltinRegistry(),
- }
-
- if base != nil {
- coreConfig.DisableCache = base.DisableCache
- coreConfig.EnableUI = base.EnableUI
- coreConfig.DefaultLeaseTTL = base.DefaultLeaseTTL
- coreConfig.MaxLeaseTTL = base.MaxLeaseTTL
- coreConfig.CacheSize = base.CacheSize
- coreConfig.PluginDirectory = base.PluginDirectory
- coreConfig.Seal = base.Seal
- coreConfig.DevToken = base.DevToken
- coreConfig.EnableRaw = base.EnableRaw
- coreConfig.DisableSealWrap = base.DisableSealWrap
- coreConfig.DevLicenseDuration = base.DevLicenseDuration
- coreConfig.DisableCache = base.DisableCache
- if base.BuiltinRegistry != nil {
- coreConfig.BuiltinRegistry = base.BuiltinRegistry
- }
-
- if !coreConfig.DisableMlock {
- base.DisableMlock = false
- }
-
- if base.Physical != nil {
- coreConfig.Physical = base.Physical
- }
-
- if base.HAPhysical != nil {
- coreConfig.HAPhysical = base.HAPhysical
- }
-
- // Used to set something non-working to test fallback
- switch base.ClusterAddr {
- case "empty":
- coreConfig.ClusterAddr = ""
- case "":
- default:
- coreConfig.ClusterAddr = base.ClusterAddr
- }
-
- if base.LogicalBackends != nil {
- for k, v := range base.LogicalBackends {
- coreConfig.LogicalBackends[k] = v
- }
- }
- if base.CredentialBackends != nil {
- for k, v := range base.CredentialBackends {
- coreConfig.CredentialBackends[k] = v
- }
- }
- if base.AuditBackends != nil {
- for k, v := range base.AuditBackends {
- coreConfig.AuditBackends[k] = v
- }
- }
- if base.Logger != nil {
- coreConfig.Logger = base.Logger
- }
-
- coreConfig.ClusterCipherSuites = base.ClusterCipherSuites
-
- coreConfig.DisableCache = base.DisableCache
-
- coreConfig.DevToken = base.DevToken
- }
-
- if coreConfig.Physical == nil {
- coreConfig.Physical, err = physInmem.NewInmem(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- }
- if coreConfig.HAPhysical == nil {
- haPhys, err := physInmem.NewInmemHA(nil, logger)
- if err != nil {
- t.Fatal(err)
- }
- coreConfig.HAPhysical = haPhys.(physical.HABackend)
- }
-
- pubKey, priKey, err := testGenerateCoreKeys()
- if err != nil {
- t.Fatalf("err: %v", err)
- }
-
- cores := []*Core{}
- coreConfigs := []*CoreConfig{}
- for i := 0; i < numCores; i++ {
- localConfig := *coreConfig
- localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port)
- if localConfig.ClusterAddr != "" {
- localConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port+105)
- }
-
- // if opts.SealFunc is provided, use that to generate a seal for the config instead
- if opts != nil && opts.SealFunc != nil {
- localConfig.Seal = opts.SealFunc()
- }
-
- if opts != nil && opts.Logger != nil {
- localConfig.Logger = opts.Logger.Named(fmt.Sprintf("core%d", i))
- }
-
- localConfig.LicensingConfig = testGetLicensingConfig(pubKey)
-
- c, err := NewCore(&localConfig)
- if err != nil {
- t.Fatalf("err: %v", err)
- }
- cores = append(cores, c)
- coreConfigs = append(coreConfigs, &localConfig)
- if opts != nil && opts.HandlerFunc != nil {
- handlers[i] = opts.HandlerFunc(&HandlerProperties{
- Core: c,
- MaxRequestDuration: DefaultMaxRequestDuration,
- })
- servers[i].Handler = handlers[i]
- }
-
- // Set this in case the Seal was manually set before the core was
- // created
- if localConfig.Seal != nil {
- localConfig.Seal.SetCore(c)
- }
- }
-
- //
- // Clustering setup
- //
- clusterAddrGen := func(lns []*TestListener) []*net.TCPAddr {
- ret := make([]*net.TCPAddr, len(lns))
- for i, ln := range lns {
- ret[i] = &net.TCPAddr{
- IP: ln.Address.IP,
- Port: ln.Address.Port + 105,
- }
- }
- return ret
- }
-
- for i := 0; i < numCores; i++ {
- if coreConfigs[i].ClusterAddr != "" {
- cores[i].SetClusterListenerAddrs(clusterAddrGen(listeners[i]))
- cores[i].SetClusterHandler(handlers[i])
- }
- }
-
- if opts == nil || !opts.SkipInit {
- bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, cores[0], clusterAddrGen(listeners[0]), handlers[0])
- barrierKeys, _ := copystructure.Copy(bKeys)
- testCluster.BarrierKeys = barrierKeys.([][]byte)
- recoveryKeys, _ := copystructure.Copy(rKeys)
- testCluster.RecoveryKeys = recoveryKeys.([][]byte)
- testCluster.RootToken = root
-
- // Write root token and barrier keys
- err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(root), 0755)
- if err != nil {
- t.Fatal(err)
- }
- var buf bytes.Buffer
- for i, key := range testCluster.BarrierKeys {
- buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
- if i < len(testCluster.BarrierKeys)-1 {
- buf.WriteRune('\n')
- }
- }
- err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "barrier_keys"), buf.Bytes(), 0755)
- if err != nil {
- t.Fatal(err)
- }
- for i, key := range testCluster.RecoveryKeys {
- buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
- if i < len(testCluster.RecoveryKeys)-1 {
- buf.WriteRune('\n')
- }
- }
- err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "recovery_keys"), buf.Bytes(), 0755)
- if err != nil {
- t.Fatal(err)
- }
-
- // Unseal first core
- for _, key := range bKeys {
- if _, err := cores[0].Unseal(TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
- ctx := context.Background()
-
-		// If stored keys are supported, the above will be a no-op, so trigger
-		// auto-unseal using stored keys to try to unseal
- if err := cores[0].UnsealWithStoredKeys(ctx); err != nil {
- t.Fatal(err)
- }
-
- // Verify unsealed
- if cores[0].Sealed() {
- t.Fatal("should not be sealed")
- }
-
- TestWaitActive(t, cores[0])
-
- // Unseal other cores unless otherwise specified
- if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 {
- for i := 1; i < numCores; i++ {
- for _, key := range bKeys {
- if _, err := cores[i].Unseal(TestKeyCopy(key)); err != nil {
- t.Fatalf("unseal err: %s", err)
- }
- }
-
-				// If stored keys are supported, the above will be a no-op, so
-				// trigger auto-unseal using stored keys
- if err := cores[i].UnsealWithStoredKeys(ctx); err != nil {
- t.Fatal(err)
- }
- }
-
- // Let them come fully up to standby
- time.Sleep(2 * time.Second)
-
- // Ensure cluster connection info is populated.
- // Other cores should not come up as leaders.
- for i := 1; i < numCores; i++ {
- isLeader, _, _, err := cores[i].Leader()
- if err != nil {
- t.Fatal(err)
- }
- if isLeader {
- t.Fatalf("core[%d] should not be leader", i)
- }
- }
- }
-
- //
- // Set test cluster core(s) and test cluster
- //
- cluster, err := cores[0].Cluster(context.Background())
- if err != nil {
- t.Fatal(err)
- }
- testCluster.ID = cluster.ID
- }
-
- getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client {
- transport := cleanhttp.DefaultPooledTransport()
- transport.TLSClientConfig = tlsConfig.Clone()
- if err := http2.ConfigureTransport(transport); err != nil {
- t.Fatal(err)
- }
- client := &http.Client{
- Transport: transport,
- CheckRedirect: func(*http.Request, []*http.Request) error {
- // This can of course be overridden per-test by using its own client
- return fmt.Errorf("redirects not allowed in these tests")
- },
- }
- config := api.DefaultConfig()
- if config.Error != nil {
- t.Fatal(config.Error)
- }
- config.Address = fmt.Sprintf("https://127.0.0.1:%d", port)
- config.HttpClient = client
- config.MaxRetries = 0
- apiClient, err := api.NewClient(config)
- if err != nil {
- t.Fatal(err)
- }
- if opts == nil || !opts.SkipInit {
- apiClient.SetToken(testCluster.RootToken)
- }
- return apiClient
- }
-
- var ret []*TestClusterCore
- for i := 0; i < numCores; i++ {
- tcc := &TestClusterCore{
- Core: cores[i],
- CoreConfig: coreConfigs[i],
- ServerKey: certInfoSlice[i].key,
- ServerKeyPEM: certInfoSlice[i].keyPEM,
- ServerCert: certInfoSlice[i].cert,
- ServerCertBytes: certInfoSlice[i].certBytes,
- ServerCertPEM: certInfoSlice[i].certPEM,
- Listeners: listeners[i],
- Handler: handlers[i],
- Server: servers[i],
- TLSConfig: tlsConfigs[i],
- Client: getAPIClient(listeners[i][0].Address.Port, tlsConfigs[i]),
- }
- tcc.ReloadFuncs = &cores[i].reloadFuncs
- tcc.ReloadFuncsLock = &cores[i].reloadFuncsLock
- tcc.ReloadFuncsLock.Lock()
- (*tcc.ReloadFuncs)["listener|tcp"] = []reload.ReloadFunc{certGetters[i].Reload}
- tcc.ReloadFuncsLock.Unlock()
-
- testAdjustTestCore(base, tcc)
-
- ret = append(ret, tcc)
- }
-
- testCluster.Cores = ret
-
- testExtraClusterCoresTestSetup(t, priKey, testCluster.Cores)
-
- return &testCluster
-}
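
End-to-end, a test typically wires this up as below. This sketch follows the common Vault pattern of passing the real HTTP handler via HandlerFunc; the vaulthttp alias for github.com/hashicorp/vault/http is conventional, and the assertion at the end is purely illustrative.

```go
func TestMyFeature(t *testing.T) {
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler, // serve the full Vault API
	})
	cluster.Start()
	defer cluster.Cleanup()

	// Each core gets a preconfigured API client rooted at its listener,
	// already authenticated with the cluster root token.
	client := cluster.Cores[0].Client
	secret, err := client.Auth().Token().LookupSelf()
	if err != nil {
		t.Fatal(err)
	}
	if secret == nil || secret.Data["id"] != cluster.RootToken {
		t.Fatal("expected to be authenticated as the root token")
	}
}
```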
-
-func NewMockBuiltinRegistry() *mockBuiltinRegistry {
- return &mockBuiltinRegistry{
- forTesting: map[string]consts.PluginType{
- "mysql-database-plugin": consts.PluginTypeDatabase,
- "postgresql-database-plugin": consts.PluginTypeDatabase,
- },
- }
-}
-
-type mockBuiltinRegistry struct {
- forTesting map[string]consts.PluginType
-}
-
-func (m *mockBuiltinRegistry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) {
- testPluginType, ok := m.forTesting[name]
- if !ok {
- return nil, false
- }
- if pluginType != testPluginType {
- return nil, false
- }
- if name == "postgresql-database-plugin" {
- return dbPostgres.New, true
- }
- return dbMysql.New(dbMysql.MetadataLen, dbMysql.MetadataLen, dbMysql.UsernameLen), true
-}
-
-// Keys returns a realistic list of keys for database plugins only.
-func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string {
- if pluginType != consts.PluginTypeDatabase {
- return []string{}
- }
- /*
- This is a hard-coded reproduction of the db plugin keys in helper/builtinplugins/registry.go.
- The registry isn't directly used because it causes import cycles.
- */
- return []string{
- "mysql-database-plugin",
- "mysql-aurora-database-plugin",
- "mysql-rds-database-plugin",
- "mysql-legacy-database-plugin",
- "postgresql-database-plugin",
- "mssql-database-plugin",
- "cassandra-database-plugin",
- "mongodb-database-plugin",
- "hana-database-plugin",
- }
-}
-
-func (m *mockBuiltinRegistry) Contains(name string, pluginType consts.PluginType) bool {
- return false
-}
diff --git a/vendor/github.com/hashicorp/vault/vault/testing_util.go b/vendor/github.com/hashicorp/vault/vault/testing_util.go
deleted file mode 100644
index 3aff71e1..00000000
--- a/vendor/github.com/hashicorp/vault/vault/testing_util.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import "github.com/mitchellh/go-testing-interface"
-
-func testGenerateCoreKeys() (interface{}, interface{}, error) { return nil, nil, nil }
-func testGetLicensingConfig(interface{}) *LicensingConfig { return &LicensingConfig{} }
-func testAdjustTestCore(*CoreConfig, *TestClusterCore) {}
-func testExtraClusterCoresTestSetup(testing.T, interface{}, []*TestClusterCore) {}
diff --git a/vendor/github.com/hashicorp/vault/vault/token_store.go b/vendor/github.com/hashicorp/vault/vault/token_store.go
deleted file mode 100644
index 3acebe66..00000000
--- a/vendor/github.com/hashicorp/vault/vault/token_store.go
+++ /dev/null
@@ -1,3166 +0,0 @@
-package vault
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "sync"
- "sync/atomic"
-
- "regexp"
- "strings"
- "time"
-
- proto "github.com/golang/protobuf/proto"
- "github.com/hashicorp/errwrap"
- log "github.com/hashicorp/go-hclog"
- sockaddr "github.com/hashicorp/go-sockaddr"
-
- "github.com/armon/go-metrics"
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/vault/helper/base62"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/identity"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/locksutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/helper/parseutil"
- "github.com/hashicorp/vault/helper/policyutil"
- "github.com/hashicorp/vault/helper/salt"
- "github.com/hashicorp/vault/helper/strutil"
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/logical/framework"
- "github.com/hashicorp/vault/logical/plugin/pb"
- "github.com/mitchellh/mapstructure"
-)
-
-const (
- // idPrefix is the prefix used to store tokens for their
- // primary ID based index
- idPrefix = "id/"
-
- // accessorPrefix is the prefix used to store the index from
- // Accessor to Token ID
- accessorPrefix = "accessor/"
-
- // parentPrefix is the prefix used to store tokens for their
-	// secondary parent-based index
- parentPrefix = "parent/"
-
- // tokenSubPath is the sub-path used for the token store
- // view. This is nested under the system view.
- tokenSubPath = "token/"
-
- // rolesPrefix is the prefix used to store role information
- rolesPrefix = "roles/"
-
- // tokenRevocationPending indicates that the token should not be used
-	// again. If this is encountered during an existing request flow, it means
-	// that the token is valid but is currently fulfilling its final use; after
-	// this request it will no longer be able to be looked up as valid.
- tokenRevocationPending = -1
-)
-
-var (
- // TokenLength is the size of tokens we are currently generating, without
- // any namespace information
- TokenLength = 24
-
- // displayNameSanitize is used to sanitize a display name given to a token.
- displayNameSanitize = regexp.MustCompile("[^a-zA-Z0-9-]")
-
- // pathSuffixSanitize is used to ensure a path suffix in a role is valid.
- pathSuffixSanitize = regexp.MustCompile("\\w[\\w-.]+\\w")
-
- destroyCubbyhole = func(ctx context.Context, ts *TokenStore, te *logical.TokenEntry) error {
- if ts.cubbyholeBackend == nil {
- // Should only ever happen in testing
- return nil
- }
-
- if te == nil {
- return errors.New("nil token entry")
- }
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, ts.core)
- if err != nil {
- return err
- }
- if tokenNS == nil {
- return namespace.ErrNoNamespace
- }
-
- switch tokenNS.ID {
- case namespace.RootNamespaceID:
- saltedID, err := ts.SaltID(ctx, te.ID)
- if err != nil {
- return err
- }
- return ts.cubbyholeBackend.revoke(ctx, salt.SaltID(ts.cubbyholeBackend.saltUUID, saltedID, salt.SHA1Hash))
-
- default:
- if te.CubbyholeID == "" {
- return fmt.Errorf("missing cubbyhole ID while destroying")
- }
- return ts.cubbyholeBackend.revoke(ctx, te.CubbyholeID)
- }
- }
-)
-
-func (ts *TokenStore) paths() []*framework.Path {
- return []*framework.Path{
- {
- Pattern: "roles/?$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: ts.tokenStoreRoleList,
- },
-
- HelpSynopsis: tokenListRolesHelp,
- HelpDescription: tokenListRolesHelp,
- },
-
- {
- Pattern: "accessors/$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ListOperation: ts.tokenStoreAccessorList,
- },
-
- HelpSynopsis: tokenListAccessorsHelp,
- HelpDescription: tokenListAccessorsHelp,
- },
-
- {
- Pattern: "roles/" + framework.GenericNameRegex("role_name"),
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
-
- "allowed_policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: tokenAllowedPoliciesHelp,
- },
-
- "disallowed_policies": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: tokenDisallowedPoliciesHelp,
- },
-
- "orphan": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: false,
- Description: tokenOrphanHelp,
- },
-
- "period": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: tokenPeriodHelp,
- },
-
- "path_suffix": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "",
- Description: tokenPathSuffixHelp + pathSuffixSanitize.String(),
- },
-
- "explicit_max_ttl": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: tokenExplicitMaxTTLHelp,
- },
-
- "renewable": &framework.FieldSchema{
- Type: framework.TypeBool,
- Default: true,
- Description: tokenRenewableHelp,
- },
-
- "bound_cidrs": &framework.FieldSchema{
- Type: framework.TypeCommaStringSlice,
- Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`,
- },
-
- "token_type": &framework.FieldSchema{
- Type: framework.TypeString,
- Default: "service",
- Description: "The type of token to generate, service or batch",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: ts.tokenStoreRoleRead,
- logical.CreateOperation: ts.tokenStoreRoleCreateUpdate,
- logical.UpdateOperation: ts.tokenStoreRoleCreateUpdate,
- logical.DeleteOperation: ts.tokenStoreRoleDelete,
- },
-
- ExistenceCheck: ts.tokenStoreRoleExistenceCheck,
- },
-
- {
- Pattern: "create-orphan$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleCreateOrphan,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenCreateOrphanHelp),
- HelpDescription: strings.TrimSpace(tokenCreateOrphanHelp),
- },
-
- {
- Pattern: "create/" + framework.GenericNameRegex("role_name"),
-
- Fields: map[string]*framework.FieldSchema{
- "role_name": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Name of the role",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleCreateAgainstRole,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenCreateRoleHelp),
- HelpDescription: strings.TrimSpace(tokenCreateRoleHelp),
- },
-
- {
- Pattern: "create$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleCreate,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenCreateHelp),
- HelpDescription: strings.TrimSpace(tokenCreateHelp),
- },
-
- {
- Pattern: "lookup",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token to lookup (POST request body)",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.ReadOperation: ts.handleLookup,
- logical.UpdateOperation: ts.handleLookup,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenLookupHelp),
- HelpDescription: strings.TrimSpace(tokenLookupHelp),
- },
-
- {
- Pattern: "lookup-accessor",
-
- Fields: map[string]*framework.FieldSchema{
- "accessor": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Accessor of the token to look up (request body)",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleUpdateLookupAccessor,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenLookupAccessorHelp),
- HelpDescription: strings.TrimSpace(tokenLookupAccessorHelp),
- },
-
- {
- Pattern: "lookup-self$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token to look up (unused, does not need to be set)",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleLookupSelf,
- logical.ReadOperation: ts.handleLookupSelf,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenLookupHelp),
- HelpDescription: strings.TrimSpace(tokenLookupHelp),
- },
-
- {
- Pattern: "revoke-accessor",
-
- Fields: map[string]*framework.FieldSchema{
- "accessor": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Accessor of the token (request body)",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleUpdateRevokeAccessor,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenRevokeAccessorHelp),
- HelpDescription: strings.TrimSpace(tokenRevokeAccessorHelp),
- },
-
- {
- Pattern: "revoke-self$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleRevokeSelf,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenRevokeSelfHelp),
- HelpDescription: strings.TrimSpace(tokenRevokeSelfHelp),
- },
-
- {
- Pattern: "revoke",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token to revoke (request body)",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleRevokeTree,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenRevokeHelp),
- HelpDescription: strings.TrimSpace(tokenRevokeHelp),
- },
-
- {
- Pattern: "revoke-orphan",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token to revoke (request body)",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleRevokeOrphan,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenRevokeOrphanHelp),
- HelpDescription: strings.TrimSpace(tokenRevokeOrphanHelp),
- },
-
- {
- Pattern: "renew-self$",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token to renew (unused, does not need to be set)",
- },
- "increment": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: "The desired increment in seconds to the token expiration",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleRenewSelf,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenRenewSelfHelp),
- HelpDescription: strings.TrimSpace(tokenRenewSelfHelp),
- },
-
- {
- Pattern: "renew",
-
- Fields: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token to renew (request body)",
- },
- "increment": &framework.FieldSchema{
- Type: framework.TypeDurationSecond,
- Default: 0,
- Description: "The desired increment in seconds to the token expiration",
- },
- },
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleRenew,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenRenewHelp),
- HelpDescription: strings.TrimSpace(tokenRenewHelp),
- },
-
- {
- Pattern: "tidy$",
-
- Callbacks: map[logical.Operation]framework.OperationFunc{
- logical.UpdateOperation: ts.handleTidy,
- },
-
- HelpSynopsis: strings.TrimSpace(tokenTidyHelp),
- HelpDescription: strings.TrimSpace(tokenTidyDesc),
- },
- }
-}
-
-// LookupToken returns the properties of the token from the token store. This
-// is particularly useful to fetch the accessor of the client token and get it
-// populated in the logical request along with the client token. The accessor
-// of the client token can get audit logged.
-func (c *Core) LookupToken(ctx context.Context, token string) (*logical.TokenEntry, error) {
- if c.Sealed() {
- return nil, consts.ErrSealed
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
-
- if c.standby && !c.perfStandby {
- return nil, consts.ErrStandby
- }
-
- // Many tests don't have a token store running
- if c.tokenStore == nil || c.tokenStore.expiration == nil {
- return nil, nil
- }
-
- return c.tokenStore.Lookup(ctx, token)
-}
-
-// TokenStore is used to manage client tokens. Tokens are used for
-// clients to authenticate, and each token is mapped to an applicable
-// set of policy which is used for authorization.
-type TokenStore struct {
- *framework.Backend
-
- activeContext context.Context
-
- core *Core
-
- batchTokenEncryptor BarrierEncryptor
-
- baseBarrierView *BarrierView
- idBarrierView *BarrierView
- accessorBarrierView *BarrierView
- parentBarrierView *BarrierView
- rolesBarrierView *BarrierView
-
- expiration *ExpirationManager
-
- cubbyholeBackend *CubbyholeBackend
-
- tokenLocks []*locksutil.LockEntry
-
-	// tokensPendingDeletion stores tokens that are being revoked. If the token is
- // not in the map, it means that there's no deletion in progress. If the value
- // is true it means deletion is in progress, and if false it means deletion
- // failed. Revocation needs to handle these states accordingly.
- tokensPendingDeletion *sync.Map
-
- cubbyholeDestroyer func(context.Context, *TokenStore, *logical.TokenEntry) error
-
- logger log.Logger
-
- saltLock sync.RWMutex
- salts map[string]*salt.Salt
-
- tidyLock *uint32
-
- identityPoliciesDeriverFunc func(string) (*identity.Entity, []string, error)
-
- quitContext context.Context
-}
-
-// NewTokenStore is used to construct a token store that is
-// backed by the given barrier view.
-func NewTokenStore(ctx context.Context, logger log.Logger, core *Core, config *logical.BackendConfig) (*TokenStore, error) {
- // Create a sub-view
- view := core.systemBarrierView.SubView(tokenSubPath)
-
- // Initialize the store
- t := &TokenStore{
- activeContext: ctx,
- core: core,
- batchTokenEncryptor: core.barrier,
- baseBarrierView: view,
- idBarrierView: view.SubView(idPrefix),
- accessorBarrierView: view.SubView(accessorPrefix),
- parentBarrierView: view.SubView(parentPrefix),
- rolesBarrierView: view.SubView(rolesPrefix),
- cubbyholeDestroyer: destroyCubbyhole,
- logger: logger,
- tokenLocks: locksutil.CreateLocks(),
- tokensPendingDeletion: &sync.Map{},
- saltLock: sync.RWMutex{},
- tidyLock: new(uint32),
- quitContext: core.activeContext,
- salts: make(map[string]*salt.Salt),
- }
-
- // Setup the framework endpoints
- t.Backend = &framework.Backend{
- AuthRenew: t.authRenew,
-
- PathsSpecial: &logical.Paths{
- Root: []string{
- "revoke-orphan/*",
- "accessors*",
- },
-
- // Most token store items are local since tokens are local, but a
- // notable exception is roles
- LocalStorage: []string{
- idPrefix,
- accessorPrefix,
- parentPrefix,
- salt.DefaultLocation,
- },
- },
- BackendType: logical.TypeCredential,
- }
-
- t.Backend.Paths = append(t.Backend.Paths, t.paths()...)
-
- t.Backend.Setup(ctx, config)
-
- return t, nil
-}
-
-func (ts *TokenStore) Invalidate(ctx context.Context, key string) {
- //ts.logger.Debug("invalidating key", "key", key)
-
- switch key {
- case tokenSubPath + salt.DefaultLocation:
- ts.saltLock.Lock()
- ts.salts = make(map[string]*salt.Salt)
- ts.saltLock.Unlock()
- }
-}
-
-func (ts *TokenStore) Salt(ctx context.Context) (*salt.Salt, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- ts.saltLock.RLock()
- if salt, ok := ts.salts[ns.ID]; ok {
- defer ts.saltLock.RUnlock()
- return salt, nil
- }
- ts.saltLock.RUnlock()
- ts.saltLock.Lock()
- defer ts.saltLock.Unlock()
- if salt, ok := ts.salts[ns.ID]; ok {
- return salt, nil
- }
-
- salt, err := salt.NewSalt(ctx, ts.baseView(ns), &salt.Config{
- HashFunc: salt.SHA1Hash,
- Location: salt.DefaultLocation,
- })
- if err != nil {
- return nil, err
- }
- ts.salts[ns.ID] = salt
- return salt, nil
-}
-
-// tsRoleEntry contains token store role information
-type tsRoleEntry struct {
- // The name of the role. Embedded so it can be used for pathing
- Name string `json:"name" mapstructure:"name" structs:"name"`
-
- // The policies that creation functions using this role can assign to a token,
- // escaping or further locking down normal subset checking
- AllowedPolicies []string `json:"allowed_policies" mapstructure:"allowed_policies" structs:"allowed_policies"`
-
- // List of policies to be not allowed during token creation using this role
- DisallowedPolicies []string `json:"disallowed_policies" mapstructure:"disallowed_policies" structs:"disallowed_policies"`
-
- // If true, tokens created using this role will be orphans
- Orphan bool `json:"orphan" mapstructure:"orphan" structs:"orphan"`
-
- // If non-zero, tokens created using this role will be able to be renewed
- // forever, but will have a fixed renewal period of this value
- Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
-
- // If set, a suffix will be set on the token path, making it easier to
- // revoke using 'revoke-prefix'
- PathSuffix string `json:"path_suffix" mapstructure:"path_suffix" structs:"path_suffix"`
-
- // If set, controls whether created tokens are marked as being renewable
- Renewable bool `json:"renewable" mapstructure:"renewable" structs:"renewable"`
-
- // If set, the token entry will have an explicit maximum TTL set, rather
- // than deferring to role/mount values
- ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`
-
- // The set of CIDRs that tokens generated using this role will be bound to
- BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"`
-
- // The type of token this role should issue
- TokenType logical.TokenType `json:"token_type" mapstructure:"token_type"`
-}
-
-type accessorEntry struct {
- TokenID string `json:"token_id"`
- AccessorID string `json:"accessor_id"`
- NamespaceID string `json:"namespace_id"`
-}
-
-// SetExpirationManager is used to provide the token store with
-// an expiration manager. This is used to manage prefix based revocation
-// of tokens and to tidy entries when removed from the token store.
-func (ts *TokenStore) SetExpirationManager(exp *ExpirationManager) {
- ts.expiration = exp
-}
-
-// SaltID is used to apply a salt and hash to an ID to make sure it's not reversible
-func (ts *TokenStore) SaltID(ctx context.Context, id string) (string, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return "", namespace.ErrNoNamespace
- }
-
- s, err := ts.Salt(ctx)
- if err != nil {
- return "", err
- }
-
- // For tokens of older format and belonging to the root namespace, use SHA1
- // hash for salting.
- if ns.ID == namespace.RootNamespaceID && !strings.Contains(id, ".") {
- return s.SaltID(id), nil
- }
-
- // For all other tokens, use SHA2-256 HMAC for salting. This includes
- // tokens of older format, but belonging to a namespace other than the root
- // namespace.
- return "h" + s.GetHMAC(id), nil
-}
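
Concretely, the two branches above produce different output shapes. A sketch assuming a populated *salt.Salt and this package's imports; the example IDs are invented:

```go
func saltedForms(s *salt.Salt) (legacy, modern string) {
	// Old-format IDs in the root namespace contain no "." and keep the
	// SHA1-based salting for backwards compatibility.
	legacy = s.SaltID("f3b09679-3001-009d-2b80-9c306ab81aa6")
	// Everything else (new "s."/"b." IDs, or any namespaced token) gets an
	// "h"-prefixed SHA2-256 HMAC.
	modern = "h" + s.GetHMAC("s.AbCdEf0123456789")
	return legacy, modern
}
```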
-
-// rootToken is used to generate a new token with root privileges and no parent
-func (ts *TokenStore) rootToken(ctx context.Context) (*logical.TokenEntry, error) {
- ctx = namespace.ContextWithNamespace(ctx, namespace.RootNamespace)
- te := &logical.TokenEntry{
- Policies: []string{"root"},
- Path: "auth/token/root",
- DisplayName: "root",
- CreationTime: time.Now().Unix(),
- NamespaceID: namespace.RootNamespaceID,
- Type: logical.TokenTypeService,
- }
- if err := ts.create(ctx, te); err != nil {
- return nil, err
- }
- return te, nil
-}
-
-func (ts *TokenStore) tokenStoreAccessorList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- nsID := ns.ID
-
- entries, err := ts.accessorView(ns).List(ctx, "")
- if err != nil {
- return nil, err
- }
-
- resp := &logical.Response{}
-
- ret := make([]string, 0, len(entries))
- for _, entry := range entries {
- aEntry, err := ts.lookupByAccessor(ctx, entry, true, false)
- if err != nil {
- resp.AddWarning("Found an accessor entry that could not be successfully decoded")
- continue
- }
-
- if aEntry.TokenID == "" {
- resp.AddWarning(fmt.Sprintf("Found an accessor entry missing a token: %v", aEntry.AccessorID))
- continue
- }
-
- if aEntry.NamespaceID == nsID {
- ret = append(ret, aEntry.AccessorID)
- }
- }
-
- resp.Data = map[string]interface{}{
- "keys": ret,
- }
- return resp, nil
-}
-
-// createAccessor is used to create an identifier for the token ID.
-// A storage index, mapping the accessor to the token ID is also created.
-func (ts *TokenStore) createAccessor(ctx context.Context, entry *logical.TokenEntry) error {
- defer metrics.MeasureSince([]string{"token", "createAccessor"}, time.Now())
-
- var err error
- // Create a random accessor
- entry.Accessor, err = base62.Random(TokenLength, true)
- if err != nil {
- return err
- }
-
- tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core)
- if err != nil {
- return err
- }
- if tokenNS == nil {
- return namespace.ErrNoNamespace
- }
-
- if tokenNS.ID != namespace.RootNamespaceID {
- entry.Accessor = fmt.Sprintf("%s.%s", entry.Accessor, tokenNS.ID)
- }
-
- // Create index entry, mapping the accessor to the token ID
- saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- saltID, err := ts.SaltID(saltCtx, entry.Accessor)
- if err != nil {
- return err
- }
-
- aEntry := &accessorEntry{
- TokenID: entry.ID,
- AccessorID: entry.Accessor,
- NamespaceID: entry.NamespaceID,
- }
-
- aEntryBytes, err := jsonutil.EncodeJSON(aEntry)
- if err != nil {
- return errwrap.Wrapf("failed to marshal accessor index entry: {{err}}", err)
- }
-
- le := &logical.StorageEntry{Key: saltID, Value: aEntryBytes}
- if err := ts.accessorView(tokenNS).Put(ctx, le); err != nil {
- return errwrap.Wrapf("failed to persist accessor index entry: {{err}}", err)
- }
- return nil
-}
-
-// Create is used to create a new token entry. The entry is assigned
-// a newly generated ID if not provided.
-func (ts *TokenStore) create(ctx context.Context, entry *logical.TokenEntry) error {
- defer metrics.MeasureSince([]string{"token", "create"}, time.Now())
-
- tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core)
- if err != nil {
- return err
- }
- if tokenNS == nil {
- return namespace.ErrNoNamespace
- }
-
- entry.Policies = policyutil.SanitizePolicies(entry.Policies, policyutil.DoNotAddDefaultPolicy)
-
- switch entry.Type {
- case logical.TokenTypeDefault, logical.TokenTypeService:
- // In case it was default, force to service
- entry.Type = logical.TokenTypeService
-
- // Generate an ID if necessary
- userSelectedID := true
- if entry.ID == "" {
- userSelectedID = false
- var err error
- entry.ID, err = base62.Random(TokenLength, true)
- if err != nil {
- return err
- }
- }
-
- if userSelectedID && strings.HasPrefix(entry.ID, "s.") {
- return fmt.Errorf("custom token ID cannot have the 's.' prefix")
- }
-
- if !userSelectedID {
- entry.ID = fmt.Sprintf("s.%s", entry.ID)
- }
-
-		// Attach the namespace ID for tokens that do not belong to the root
-		// namespace
- if tokenNS.ID != namespace.RootNamespaceID {
- entry.ID = fmt.Sprintf("%s.%s", entry.ID, tokenNS.ID)
- }
-
- if tokenNS.ID != namespace.RootNamespaceID || strings.HasPrefix(entry.ID, "s.") {
- if entry.CubbyholeID == "" {
- cubbyholeID, err := base62.Random(TokenLength, true)
- if err != nil {
- return err
- }
- entry.CubbyholeID = cubbyholeID
- }
- }
-
- // If the user didn't specifically pick the ID, e.g. because they were
- // sudo/root, check for collision; otherwise trust the process
- if userSelectedID {
- exist, _ := ts.lookupInternal(ctx, entry.ID, false, true)
- if exist != nil {
- return fmt.Errorf("cannot create a token with a duplicate ID")
- }
- }
-
- err = ts.createAccessor(ctx, entry)
- if err != nil {
- return err
- }
-
- return ts.storeCommon(ctx, entry, true)
-
- case logical.TokenTypeBatch:
- // Ensure fields we don't support/care about are nilled, proto marshal,
- // encrypt, skip persistence
- entry.ID = ""
- pEntry := &pb.TokenEntry{
- Parent: entry.Parent,
- Policies: entry.Policies,
- Path: entry.Path,
- Meta: entry.Meta,
- DisplayName: entry.DisplayName,
- CreationTime: entry.CreationTime,
- TTL: int64(entry.TTL),
- Role: entry.Role,
- EntityID: entry.EntityID,
- NamespaceID: entry.NamespaceID,
- Type: uint32(entry.Type),
- }
-
- boundCIDRs := make([]string, len(entry.BoundCIDRs))
- for i, cidr := range entry.BoundCIDRs {
- boundCIDRs[i] = cidr.String()
- }
- pEntry.BoundCIDRs = boundCIDRs
-
- mEntry, err := proto.Marshal(pEntry)
- if err != nil {
- return err
- }
-
- eEntry, err := ts.batchTokenEncryptor.Encrypt(ctx, "", mEntry)
- if err != nil {
- return err
- }
-
- bEntry := base64.RawURLEncoding.EncodeToString(eEntry)
- entry.ID = fmt.Sprintf("b.%s", bEntry)
-
- if tokenNS.ID != namespace.RootNamespaceID {
- entry.ID = fmt.Sprintf("%s.%s", entry.ID, tokenNS.ID)
- }
-
- return nil
-
- default:
- return fmt.Errorf("cannot create a token of type %d", entry.Type)
- }
-}
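
The conventions established above make token IDs self-describing. A purely illustrative classifier — the real routing lives in Lookup and lookupInternal below:

```go
func tokenKind(id string) string {
	switch {
	case strings.HasPrefix(id, "s."):
		return "service" // persisted; may carry a ".<nsID>" suffix
	case strings.HasPrefix(id, "b."):
		return "batch" // encrypted, self-contained, never persisted
	default:
		return "legacy" // pre-1.0 format, root namespace only
	}
}
```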
-
-// Store is used to store an updated token entry without writing the
-// secondary index.
-func (ts *TokenStore) store(ctx context.Context, entry *logical.TokenEntry) error {
- defer metrics.MeasureSince([]string{"token", "store"}, time.Now())
- return ts.storeCommon(ctx, entry, false)
-}
-
-// storeCommon handles the actual storage of an entry, possibly generating
-// secondary indexes
-func (ts *TokenStore) storeCommon(ctx context.Context, entry *logical.TokenEntry, writeSecondary bool) error {
- tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core)
- if err != nil {
- return err
- }
- if tokenNS == nil {
- return namespace.ErrNoNamespace
- }
-
- saltCtx := namespace.ContextWithNamespace(ctx, tokenNS)
- saltedID, err := ts.SaltID(saltCtx, entry.ID)
- if err != nil {
- return err
- }
-
- // Marshal the entry
- enc, err := json.Marshal(entry)
- if err != nil {
- return errwrap.Wrapf("failed to encode entry: {{err}}", err)
- }
-
- if writeSecondary {
- // Write the secondary index if necessary. This is done before the
- // primary index because we'd rather have a dangling pointer with
- // a missing primary instead of missing the parent index and potentially
- // escaping the revocation chain.
- if entry.Parent != "" {
- // Ensure the parent exists
- parent, err := ts.Lookup(ctx, entry.Parent)
- if err != nil {
- return errwrap.Wrapf("failed to lookup parent: {{err}}", err)
- }
- if parent == nil {
- return fmt.Errorf("parent token not found")
- }
-
- parentNS, err := NamespaceByID(ctx, parent.NamespaceID, ts.core)
- if err != nil {
- return err
- }
- if parentNS == nil {
- return namespace.ErrNoNamespace
- }
-
- parentCtx := namespace.ContextWithNamespace(ctx, parentNS)
-
- // Create the index entry
- parentSaltedID, err := ts.SaltID(parentCtx, entry.Parent)
- if err != nil {
- return err
- }
-
- path := parentSaltedID + "/" + saltedID
- if tokenNS.ID != namespace.RootNamespaceID {
- path = fmt.Sprintf("%s.%s", path, tokenNS.ID)
- }
-
- le := &logical.StorageEntry{Key: path}
- if err := ts.parentView(parentNS).Put(ctx, le); err != nil {
- return errwrap.Wrapf("failed to persist entry: {{err}}", err)
- }
- }
- }
-
- // Write the primary ID
- le := &logical.StorageEntry{Key: saltedID, Value: enc}
- if len(entry.Policies) == 1 && entry.Policies[0] == "root" {
- le.SealWrap = true
- }
- if err := ts.idView(tokenNS).Put(ctx, le); err != nil {
- return errwrap.Wrapf("failed to persist entry: {{err}}", err)
- }
- return nil
-}
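
For reference, the secondary-index key written above has the shape salt(parent)/salt(child), with the child token's namespace ID appended outside the root namespace. Sketched as a standalone helper, illustrative only:

```go
func parentIndexKey(parentSaltedID, childSaltedID, childNSID string) string {
	key := parentSaltedID + "/" + childSaltedID
	if childNSID != namespace.RootNamespaceID {
		key = fmt.Sprintf("%s.%s", key, childNSID)
	}
	return key
}
```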
-
-// UseToken is used to manage restricted-use tokens and decrement their
-// available uses. It returns two values: a potentially updated entry (or nil
-// if the token has been revoked) and any error encountered. The locking here
-// isn't perfect, as other parts of the code may update an entry, but usually
-// not after the entry is already created, so in practice this works well.
-func (ts *TokenStore) UseToken(ctx context.Context, te *logical.TokenEntry) (*logical.TokenEntry, error) {
- if te == nil {
- return nil, fmt.Errorf("invalid token entry provided for use count decrementing")
- }
-
- // This case won't be hit with a token with restricted uses because we go
- // from 1 to -1. So it's a nice optimization to check this without a read
- // lock.
- if te.NumUses == 0 {
- return te, nil
- }
-
- // If we are attempting to unwrap a control group request, don't use the token.
- // It will be manually revoked by the handler.
- if len(te.Policies) == 1 && te.Policies[0] == controlGroupPolicyName {
- return te, nil
- }
-
- lock := locksutil.LockForKey(ts.tokenLocks, te.ID)
- lock.Lock()
- defer lock.Unlock()
-
- var err error
- te, err = ts.lookupInternal(ctx, te.ID, false, false)
- if err != nil {
- return nil, errwrap.Wrapf("failed to refresh entry: {{err}}", err)
- }
- // If it can't be found we shouldn't be trying to use it, so if we get nil
- // back, it is because it has been revoked in the interim or will be
- // revoked (NumUses is -1)
- if te == nil {
- return nil, fmt.Errorf("token not found or fully used already")
- }
-
- // Decrement the count. If this is our last use count, we need to indicate
- // that this is no longer valid, but revocation is deferred to the end of
- // the call, so this will make sure that any Lookup that happens doesn't
- // return an entry. This essentially acts as a write-ahead lock and is
- // especially useful since revocation can end up (via the expiration
- // manager revoking children) attempting to acquire the same lock
- // repeatedly.
- if te.NumUses == 1 {
- te.NumUses = tokenRevocationPending
- } else {
- te.NumUses--
- }
-
- err = ts.store(ctx, te)
- if err != nil {
- return nil, err
- }
-
- return te, nil
-}
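
The decrement logic above is a small state machine; distilled into a sketch that mirrors the branch in UseToken:

```go
func nextUses(numUses int) int {
	switch {
	case numUses == 0:
		return 0 // unlimited use: never decremented
	case numUses == 1:
		return tokenRevocationPending // -1: tombstone until revocation completes
	default:
		return numUses - 1
	}
}
```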
-
-func (ts *TokenStore) UseTokenByID(ctx context.Context, id string) (*logical.TokenEntry, error) {
- te, err := ts.Lookup(ctx, id)
- if err != nil {
- return te, err
- }
-
- return ts.UseToken(ctx, te)
-}
-
-// Lookup is used to find a token given its ID. It acquires a read lock, then calls lookupInternal.
-func (ts *TokenStore) Lookup(ctx context.Context, id string) (*logical.TokenEntry, error) {
- defer metrics.MeasureSince([]string{"token", "lookup"}, time.Now())
- if id == "" {
- return nil, fmt.Errorf("cannot lookup blank token")
- }
-
- // If it starts with "b." it's a batch token
- if len(id) > 2 && strings.HasPrefix(id, "b.") {
- return ts.lookupBatchToken(ctx, id)
- }
-
- lock := locksutil.LockForKey(ts.tokenLocks, id)
- lock.RLock()
- defer lock.RUnlock()
-
- return ts.lookupInternal(ctx, id, false, false)
-}
-
-// lookupTainted is used to find a token that may or may not be tainted given
-// its ID. It acquires a read lock, then calls lookupInternal.
-func (ts *TokenStore) lookupTainted(ctx context.Context, id string) (*logical.TokenEntry, error) {
- defer metrics.MeasureSince([]string{"token", "lookup"}, time.Now())
- if id == "" {
- return nil, fmt.Errorf("cannot lookup blank token")
- }
-
- lock := locksutil.LockForKey(ts.tokenLocks, id)
- lock.RLock()
- defer lock.RUnlock()
-
- return ts.lookupInternal(ctx, id, false, true)
-}
-
-func (ts *TokenStore) lookupBatchToken(ctx context.Context, id string) (*logical.TokenEntry, error) {
- // Strip the b. from the front and namespace ID from the back
- bEntry, _ := namespace.SplitIDFromString(id[2:])
-
- eEntry, err := base64.RawURLEncoding.DecodeString(bEntry)
- if err != nil {
- return nil, err
- }
-
- mEntry, err := ts.batchTokenEncryptor.Decrypt(ctx, "", eEntry)
- if err != nil {
- return nil, nil
- }
-
- pEntry := new(pb.TokenEntry)
- if err := proto.Unmarshal(mEntry, pEntry); err != nil {
- return nil, err
- }
-
- te, err := pb.ProtoTokenEntryToLogicalTokenEntry(pEntry)
- if err != nil {
- return nil, err
- }
-
- if time.Now().After(time.Unix(te.CreationTime, 0).Add(te.TTL)) {
- return nil, nil
- }
-
- if te.Parent != "" {
- pte, err := ts.Lookup(ctx, te.Parent)
- if err != nil {
- return nil, err
- }
- if pte == nil {
- return nil, nil
- }
- }
-
- te.ID = id
- return te, nil
-}
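-
-// For reference, the batch token ID decoded above is laid out, informally, as:
-//
-//	"b." + base64url(encrypt(proto(TokenEntry))) [+ "." + namespaceID]
-//
-// where the trailing namespace segment is present only for tokens created
-// outside the root namespace, which is why SplitIDFromString strips it
-// before the base64 decode.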
-
-// lookupInternal is used to find a token given its (possibly salted) ID. If
-// tainted is true, entries that are in some revocation state (currently
-// indicated by num uses < 0) will still be returned.
-func (ts *TokenStore) lookupInternal(ctx context.Context, id string, salted, tainted bool) (*logical.TokenEntry, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, errwrap.Wrapf("failed to find namespace in context: {{err}}", err)
- }
-
- // If it starts with "b." it's a batch token
- if len(id) > 2 && strings.HasPrefix(id, "b.") {
- return ts.lookupBatchToken(ctx, id)
- }
-
- var raw *logical.StorageEntry
- lookupID := id
-
- if !salted {
- // If possible, always use the token's namespace. If it doesn't match
- // the request namespace, ensure the request namespace is a child
- _, nsID := namespace.SplitIDFromString(id)
- if nsID != "" {
- tokenNS, err := NamespaceByID(ctx, nsID, ts.core)
- if err != nil {
- return nil, errwrap.Wrapf("failed to look up namespace from the token: {{err}}", err)
- }
- if tokenNS != nil {
- if tokenNS.ID != ns.ID {
- ns = tokenNS
- ctx = namespace.ContextWithNamespace(ctx, tokenNS)
- }
- }
- } else {
- // Any non-root-ns token should have an accessor and child
- // namespaces cannot have custom IDs. If someone omits or tampers
- // with it, the lookup in the root namespace simply won't work.
- ns = namespace.RootNamespace
- ctx = namespace.ContextWithNamespace(ctx, ns)
- }
-
- lookupID, err = ts.SaltID(ctx, id)
- if err != nil {
- return nil, err
- }
- }
-
- raw, err = ts.idView(ns).Get(ctx, lookupID)
- if err != nil {
- return nil, errwrap.Wrapf("failed to read entry: {{err}}", err)
- }
-
- // Bail if not found
- if raw == nil {
- return nil, nil
- }
-
- // Unmarshal the token
- entry := new(logical.TokenEntry)
- if err := jsonutil.DecodeJSON(raw.Value, entry); err != nil {
- return nil, errwrap.Wrapf("failed to decode entry: {{err}}", err)
- }
-
- // This is a token that is awaiting deferred revocation or tainted
- if entry.NumUses < 0 && !tainted {
- return nil, nil
- }
-
- if entry.NamespaceID == "" {
- entry.NamespaceID = namespace.RootNamespaceID
- }
-
- // This will be the upgrade case
- if entry.Type == logical.TokenTypeDefault {
- entry.Type = logical.TokenTypeService
- }
-
- persistNeeded := false
-
- // Upgrade the deprecated fields
- if entry.DisplayNameDeprecated != "" {
- if entry.DisplayName == "" {
- entry.DisplayName = entry.DisplayNameDeprecated
- }
- entry.DisplayNameDeprecated = ""
- persistNeeded = true
- }
-
- if entry.CreationTimeDeprecated != 0 {
- if entry.CreationTime == 0 {
- entry.CreationTime = entry.CreationTimeDeprecated
- }
- entry.CreationTimeDeprecated = 0
- persistNeeded = true
- }
-
- if entry.ExplicitMaxTTLDeprecated != 0 {
- if entry.ExplicitMaxTTL == 0 {
- entry.ExplicitMaxTTL = entry.ExplicitMaxTTLDeprecated
- }
- entry.ExplicitMaxTTLDeprecated = 0
- persistNeeded = true
- }
-
- if entry.NumUsesDeprecated != 0 {
- if entry.NumUses == 0 || entry.NumUsesDeprecated < entry.NumUses {
- entry.NumUses = entry.NumUsesDeprecated
- }
- entry.NumUsesDeprecated = 0
- persistNeeded = true
- }
-
- // It's a root token with unlimited creation TTL (so never had an
- // expiration); this may or may not have a lease (based on when it was
- // generated, for later revocation purposes) but it doesn't matter, it's
- // allowed. Fast-path this.
- if len(entry.Policies) == 1 && entry.Policies[0] == "root" && entry.TTL == 0 {
- // If fields are getting upgraded, store the changes
- if persistNeeded {
- if err := ts.store(ctx, entry); err != nil {
- return nil, errwrap.Wrapf("failed to persist token upgrade: {{err}}", err)
- }
- }
- return entry, nil
- }
-
- // Perform these checks on upgraded fields, but before persisting
-
- // If we are still restoring the expiration manager, we want to ensure the
- // token is not expired
- if ts.expiration == nil {
- return nil, errors.New("expiration manager is nil on tokenstore")
- }
- le, err := ts.expiration.FetchLeaseTimesByToken(ctx, entry)
- if err != nil {
- return nil, errwrap.Wrapf("failed to fetch lease times: {{err}}", err)
- }
-
- var ret *logical.TokenEntry
-
- switch {
- // It's any kind of expiring token with no lease, immediately delete it
- case le == nil:
- tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core)
- if err != nil {
- return nil, err
- }
- if tokenNS == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- revokeCtx := namespace.ContextWithNamespace(ts.quitContext, tokenNS)
- leaseID, err := ts.expiration.CreateOrFetchRevocationLeaseByToken(revokeCtx, entry)
- if err != nil {
- return nil, err
- }
-
- err = ts.expiration.Revoke(revokeCtx, leaseID)
- if err != nil {
- return nil, err
- }
-
- // Only return if we're not past lease expiration (or if tainted is true),
- // otherwise assume expmgr is working on revocation
- default:
- if !le.ExpireTime.Before(time.Now()) || tainted {
- ret = entry
- }
- }
-
- // If fields are getting upgraded, store the changes
- if persistNeeded {
- if err := ts.store(ctx, entry); err != nil {
- return nil, errwrap.Wrapf("failed to persist token upgrade: {{err}}", err)
- }
- }
-
- return ret, nil
-}
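-
-// Note that the (nil, nil) returns above are deliberate: callers cannot
-// distinguish "never existed", "expired", and "pending revocation", which
-// keeps lookups from leaking token state. A minimal sketch of the caller
-// pattern:
-//
-//	te, err := ts.lookupInternal(ctx, id, false, false)
-//	if err != nil {
-//		return err // storage or namespace failure
-//	}
-//	if te == nil {
-//		return logical.ErrPermissionDenied // absent, expired, or revoking
-//	}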
-
-// revokeOrphan is used to invalidate a given token; any child tokens
-// will be orphaned.
-func (ts *TokenStore) revokeOrphan(ctx context.Context, id string) error {
- defer metrics.MeasureSince([]string{"token", "revoke"}, time.Now())
- if id == "" {
- return fmt.Errorf("cannot revoke blank token")
- }
-
- saltedID, err := ts.SaltID(ctx, id)
- if err != nil {
- return err
- }
-
- return ts.revokeInternal(ctx, saltedID, false)
-}
-
-// revokeInternal is used to invalidate a given salted token; any child tokens
-// will be orphaned unless otherwise specified. skipOrphan should be used
-// whenever we are revoking the entire tree starting from a particular parent
-// (e.g. revokeTreeInternal).
-func (ts *TokenStore) revokeInternal(ctx context.Context, saltedID string, skipOrphan bool) (ret error) {
- // Check and set the token deletion state. We only proceed with the deletion
- // if we don't have a pending deletion (empty), or if the deletion previously
- // failed (state is false)
- state, loaded := ts.tokensPendingDeletion.LoadOrStore(saltedID, true)
-
- // If the entry was loaded and its state is true, we short-circuit
- if loaded && state == true {
- return nil
- }
-
- // The map check above should protect us from any concurrent revocations, so
- // we do another lookup here to make sure we have the right state
- entry, err := ts.lookupInternal(ctx, saltedID, true, true)
- if err != nil {
- return err
- }
- if entry == nil {
- return nil
- }
-
- if entry.NumUses != tokenRevocationPending {
- entry.NumUses = tokenRevocationPending
- if err := ts.store(ctx, entry); err != nil {
- // The only real reason for this is an underlying storage error
- // which also means that nothing else in this func or expmgr will
- // really work either. So we clear revocation state so the user can
- // try again.
- ts.logger.Error("failed to mark token as revoked")
- ts.tokensPendingDeletion.Store(entry.ID, false)
- return err
- }
- }
-
- tokenNS, err := NamespaceByID(ctx, entry.NamespaceID, ts.core)
- if err != nil {
- return err
- }
- if tokenNS == nil {
- return namespace.ErrNoNamespace
- }
-
- defer func() {
- // If we succeeded in all other revocation operations after this defer and
- // before we return, we can remove the token store entry
- if ret == nil {
- if err := ts.idView(tokenNS).Delete(ctx, saltedID); err != nil {
- ret = errwrap.Wrapf("failed to delete entry: {{err}}", err)
- }
- }
-
- // Check on ret again and update the sync.Map accordingly
- if ret != nil {
- // If we failed on any of the calls within, we store the state as false
- // so that the next call to revokeInternal will retry
- ts.tokensPendingDeletion.Store(saltedID, false)
- } else {
- ts.tokensPendingDeletion.Delete(saltedID)
- }
- }()
-
- // Destroy the token's cubby. This should go first as it's a
- // security-sensitive item.
- err = ts.cubbyholeDestroyer(ctx, ts, entry)
- if err != nil {
- return err
- }
-
- revokeCtx := namespace.ContextWithNamespace(ts.quitContext, tokenNS)
- if err := ts.expiration.RevokeByToken(revokeCtx, entry); err != nil {
- return err
- }
-
- // Clear the secondary index if any
- if entry.Parent != "" {
- _, parentNSID := namespace.SplitIDFromString(entry.Parent)
- parentCtx := revokeCtx
- parentNS := tokenNS
-
- if parentNSID != tokenNS.ID {
- switch {
- case parentNSID == "":
- parentNS = namespace.RootNamespace
- default:
- parentNS, err = NamespaceByID(ctx, parentNSID, ts.core)
- if err != nil {
- return errwrap.Wrapf("failed to get parent namespace: {{err}}", err)
- }
- if parentNS == nil {
- return namespace.ErrNoNamespace
- }
- }
-
- parentCtx = namespace.ContextWithNamespace(ctx, parentNS)
- }
-
- parentSaltedID, err := ts.SaltID(parentCtx, entry.Parent)
- if err != nil {
- return err
- }
-
- path := parentSaltedID + "/" + saltedID
- if tokenNS.ID != namespace.RootNamespaceID {
- path = fmt.Sprintf("%s.%s", path, tokenNS.ID)
- }
-
- if err = ts.parentView(parentNS).Delete(ctx, path); err != nil {
- return errwrap.Wrapf("failed to delete entry: {{err}}", err)
- }
- }
-
- // Clear the accessor index if any
- if entry.Accessor != "" {
- accessorSaltedID, err := ts.SaltID(revokeCtx, entry.Accessor)
- if err != nil {
- return err
- }
-
- if err = ts.accessorView(tokenNS).Delete(ctx, accessorSaltedID); err != nil {
- return errwrap.Wrapf("failed to delete entry: {{err}}", err)
- }
- }
-
- if !skipOrphan {
- // Mark all child tokens as orphans by removing
- // their parent index, and clear the parent entry.
- //
- // Marking the token as orphan should be skipped if it's called by
- // revokeTreeInternal to avoid unnecessary view.List operations. Since
- // the deletion occurs in a DFS fashion we don't need to perform a delete
- // on child prefixes, as there will be none (the saltedID entry is a leaf node).
- children, err := ts.parentView(tokenNS).List(ctx, saltedID+"/")
- if err != nil {
- return errwrap.Wrapf("failed to scan for children: {{err}}", err)
- }
- for _, child := range children {
- var childNSID string
- childCtx := revokeCtx
- child, childNSID = namespace.SplitIDFromString(child)
- if childNSID != "" {
- childNS, err := NamespaceByID(ctx, childNSID, ts.core)
- if err != nil {
- return errwrap.Wrapf("failed to get child token: {{err}}", err)
- }
- if childNS == nil {
- return namespace.ErrNoNamespace
- }
-
- childCtx = namespace.ContextWithNamespace(ctx, childNS)
- }
-
- entry, err := ts.lookupInternal(childCtx, child, true, true)
- if err != nil {
- return errwrap.Wrapf("failed to get child token: {{err}}", err)
- }
- if entry == nil {
- // Seems it's already revoked, so nothing to do here except delete the index
- err = ts.parentView(tokenNS).Delete(ctx, child)
- if err != nil {
- return errwrap.Wrapf("failed to delete child entry: {{err}}", err)
- }
- continue
- }
-
- lock := locksutil.LockForKey(ts.tokenLocks, entry.ID)
- lock.Lock()
-
- entry.Parent = ""
- err = ts.store(childCtx, entry)
- if err != nil {
- lock.Unlock()
- return errwrap.Wrapf("failed to update child token: {{err}}", err)
- }
- lock.Unlock()
-
- // Delete the child storage entry after we update the token entry. Since
- // paths are not deeply nested (i.e. they are simply
- // parentPrefix/<parent>/<child>), we can simply call view.Delete instead
- // of logical.ClearView
- err = ts.parentView(tokenNS).Delete(ctx, child)
- if err != nil {
- return errwrap.Wrapf("failed to delete child entry: {{err}}", err)
- }
- }
- }
-
- return nil
-}
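-
-// The tokensPendingDeletion map used above behaves as a small per-token
-// state machine (informal summary):
-//
-//	key absent     -> revocation may proceed (LoadOrStore stores true)
-//	value == true  -> revocation in flight; concurrent calls short-circuit
-//	value == false -> a previous attempt failed; the next call retries
-//
-// Successful revocations delete the key in the deferred block so the map
-// does not grow without bound.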
-
-// revokeTree is used to invalidate a given token and all
-// child tokens.
-func (ts *TokenStore) revokeTree(ctx context.Context, le *leaseEntry) error {
- defer metrics.MeasureSince([]string{"token", "revoke-tree"}, time.Now())
- // Verify the token is not blank
- if le.ClientToken == "" {
- return fmt.Errorf("cannot tree-revoke blank token")
- }
-
- // In case lookup fails for some reason for the token itself, set the
- // context for the next call from the lease entry's NS. This function is
- // only called when a lease for a given token is expiring, so it should run
- // in the context of the token namespace
- revCtx := namespace.ContextWithNamespace(ctx, le.namespace)
-
- saltedID, err := ts.SaltID(revCtx, le.ClientToken)
- if err != nil {
- return err
- }
-
- // Nuke the entire tree recursively
- return ts.revokeTreeInternal(revCtx, saltedID)
-}
-
-// revokeTreeInternal is used to invalidate a given token and all
-// child tokens.
-// Updated to be non-recursive and revoke child tokens
-// before parent tokens (DFS).
-func (ts *TokenStore) revokeTreeInternal(ctx context.Context, id string) error {
- dfs := []string{id}
- seenIDs := make(map[string]struct{})
-
- var ns *namespace.Namespace
-
- te, err := ts.lookupInternal(ctx, id, true, true)
- if err != nil {
- return err
- }
- if te == nil {
- ns, err = namespace.FromContext(ctx)
- if err != nil {
- return err
- }
- } else {
- ns, err = NamespaceByID(ctx, te.NamespaceID, ts.core)
- if err != nil {
- return err
- }
- }
- if ns == nil {
- return fmt.Errorf("failed to find namespace for token revocation")
- }
-
- for l := len(dfs); l > 0; l = len(dfs) {
- id := dfs[len(dfs)-1]
- seenIDs[id] = struct{}{}
-
- saltedCtx := ctx
- saltedNS := ns
- saltedID, saltedNSID := namespace.SplitIDFromString(id)
- if saltedNSID != "" {
- saltedNS, err = NamespaceByID(ctx, saltedNSID, ts.core)
- if err != nil {
- return errwrap.Wrapf("failed to find namespace for token revocation: {{err}}", err)
- }
-
- saltedCtx = namespace.ContextWithNamespace(ctx, saltedNS)
- }
-
- path := saltedID + "/"
- childrenRaw, err := ts.parentView(saltedNS).List(saltedCtx, path)
- if err != nil {
- return errwrap.Wrapf("failed to scan for children: {{err}}", err)
- }
-
- // Filter the child list to remove any items that have ever been in the dfs stack.
- // This is a robustness check, as a parent/child cycle can lead to an OOM crash.
- children := make([]string, 0, len(childrenRaw))
- for _, child := range childrenRaw {
- if _, seen := seenIDs[child]; !seen {
- children = append(children, child)
- } else {
- if err = ts.parentView(saltedNS).Delete(saltedCtx, path+child); err != nil {
- return errwrap.Wrapf("failed to delete entry: {{err}}", err)
- }
-
- ts.Logger().Warn("token cycle found", "token", child)
- }
- }
-
- // If the length of the children array is zero,
- // then we are at a leaf node.
- if len(children) == 0 {
- // Whenever revokeInternal is called, the token will be removed immediately and
- // any underlying secrets will be handed off to the expiration manager which will
- // take care of expiring them. If Vault is restarted, any revoked tokens
- // would have been deleted, and any pending leases for deletion will be restored
- // by the expiration manager.
- if err := ts.revokeInternal(saltedCtx, saltedID, true); err != nil {
- return errwrap.Wrapf("failed to revoke entry: {{err}}", err)
- }
- // If the length of l is equal to 1, then the last token has been deleted
- if l == 1 {
- return nil
- }
- dfs = dfs[:len(dfs)-1]
- } else {
- // If we make it here, there are children and they must be appended.
- dfs = append(dfs, children...)
- }
- }
-
- return nil
-}
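-
-// To make the traversal above concrete, consider the (hypothetical) tree
-// root -> {a, b}, a -> {c}. The stack evolves as:
-//
-//	[root] -> [root a b]    list root's children, append
-//	revoke b (leaf), pop    -> [root a]
-//	[root a] -> [root a c]  list a's children, append
-//	revoke c, pop; revoke a (now a leaf), pop; revoke root
-//
-// Children are always revoked before their parents, so a crash mid-walk
-// leaves only subtrees whose roots remain discoverable for a later retry.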
-
-func (c *Core) IsBatchTokenCreationRequest(ctx context.Context, path string) (bool, error) {
- name := strings.TrimPrefix(path, "auth/token/create/")
- roleEntry, err := c.tokenStore.tokenStoreRole(ctx, name)
- if err != nil {
- return false, err
- }
- if roleEntry == nil {
- // Guard against a nil dereference when the named role does not exist.
- return false, fmt.Errorf("unknown role %s", name)
- }
- return roleEntry.TokenType == logical.TokenTypeBatch, nil
-}
-
-// handleCreateAgainstRole handles the auth/token/create path for a role
-func (ts *TokenStore) handleCreateAgainstRole(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- name := d.Get("role_name").(string)
- roleEntry, err := ts.tokenStoreRole(ctx, name)
- if err != nil {
- return nil, err
- }
- if roleEntry == nil {
- return logical.ErrorResponse(fmt.Sprintf("unknown role %s", name)), nil
- }
-
- return ts.handleCreateCommon(ctx, req, d, false, roleEntry)
-}
-
-func (ts *TokenStore) lookupByAccessor(ctx context.Context, id string, salted, tainted bool) (accessorEntry, error) {
- var aEntry accessorEntry
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return aEntry, err
- }
-
- lookupID := id
- if !salted {
- _, nsID := namespace.SplitIDFromString(id)
- if nsID != "" {
- accessorNS, err := NamespaceByID(ctx, nsID, ts.core)
- if err != nil {
- return aEntry, err
- }
- if accessorNS != nil {
- if accessorNS.ID != ns.ID {
- ns = accessorNS
- ctx = namespace.ContextWithNamespace(ctx, accessorNS)
- }
- }
- } else {
- // Any non-root-ns token should have an accessor and child
- // namespaces cannot have custom IDs. If someone omits or tampers
- // with it, the lookup in the root namespace simply won't work.
- ns = namespace.RootNamespace
- ctx = namespace.ContextWithNamespace(ctx, ns)
- }
-
- lookupID, err = ts.SaltID(ctx, id)
- if err != nil {
- return aEntry, err
- }
- }
-
- entry, err := ts.accessorView(ns).Get(ctx, lookupID)
- if err != nil {
- return aEntry, errwrap.Wrapf("failed to read index using accessor: {{err}}", err)
- }
- if entry == nil {
- return aEntry, &logical.StatusBadRequest{Err: "invalid accessor"}
- }
-
- err = jsonutil.DecodeJSON(entry.Value, &aEntry)
- // If we hit an error, assume it's a pre-struct straight token ID
- if err != nil {
- te, err := ts.lookupInternal(ctx, string(entry.Value), false, tainted)
- if err != nil {
- return accessorEntry{}, errwrap.Wrapf("failed to look up token using accessor index: {{err}}", err)
- }
- // It's hard to reason about what to do here if te is nil -- it may be
- // that the token was revoked async, or that it's an old accessor index
- // entry that was somehow never cleaned up, and so on. A nonexistent token
- // entry on lookup is nil, not an error, so we keep that behavior here
- // to be safe...the token ID is simply not filled in.
- if te != nil {
- aEntry.TokenID = te.ID
- aEntry.AccessorID = te.Accessor
- aEntry.NamespaceID = te.NamespaceID
- }
- }
-
- if aEntry.NamespaceID == "" {
- aEntry.NamespaceID = namespace.RootNamespaceID
- }
-
- return aEntry, nil
-}
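-
-// For context, the accessor index value decoded above is either a JSON
-// accessorEntry (roughly {"token_id": ..., "accessor_id": ...,
-// "namespace_id": ...}) or, for entries written before the struct existed,
-// a bare token ID string -- which is why a decode failure falls back to
-// lookupInternal on the raw value.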
-
-// handleTidy handles the cleanup of leaked accessor storage entries and
-// of leases associated with expired tokens.
-func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- if !atomic.CompareAndSwapUint32(ts.tidyLock, 0, 1) {
- resp := &logical.Response{}
- resp.AddWarning("Tidy operation already in progress.")
- return resp, nil
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, errwrap.Wrapf("failed get namespace from context: {{err}}", err)
- }
-
- go func() {
- defer atomic.StoreUint32(ts.tidyLock, 0)
-
- logger := ts.logger.Named("tidy")
-
- var tidyErrors *multierror.Error
-
- doTidy := func() error {
- ts.logger.Info("beginning tidy operation on tokens")
- defer ts.logger.Info("finished tidy operation on tokens")
-
- quitCtx := namespace.ContextWithNamespace(ts.quitContext, ns)
-
- // List out all the accessors
- saltedAccessorList, err := ts.accessorView(ns).List(quitCtx, "")
- if err != nil {
- return errwrap.Wrapf("failed to fetch accessor index entries: {{err}}", err)
- }
-
- // First, clean up secondary index entries that are no longer valid
- parentList, err := ts.parentView(ns).List(quitCtx, "")
- if err != nil {
- return errwrap.Wrapf("failed to fetch secondary index entries: {{err}}", err)
- }
-
- var countParentEntries, deletedCountParentEntries, countParentList, deletedCountParentList int64
-
- // Scan through the secondary index entries; if there is an entry
- // with the token's salt ID at the end, remove it
- for _, parent := range parentList {
- countParentEntries++
-
- // Get the children
- children, err := ts.parentView(ns).List(quitCtx, parent)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to read secondary index: {{err}}", err))
- continue
- }
-
- // First, check whether the salted ID of the parent exists; if it does
- // not, mark this so that the deletion of children in the loop below
- // applies to all children
- originalChildrenCount := int64(len(children))
- exists, _ := ts.lookupInternal(quitCtx, strings.TrimSuffix(parent, "/"), true, true)
- if exists == nil {
- ts.logger.Debug("deleting invalid parent prefix entry", "index", parentPrefix+parent)
- }
-
- var deletedChildrenCount int64
- for _, child := range children {
- countParentList++
- if countParentList%500 == 0 {
- ts.logger.Info("checking validity of tokens in secondary index list", "progress", countParentList)
- }
-
- // Look up tainted entries so we can be sure that if this isn't
- // found, it doesn't exist. Doing the following without locking
- // since appropriate locks cannot be held with salted token IDs.
- // Also perform deletion if the parent doesn't exist any more.
- te, _ := ts.lookupInternal(quitCtx, child, true, true)
- // If the child entry is not nil, but the parent doesn't exist, then turn
- // that child token into an orphan token. There's no deletion in this case.
- if te != nil && exists == nil {
- lock := locksutil.LockForKey(ts.tokenLocks, te.ID)
- lock.Lock()
-
- te.Parent = ""
- err = ts.store(quitCtx, te)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to convert child token into an orphan token: {{err}}", err))
- }
- lock.Unlock()
- continue
- }
- // Otherwise, if the entry doesn't exist or if the parent doesn't exist,
- // go on with the delete on the secondary index
- if te == nil || exists == nil {
- index := parent + child
- ts.logger.Debug("deleting invalid secondary index", "index", index)
- err = ts.parentView(ns).Delete(quitCtx, index)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete secondary index: {{err}}", err))
- continue
- }
- deletedChildrenCount++
- }
- }
- // Add current children deleted count to the total count
- deletedCountParentList += deletedChildrenCount
- // N.B.: We don't call delete on the parent prefix since physical.Backend.Delete
- // implementations should be in charge of deleting empty prefixes.
- // If we deleted all the children, then add that to our deleted parent entries count.
- if originalChildrenCount == deletedChildrenCount {
- deletedCountParentEntries++
- }
- }
-
- var countAccessorList,
- deletedCountAccessorEmptyToken,
- deletedCountAccessorInvalidToken,
- deletedCountInvalidTokenInAccessor int64
-
- // For each accessor, see if the token ID associated with it is
- // a valid one. If not, delete the leases associated with that token
- // and delete the accessor as well.
- for _, saltedAccessor := range saltedAccessorList {
- countAccessorList++
- if countAccessorList%500 == 0 {
- ts.logger.Info("checking if accessors contain valid tokens", "progress", countAccessorList)
- }
-
- accessorEntry, err := ts.lookupByAccessor(quitCtx, saltedAccessor, true, true)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to read the accessor index: {{err}}", err))
- continue
- }
-
- // A valid accessor storage entry should always have a token ID
- // in it. If not, it is an invalid accessor entry and needs to
- // be deleted.
- if accessorEntry.TokenID == "" {
- // If deletion of accessor fails, move on to the next
- // item since this is just a best-effort operation
- err = ts.accessorView(ns).Delete(quitCtx, saltedAccessor)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete the accessor index: {{err}}", err))
- continue
- }
- deletedCountAccessorEmptyToken++
- // With the accessor entry gone and no token ID to inspect, move
- // on to the next accessor.
- continue
- }
-
- lock := locksutil.LockForKey(ts.tokenLocks, accessorEntry.TokenID)
- lock.RLock()
-
- // Look up tainted variants so we only find entries that truly don't
- // exist
- te, err := ts.lookupInternal(quitCtx, accessorEntry.TokenID, false, true)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to lookup tainted ID: {{err}}", err))
- lock.RUnlock()
- continue
- }
-
- lock.RUnlock()
-
- // If token entry is not found assume that the token is not valid any
- // more and conclude that accessor, leases, and secondary index entries
- // for this token should not exist as well.
- if te == nil {
- ts.logger.Info("deleting token with nil entry referenced by accessor", "salted_accessor", saltedAccessor)
-
- // RevokeByToken expects a '*logical.TokenEntry'. For the
- // purposes of tidying, it is sufficient if the token
- // entry only has ID set.
- tokenEntry := &logical.TokenEntry{
- ID: accessorEntry.TokenID,
- NamespaceID: accessorEntry.NamespaceID,
- }
-
- // Attempt to revoke the token. This will also revoke
- // the leases associated with the token.
- err = ts.expiration.RevokeByToken(quitCtx, tokenEntry)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to revoke leases of expired token: {{err}}", err))
- continue
- }
- deletedCountInvalidTokenInAccessor++
-
- // If deletion of accessor fails, move on to the next item since
- // this is just a best-effort operation. We do this last so that on
- // next run if something above failed we still have the accessor
- // entry to try again.
- err = ts.accessorView(ns).Delete(quitCtx, saltedAccessor)
- if err != nil {
- tidyErrors = multierror.Append(tidyErrors, errwrap.Wrapf("failed to delete accessor entry: {{err}}", err))
- continue
- }
- deletedCountAccessorInvalidToken++
- }
- }
-
- ts.logger.Info("number of entries scanned in parent prefix", "count", countParentEntries)
- ts.logger.Info("number of entries deleted in parent prefix", "count", deletedCountParentEntries)
- ts.logger.Info("number of tokens scanned in parent index list", "count", countParentList)
- ts.logger.Info("number of tokens revoked in parent index list", "count", deletedCountParentList)
- ts.logger.Info("number of accessors scanned", "count", countAccessorList)
- ts.logger.Info("number of deleted accessors which had empty tokens", "count", deletedCountAccessorEmptyToken)
- ts.logger.Info("number of revoked tokens which were invalid but present in accessors", "count", deletedCountInvalidTokenInAccessor)
- ts.logger.Info("number of deleted accessors which had invalid tokens", "count", deletedCountAccessorInvalidToken)
-
- return tidyErrors.ErrorOrNil()
- }
-
- if err := doTidy(); err != nil {
- logger.Error("error running tidy", "error", err)
- return
- }
- }()
-
- resp := &logical.Response{}
- resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.")
- return logical.RespondWithStatusCode(resp, req, http.StatusAccepted)
-}
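-
-// Operationally, this handler backs the auth/token/tidy endpoint. A
-// typical invocation (illustrative) is:
-//
-//	$ vault write -f auth/token/tidy
-//
-// which returns 202 Accepted immediately; progress and results appear
-// only in the server log, per the warning above.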
-
-// handleUpdateLookupAccessor handles the auth/token/lookup-accessor path for returning
-// the properties of the token associated with the accessor
-func (ts *TokenStore) handleUpdateLookupAccessor(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- accessor := data.Get("accessor").(string)
- if accessor == "" {
- return nil, &logical.StatusBadRequest{Err: "missing accessor"}
- }
-
- aEntry, err := ts.lookupByAccessor(ctx, accessor, false, false)
- if err != nil {
- return nil, err
- }
-
- // Prepare the field data required for a lookup call
- d := &framework.FieldData{
- Raw: map[string]interface{}{
- "token": aEntry.TokenID,
- },
- Schema: map[string]*framework.FieldSchema{
- "token": &framework.FieldSchema{
- Type: framework.TypeString,
- Description: "Token to lookup",
- },
- },
- }
- resp, err := ts.handleLookup(ctx, req, d)
- if err != nil {
- return nil, err
- }
- if resp == nil {
- return nil, fmt.Errorf("failed to lookup the token")
- }
- if resp.IsError() {
- return resp, nil
- }
-
- // Remove the token ID from the response
- if resp.Data != nil {
- resp.Data["id"] = ""
- }
-
- return resp, nil
-}
-
-// handleUpdateRevokeAccessor handles the auth/token/revoke-accessor path for revoking
-// the token associated with the accessor
-func (ts *TokenStore) handleUpdateRevokeAccessor(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- accessor := data.Get("accessor").(string)
- if accessor == "" {
- return nil, &logical.StatusBadRequest{Err: "missing accessor"}
- }
-
- aEntry, err := ts.lookupByAccessor(ctx, accessor, false, true)
- if err != nil {
- return nil, err
- }
-
- te, err := ts.Lookup(ctx, aEntry.TokenID)
- if err != nil {
- return nil, err
- }
- if te == nil {
- return logical.ErrorResponse("token not found"), logical.ErrInvalidRequest
- }
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, ts.core)
- if err != nil {
- return nil, err
- }
- if tokenNS == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- revokeCtx := namespace.ContextWithNamespace(ts.quitContext, tokenNS)
- leaseID, err := ts.expiration.CreateOrFetchRevocationLeaseByToken(revokeCtx, te)
- if err != nil {
- return nil, err
- }
-
- err = ts.expiration.Revoke(revokeCtx, leaseID)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// handleCreateOrphan handles the auth/token/create-orphan path for creation
-// of new orphan tokens
-func (ts *TokenStore) handleCreateOrphan(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return ts.handleCreateCommon(ctx, req, d, true, nil)
-}
-
-// handleCreate handles the auth/token/create path for creation of new non-orphan
-// tokens
-func (ts *TokenStore) handleCreate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- return ts.handleCreateCommon(ctx, req, d, false, nil)
-}
-
-// handleCreateCommon handles the auth/token/create path for creation of new tokens
-func (ts *TokenStore) handleCreateCommon(ctx context.Context, req *logical.Request, d *framework.FieldData, orphan bool, role *tsRoleEntry) (*logical.Response, error) {
- // Read the parent policy
- parent, err := ts.Lookup(ctx, req.ClientToken)
- if err != nil {
- return nil, errwrap.Wrapf("parent token lookup failed: {{err}}", err)
- }
- if parent == nil {
- return logical.ErrorResponse("parent token lookup failed: no parent found"), logical.ErrInvalidRequest
- }
- if parent.Type == logical.TokenTypeBatch {
- return logical.ErrorResponse("batch tokens cannot create more tokens"), nil
- }
-
- // A token with a restricted number of uses cannot create a new token;
- // otherwise it could escape the restriction count.
- if parent.NumUses > 0 {
- return logical.ErrorResponse("restricted use token cannot generate child tokens"),
- logical.ErrInvalidRequest
- }
-
- // Check if the client token has sudo/root privileges for the requested path
- isSudo := ts.System().SudoPrivilege(ctx, req.MountPoint+req.Path, req.ClientToken)
-
- // Read and parse the fields
- var data struct {
- ID string
- Policies []string
- Metadata map[string]string `mapstructure:"meta"`
- NoParent bool `mapstructure:"no_parent"`
- NoDefaultPolicy bool `mapstructure:"no_default_policy"`
- Lease string
- TTL string
- Renewable *bool
- ExplicitMaxTTL string `mapstructure:"explicit_max_ttl"`
- DisplayName string `mapstructure:"display_name"`
- NumUses int `mapstructure:"num_uses"`
- Period string
- Type string `mapstructure:"type"`
- }
- if err := mapstructure.WeakDecode(req.Data, &data); err != nil {
- return logical.ErrorResponse(fmt.Sprintf(
- "Error decoding request: %s", err)), logical.ErrInvalidRequest
- }
-
- // If the context's namespace is different from the parent and this is an
- // orphan token creation request, then this is an admin token generation for
- // the namespace
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- if ns.ID != parent.NamespaceID {
- parentNS, err := NamespaceByID(ctx, parent.NamespaceID, ts.core)
- if err != nil {
- ts.logger.Error("error looking up parent namespace", "error", err, "parent_namespace", parent.NamespaceID)
- return nil, ErrInternalError
- }
- if parentNS == nil {
- ts.logger.Error("could not find information for parent namespace", "parent_namespace", parent.NamespaceID)
- return nil, ErrInternalError
- }
-
- if !isSudo {
- return logical.ErrorResponse("root or sudo privileges required generate a namespace admin token"), logical.ErrInvalidRequest
- }
-
- if strutil.StrListContains(data.Policies, "root") {
- return logical.ErrorResponse("root tokens may not be created from a parent namespace"), logical.ErrInvalidRequest
- }
- }
-
- renewable := true
- if data.Renewable != nil {
- renewable = *data.Renewable
- }
-
- tokenType := logical.TokenTypeService
- tokenTypeStr := data.Type
- if role != nil {
- switch role.TokenType {
- case logical.TokenTypeDefault, logical.TokenTypeDefaultService:
- // Use the user-given value, but fall back to service
- case logical.TokenTypeDefaultBatch:
- // Use the user-given value, but fall back to batch
- if tokenTypeStr == "" {
- tokenTypeStr = logical.TokenTypeBatch.String()
- }
- case logical.TokenTypeService:
- tokenTypeStr = logical.TokenTypeService.String()
- case logical.TokenTypeBatch:
- tokenTypeStr = logical.TokenTypeBatch.String()
- default:
- return logical.ErrorResponse(fmt.Sprintf("role being used for token creation contains invalid token type %q", role.TokenType.String())), nil
- }
- }
- switch tokenTypeStr {
- case "", "service":
- case "batch":
- var badReason string
- switch {
- case data.ExplicitMaxTTL != "":
- dur, err := parseutil.ParseDurationSecond(data.ExplicitMaxTTL)
- if err != nil {
- return logical.ErrorResponse(`"explicit_max_ttl" value could not be parsed`), nil
- }
- if dur != 0 {
- badReason = "explicit_max_ttl"
- }
- case data.NumUses != 0:
- badReason = "num_uses"
- case data.Period != "":
- dur, err := parseutil.ParseDurationSecond(data.Period)
- if err != nil {
- return logical.ErrorResponse(`"period" value could not be parsed`), nil
- }
- if dur != 0 {
- badReason = "period"
- }
- }
- if badReason != "" {
- return logical.ErrorResponse(fmt.Sprintf("batch tokens cannot have %q set", badReason)), nil
- }
- tokenType = logical.TokenTypeBatch
- renewable = false
- default:
- return logical.ErrorResponse("invalid 'token_type' value"), logical.ErrInvalidRequest
- }
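-
- // Summarizing the two switches above (informal; "request" is the
- // caller-supplied type field):
- //
- //	role token_type            resulting token type
- //	default, default-service   request value, else service
- //	default-batch              request value, else batch
- //	service / batch            forced to service / batch
- //	no role                    request value, else service
- //
- // Batch tokens additionally reject explicit_max_ttl, num_uses, and
- // period, and are never renewable.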
-
- // Verify the number of uses is positive
- if data.NumUses < 0 {
- return logical.ErrorResponse("number of uses cannot be negative"),
- logical.ErrInvalidRequest
- }
-
- // Setup the token entry
- te := logical.TokenEntry{
- Parent: req.ClientToken,
-
- // The mount point is always the same since we have only one token
- // store; using req.MountPoint causes trouble in tests since they don't
- // have an official mount
- Path: fmt.Sprintf("auth/token/%s", req.Path),
-
- Meta: data.Metadata,
- DisplayName: "token",
- NumUses: data.NumUses,
- CreationTime: time.Now().Unix(),
- NamespaceID: ns.ID,
- Type: tokenType,
- }
-
- // If the role is not nil, we add the role name as part of the token's
- // path. This makes it much easier to later revoke tokens that were issued
- // by a role (using revoke-prefix). Users can further specify a PathSuffix
- // in the role; that way they can use something like "v1", "v2" to indicate
- // role revisions, and revoke only tokens issued with a previous revision.
- if role != nil {
- te.Role = role.Name
-
- // If renewable hasn't been disabled in the call and the role has
- // renewability disabled, set renewable false
- if renewable && !role.Renewable {
- renewable = false
- }
-
- if role.PathSuffix != "" {
- te.Path = fmt.Sprintf("%s/%s", te.Path, role.PathSuffix)
- }
- }
-
- // Attach the given display name if any
- if data.DisplayName != "" {
- full := "token-" + data.DisplayName
- full = displayNameSanitize.ReplaceAllString(full, "-")
- full = strings.TrimSuffix(full, "-")
- te.DisplayName = full
- }
-
- // Allow specifying the ID of the token if the client has root or sudo privileges
- if data.ID != "" {
- if !isSudo {
- return logical.ErrorResponse("root or sudo privileges required to specify token id"),
- logical.ErrInvalidRequest
- }
- if ns.ID != namespace.RootNamespaceID {
- return logical.ErrorResponse("token IDs can only be manually specified in the root namespace"),
- logical.ErrInvalidRequest
- }
- te.ID = data.ID
- }
-
- resp := &logical.Response{}
-
- var addDefault bool
-
- // N.B.: The logic here uses various calculations as to whether default
- // should be added. In the end we decided that if NoDefaultPolicy is set it
- // should be stripped out regardless, *but*, the logic of when it should
- // and shouldn't be added is kept because we want to do subset comparisons
- // based on adding default when it's correct to do so.
- switch {
- case role != nil && (len(role.AllowedPolicies) > 0 || len(role.DisallowedPolicies) > 0):
- // Holds the final set of policies as they get munged
- var finalPolicies []string
-
- // We don't make use of the global one because roles with allowed or
- // disallowed set do their own policy rules
- var localAddDefault bool
-
- // If the request doesn't say not to add "default" and if "default"
- // isn't in the disallowed list, add it. This is in line with the idea
- // that roles, when allowed/disallowed are set, allow a subset of
- // policies to be set disjoint from the parent token's policies.
- if !data.NoDefaultPolicy && !strutil.StrListContains(role.DisallowedPolicies, "default") {
- localAddDefault = true
- }
-
- // Start with passed-in policies as a baseline, if they exist
- if len(data.Policies) > 0 {
- finalPolicies = policyutil.SanitizePolicies(data.Policies, localAddDefault)
- }
-
- var sanitizedRolePolicies []string
-
- // First check allowed policies; if policies are specified they will be
- // checked, otherwise if an allowed set exists that will be the set
- // that is used
- if len(role.AllowedPolicies) > 0 {
- // Note that if "default" is already in allowed, and also in
- // disallowed, this will still result in an error later since this
- // doesn't strip out default
- sanitizedRolePolicies = policyutil.SanitizePolicies(role.AllowedPolicies, localAddDefault)
-
- if len(finalPolicies) == 0 {
- finalPolicies = sanitizedRolePolicies
- } else {
- if !strutil.StrListSubset(sanitizedRolePolicies, finalPolicies) {
- return logical.ErrorResponse(fmt.Sprintf("token policies (%q) must be subset of the role's allowed policies (%q)", finalPolicies, sanitizedRolePolicies)), logical.ErrInvalidRequest
- }
- }
- } else {
- // Assign parent policies if none have been requested. As this is a
- // role, add default unless explicitly disabled.
- if len(finalPolicies) == 0 {
- finalPolicies = policyutil.SanitizePolicies(parent.Policies, localAddDefault)
- }
- }
-
- if len(role.DisallowedPolicies) > 0 {
- // We don't add the default here because we only want to disallow it if it's explicitly set
- sanitizedRolePolicies = strutil.RemoveDuplicates(role.DisallowedPolicies, true)
-
- for _, finalPolicy := range finalPolicies {
- if strutil.StrListContains(sanitizedRolePolicies, finalPolicy) {
- return logical.ErrorResponse(fmt.Sprintf("token policy %q is disallowed by this role", finalPolicy)), logical.ErrInvalidRequest
- }
- }
- }
-
- data.Policies = finalPolicies
-
- // We are creating a token from a parent namespace. We should only use the input
- // policies.
- case ns.ID != parent.NamespaceID:
- addDefault = !data.NoDefaultPolicy
-
- // No policies specified, inherit parent
- case len(data.Policies) == 0:
- // Only inherit "default" if the parent already has it, so don't touch addDefault here
- data.Policies = policyutil.SanitizePolicies(parent.Policies, policyutil.DoNotAddDefaultPolicy)
-
- // When a role is not in use or does not specify allowed/disallowed, only
- // permit policies to be a subset unless the client has root or sudo
- // privileges. Default is added in this case if the parent has it, unless
- // the client specified for it not to be added.
- case !isSudo:
- // Sanitize passed-in and parent policies before comparison
- sanitizedInputPolicies := policyutil.SanitizePolicies(data.Policies, policyutil.DoNotAddDefaultPolicy)
- sanitizedParentPolicies := policyutil.SanitizePolicies(parent.Policies, policyutil.DoNotAddDefaultPolicy)
-
- if !strutil.StrListSubset(sanitizedParentPolicies, sanitizedInputPolicies) {
- return logical.ErrorResponse("child policies must be subset of parent"), logical.ErrInvalidRequest
- }
-
- // If the parent has default, and they haven't requested not to get it,
- // add it. Note that if they have explicitly put "default" in
- // data.Policies it will still be added because NoDefaultPolicy
- // controls *automatic* adding.
- if !data.NoDefaultPolicy && strutil.StrListContains(parent.Policies, "default") {
- addDefault = true
- }
-
- // Add default by default in this case unless requested not to
- case isSudo:
- addDefault = !data.NoDefaultPolicy
- }
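-
- // Condensed, the policy selection above resolves as (informal summary):
- //
- //	role w/ allowed|disallowed -> must be a subset of allowed and
- //	                              contain nothing disallowed
- //	cross-namespace creation   -> input policies only
- //	no input policies          -> inherit the parent's policies
- //	non-sudo caller            -> input must be a subset of the parent's
- //	sudo caller                -> input policies as given
- //
- // with "default" added per the rules above and stripped again just
- // below when NoDefaultPolicy is set.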
-
- te.Policies = policyutil.SanitizePolicies(data.Policies, addDefault)
-
- // Yes, this is a little inefficient to do it like this, but meh
- if data.NoDefaultPolicy {
- te.Policies = strutil.StrListDelete(te.Policies, "default")
- }
-
- // Prevent internal policies from being assigned to tokens
- for _, policy := range te.Policies {
- if strutil.StrListContains(nonAssignablePolicies, policy) {
- return logical.ErrorResponse(fmt.Sprintf("cannot assign policy %q", policy)), nil
- }
- }
-
- if strutil.StrListContains(te.Policies, "root") {
- // Prevent attempts to create a root token without an actual root token as parent.
- // This is to thwart privilege escalation by tokens having 'sudo' privileges.
- if !strutil.StrListContains(parent.Policies, "root") {
- return logical.ErrorResponse("root tokens may not be created without parent token being root"), logical.ErrInvalidRequest
- }
-
- if te.Type == logical.TokenTypeBatch {
- // Batch tokens cannot be revoked so we should never have root batch tokens
- return logical.ErrorResponse("batch tokens cannot be root tokens"), nil
- }
- }
-
- //
- // NOTE: Do not modify policies below this line. We need the checks above
- // to be the last checks as they must look at the final policy set.
- //
-
- switch {
- case role != nil:
- if role.Orphan {
- te.Parent = ""
- }
-
- if len(role.BoundCIDRs) > 0 {
- te.BoundCIDRs = role.BoundCIDRs
- }
-
- case data.NoParent:
- // Only allow an orphan token if the client has sudo policy
- if !isSudo {
- return logical.ErrorResponse("root or sudo privileges required to create orphan token"),
- logical.ErrInvalidRequest
- }
-
- te.Parent = ""
-
- default:
- // This comes from create-orphan, which can be properly ACLd
- if orphan {
- te.Parent = ""
- }
- }
-
- // At this point, it is clear whether the token is going to be an orphan or
- // not. If the token is not going to be an orphan, inherit the parent's
- // entity identifier into the child token. We must also verify that, if
- // it's not an orphan, the parent isn't a batch token.
- if te.Parent != "" {
- te.EntityID = parent.EntityID
- }
-
- var explicitMaxTTLToUse time.Duration
- if data.ExplicitMaxTTL != "" {
- dur, err := parseutil.ParseDurationSecond(data.ExplicitMaxTTL)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- if dur < 0 {
- return logical.ErrorResponse("explicit_max_ttl must be positive"), logical.ErrInvalidRequest
- }
- te.ExplicitMaxTTL = dur
- explicitMaxTTLToUse = dur
- }
-
- var periodToUse time.Duration
- if data.Period != "" {
- dur, err := parseutil.ParseDurationSecond(data.Period)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-
- switch {
- case dur < 0:
- return logical.ErrorResponse("period must be positive"), logical.ErrInvalidRequest
- case dur == 0:
- default:
- if !isSudo {
- return logical.ErrorResponse("root or sudo privileges required to create periodic token"),
- logical.ErrInvalidRequest
- }
- te.Period = dur
- periodToUse = dur
- }
- }
-
- // Parse the TTL/lease if any
- if data.TTL != "" {
- dur, err := parseutil.ParseDurationSecond(data.TTL)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- if dur < 0 {
- return logical.ErrorResponse("ttl must be positive"), logical.ErrInvalidRequest
- }
- te.TTL = dur
- } else if data.Lease != "" {
- // This block is for backwards compatibility
- dur, err := time.ParseDuration(data.Lease)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- if dur < 0 {
- return logical.ErrorResponse("lease must be positive"), logical.ErrInvalidRequest
- }
- te.TTL = dur
- }
-
- // Set the lesser period/explicit max TTL if defined both in arguments and
- // in role. Batch tokens will error out if not set via role, but here we
- // need to explicitly check
- if role != nil && te.Type != logical.TokenTypeBatch {
- if role.ExplicitMaxTTL != 0 {
- switch {
- case explicitMaxTTLToUse == 0:
- explicitMaxTTLToUse = role.ExplicitMaxTTL
- default:
- if role.ExplicitMaxTTL < explicitMaxTTLToUse {
- explicitMaxTTLToUse = role.ExplicitMaxTTL
- }
- resp.AddWarning(fmt.Sprintf("Explicit max TTL specified both during creation call and in role; using the lesser value of %d seconds", int64(explicitMaxTTLToUse.Seconds())))
- }
- }
- if role.Period != 0 {
- switch {
- case periodToUse == 0:
- periodToUse = role.Period
- default:
- if role.Period < periodToUse {
- periodToUse = role.Period
- }
- resp.AddWarning(fmt.Sprintf("Period specified both during creation call and in role; using the lesser value of %d seconds", int64(periodToUse.Seconds())))
- }
- }
- }
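-
- // For example (illustrative numbers): explicit_max_ttl=2h in the
- // request and 1h on the role yields 1h plus a warning; the same
- // lesser-of rule applies independently to period.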
-
- sysView := ts.System()
-
- // Only calculate a TTL if you are A) periodic, B) have a TTL, C) do not have a TTL and are not a root token
- if periodToUse > 0 || te.TTL > 0 || (te.TTL == 0 && !strutil.StrListContains(te.Policies, "root")) {
- ttl, warnings, err := framework.CalculateTTL(sysView, 0, te.TTL, periodToUse, 0, explicitMaxTTLToUse, time.Unix(te.CreationTime, 0))
- if err != nil {
- return nil, err
- }
- for _, warning := range warnings {
- resp.AddWarning(warning)
- }
- te.TTL = ttl
- }
-
- // Root tokens are still bound by explicit max TTL
- if te.TTL == 0 && explicitMaxTTLToUse > 0 {
- te.TTL = explicitMaxTTLToUse
- }
-
- // Don't advertise non-expiring root tokens as renewable, as attempts to
- // renew them are denied. Don't CIDR-restrict these either.
- if te.TTL == 0 {
- if parent.TTL != 0 {
- return logical.ErrorResponse("expiring root tokens cannot create non-expiring root tokens"), logical.ErrInvalidRequest
- }
- renewable = false
- te.BoundCIDRs = nil
- }
-
- if te.ID != "" {
- resp.AddWarning("Supplying a custom ID for the token uses the weaker SHA1 hashing instead of the more secure SHA2-256 HMAC for token obfuscation. SHA1 hashed tokens on the wire leads to less secure lookups.")
- }
-
- // Create the token
- if err := ts.create(ctx, &te); err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-
- // Generate the response
- resp.Auth = &logical.Auth{
- NumUses: te.NumUses,
- DisplayName: te.DisplayName,
- Policies: te.Policies,
- Metadata: te.Meta,
- LeaseOptions: logical.LeaseOptions{
- TTL: te.TTL,
- Renewable: renewable,
- },
- ClientToken: te.ID,
- Accessor: te.Accessor,
- EntityID: te.EntityID,
- Period: periodToUse,
- ExplicitMaxTTL: explicitMaxTTLToUse,
- CreationPath: te.Path,
- TokenType: te.Type,
- }
-
- for _, p := range te.Policies {
- policy, err := ts.core.policyStore.GetPolicy(ctx, p, PolicyTypeToken)
- if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("could not look up policy %s", p)), nil
- }
- if policy == nil {
- resp.AddWarning(fmt.Sprintf("Policy %q does not exist", p))
- }
- }
-
- return resp, nil
-}
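-
-// End to end, a minimal request into this handler looks like the
-// following (illustrative values):
-//
-//	$ vault token create -policy=default -ttl=1h
-//
-// which reaches handleCreateCommon with orphan=false and a nil role, and
-// returns the new token in resp.Auth.ClientToken.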
-
-// handleRevokeSelf handles the auth/token/revoke-self path for revocation of tokens
-// in a way that revokes all child tokens. Normally, using sys/revoke/leaseID will revoke
-// the token and all children anyway, but that is only available when there is a lease.
-func (ts *TokenStore) handleRevokeSelf(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- return ts.revokeCommon(ctx, req, data, req.ClientToken)
-}
-
-// handleRevokeTree handles the auth/token/revoke/id path for revocation of tokens
-// in a way that revokes all child tokens. Normally, using sys/revoke/leaseID will revoke
-// the token and all children anyway, but that is only available when there is a lease.
-func (ts *TokenStore) handleRevokeTree(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- id := data.Get("token").(string)
- if id == "" {
- return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
- }
-
- if resp, err := ts.revokeCommon(ctx, req, data, id); resp != nil || err != nil {
- return resp, err
- }
-
- return nil, nil
-}
-
-func (ts *TokenStore) revokeCommon(ctx context.Context, req *logical.Request, data *framework.FieldData, id string) (*logical.Response, error) {
- te, err := ts.Lookup(ctx, id)
- if err != nil {
- return nil, err
- }
- if te == nil {
- return nil, nil
- }
-
- if te.Type == logical.TokenTypeBatch {
- return logical.ErrorResponse("batch tokens cannot be revoked"), nil
- }
-
- tokenNS, err := NamespaceByID(ctx, te.NamespaceID, ts.core)
- if err != nil {
- return nil, err
- }
- if tokenNS == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- revokeCtx := namespace.ContextWithNamespace(ts.quitContext, tokenNS)
- leaseID, err := ts.expiration.CreateOrFetchRevocationLeaseByToken(revokeCtx, te)
- if err != nil {
- return nil, err
- }
-
- err = ts.expiration.Revoke(revokeCtx, leaseID)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-// handleRevokeOrphan handles the auth/token/revoke-orphan/id path for revocation of tokens
-// in a way that leaves child tokens orphaned. Normally, using sys/revoke/leaseID will revoke
-// the token and all children.
-func (ts *TokenStore) handleRevokeOrphan(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- // Parse the id
- id := data.Get("token").(string)
- if id == "" {
- return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
- }
-
- // Check if the client token has sudo/root privileges for the requested path
- isSudo := ts.System().SudoPrivilege(ctx, req.MountPoint+req.Path, req.ClientToken)
-
- if !isSudo {
- return logical.ErrorResponse("root or sudo privileges required to revoke and orphan"),
- logical.ErrInvalidRequest
- }
-
- // Do a lookup. Among other things, that will ensure that this is either
- // running in the same namespace or a parent.
- te, err := ts.Lookup(ctx, id)
- if err != nil {
- return nil, errwrap.Wrapf("error when looking up token to revoke: {{err}}", err)
- }
- if te == nil {
- return logical.ErrorResponse("token to revoke not found"), logical.ErrInvalidRequest
- }
-
- if te.Type == logical.TokenTypeBatch {
- return logical.ErrorResponse("batch tokens cannot be revoked"), nil
- }
-
- // Revoke and orphan
- if err := ts.revokeOrphan(ctx, id); err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-
- return nil, nil
-}
-
-func (ts *TokenStore) handleLookupSelf(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- data.Raw["token"] = req.ClientToken
- return ts.handleLookup(ctx, req, data)
-}
-
-// handleLookup handles the auth/token/lookup/id path for querying information about
-// a particular token. This can be used to see which policies are applicable.
-func (ts *TokenStore) handleLookup(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- id := data.Get("token").(string)
- if id == "" {
- id = req.ClientToken
- }
- if id == "" {
- return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
- }
-
- lock := locksutil.LockForKey(ts.tokenLocks, id)
- lock.RLock()
- defer lock.RUnlock()
-
- out, err := ts.lookupInternal(ctx, id, false, true)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
-
- if out == nil {
- return logical.ErrorResponse("bad token"), logical.ErrPermissionDenied
- }
-
- // Generate a response. We purposely omit the parent reference; otherwise
- // you could escalate your privileges.
- resp := &logical.Response{
- Data: map[string]interface{}{
- "id": out.ID,
- "accessor": out.Accessor,
- "policies": out.Policies,
- "path": out.Path,
- "meta": out.Meta,
- "display_name": out.DisplayName,
- "num_uses": out.NumUses,
- "orphan": false,
- "creation_time": int64(out.CreationTime),
- "creation_ttl": int64(out.TTL.Seconds()),
- "expire_time": nil,
- "ttl": int64(0),
- "explicit_max_ttl": int64(out.ExplicitMaxTTL.Seconds()),
- "entity_id": out.EntityID,
- "type": out.Type.String(),
- },
- }
-
- if out.Parent == "" {
- resp.Data["orphan"] = true
- }
-
- if out.Role != "" {
- resp.Data["role"] = out.Role
- }
-
- if out.Period != 0 {
- resp.Data["period"] = int64(out.Period.Seconds())
- }
-
- if len(out.BoundCIDRs) > 0 {
- resp.Data["bound_cidrs"] = out.BoundCIDRs
- }
-
- tokenNS, err := NamespaceByID(ctx, out.NamespaceID, ts.core)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- if tokenNS == nil {
- return nil, namespace.ErrNoNamespace
- }
-
- if out.NamespaceID != namespace.RootNamespaceID {
- resp.Data["namespace_path"] = tokenNS.Path
- }
-
- // Fetch the last renewal time
- leaseTimes, err := ts.expiration.FetchLeaseTimesByToken(ctx, out)
- if err != nil {
- return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
- }
- if leaseTimes != nil {
- if !leaseTimes.LastRenewalTime.IsZero() {
- resp.Data["last_renewal_time"] = leaseTimes.LastRenewalTime.Unix()
- resp.Data["last_renewal"] = leaseTimes.LastRenewalTime
- }
- if !leaseTimes.ExpireTime.IsZero() {
- resp.Data["expire_time"] = leaseTimes.ExpireTime
- resp.Data["ttl"] = leaseTimes.ttl()
- }
- renewable, _ := leaseTimes.renewable()
- resp.Data["renewable"] = renewable
- resp.Data["issue_time"] = leaseTimes.IssueTime
- }
-
- if out.EntityID != "" {
- _, identityPolicies, err := ts.core.fetchEntityAndDerivedPolicies(ctx, tokenNS, out.EntityID)
- if err != nil {
- return nil, err
- }
- if len(identityPolicies) != 0 {
- resp.Data["identity_policies"] = identityPolicies[out.NamespaceID]
- delete(identityPolicies, out.NamespaceID)
- resp.Data["external_namespace_policies"] = identityPolicies
- }
- }
-
- return resp, nil
-}
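-
-// A trimmed example of the response data assembled above (values are
-// illustrative):
-//
-//	{
-//	  "id": "...", "accessor": "...", "policies": ["default"],
-//	  "path": "auth/token/create", "orphan": false,
-//	  "creation_ttl": 3600, "ttl": 3542, "renewable": true,
-//	  "type": "service"
-//	}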
-
-func (ts *TokenStore) handleRenewSelf(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- data.Raw["token"] = req.ClientToken
- return ts.handleRenew(ctx, req, data)
-}
-
-// handleRenew handles the auth/token/renew/id path for renewal of tokens.
-// This is used to prevent token expiration and revocation.
-func (ts *TokenStore) handleRenew(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- id := data.Get("token").(string)
- if id == "" {
- return logical.ErrorResponse("missing token ID"), logical.ErrInvalidRequest
- }
- incrementRaw := data.Get("increment").(int)
-
- // Convert the increment
- increment := time.Duration(incrementRaw) * time.Second
-
- // Lookup the token
- te, err := ts.Lookup(ctx, id)
- if err != nil {
- return nil, errwrap.Wrapf("error looking up token to renew: {{err}}", err)
- }
- if te == nil {
- return logical.ErrorResponse("token not found"), logical.ErrInvalidRequest
- }
-
- var resp *logical.Response
-
- if te.Type == logical.TokenTypeBatch {
- return logical.ErrorResponse("batch tokens cannot be renewed"), nil
- }
-
- // Renew the token and its children
- resp, err = ts.expiration.RenewToken(ctx, req, te, increment)
-
- return resp, err
-}
-
-func (ts *TokenStore) authRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- if req.Auth == nil {
- return nil, fmt.Errorf("request auth is nil")
- }
-
- te, err := ts.Lookup(ctx, req.Auth.ClientToken)
- if err != nil {
- return nil, errwrap.Wrapf("error looking up token: {{err}}", err)
- }
- if te == nil {
- return nil, fmt.Errorf("no token entry found during lookup")
- }
-
- if te.Role == "" {
- req.Auth.Period = te.Period
- req.Auth.ExplicitMaxTTL = te.ExplicitMaxTTL
- return &logical.Response{Auth: req.Auth}, nil
- }
-
- role, err := ts.tokenStoreRole(ctx, te.Role)
- if err != nil {
- return nil, errwrap.Wrapf(fmt.Sprintf("error looking up role %q: {{err}}", te.Role), err)
- }
- if role == nil {
- return nil, fmt.Errorf("original token role %q could not be found, not renewing", te.Role)
- }
-
- req.Auth.Period = role.Period
- req.Auth.ExplicitMaxTTL = role.ExplicitMaxTTL
- return &logical.Response{Auth: req.Auth}, nil
-}
-
-func (ts *TokenStore) tokenStoreRole(ctx context.Context, name string) (*tsRoleEntry, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- entry, err := ts.rolesView(ns).Get(ctx, name)
- if err != nil {
- return nil, err
- }
- if entry == nil {
- return nil, nil
- }
-
- var result tsRoleEntry
- if err := entry.DecodeJSON(&result); err != nil {
- return nil, err
- }
-
- if result.TokenType == logical.TokenTypeDefault {
- result.TokenType = logical.TokenTypeDefaultService
- }
-
- return &result, nil
-}
-
-func (ts *TokenStore) tokenStoreRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- entries, err := ts.rolesView(ns).List(ctx, "")
- if err != nil {
- return nil, err
- }
-
- ret := make([]string, len(entries))
- for i, entry := range entries {
- ret[i] = strings.TrimPrefix(entry, rolesPrefix)
- }
-
- return logical.ListResponse(ret), nil
-}
-
-func (ts *TokenStore) tokenStoreRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- err = ts.rolesView(ns).Delete(ctx, data.Get("role_name").(string))
- if err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (ts *TokenStore) tokenStoreRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- role, err := ts.tokenStoreRole(ctx, data.Get("role_name").(string))
- if err != nil {
- return nil, err
- }
- if role == nil {
- return nil, nil
- }
-
- resp := &logical.Response{
- Data: map[string]interface{}{
- "period": int64(role.Period.Seconds()),
- "explicit_max_ttl": int64(role.ExplicitMaxTTL.Seconds()),
- "disallowed_policies": role.DisallowedPolicies,
- "allowed_policies": role.AllowedPolicies,
- "name": role.Name,
- "orphan": role.Orphan,
- "path_suffix": role.PathSuffix,
- "renewable": role.Renewable,
- "token_type": role.TokenType.String(),
- },
- }
-
- if len(role.BoundCIDRs) > 0 {
- resp.Data["bound_cidrs"] = role.BoundCIDRs
- }
-
- return resp, nil
-}
-
-func (ts *TokenStore) tokenStoreRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
- name := data.Get("role_name").(string)
- if name == "" {
- return false, fmt.Errorf("role name cannot be empty")
- }
- role, err := ts.tokenStoreRole(ctx, name)
- if err != nil {
- return false, err
- }
-
- return role != nil, nil
-}
-
-func (ts *TokenStore) tokenStoreRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- name := data.Get("role_name").(string)
- if name == "" {
- return logical.ErrorResponse("role name cannot be empty"), nil
- }
- entry, err := ts.tokenStoreRole(ctx, name)
- if err != nil {
- return nil, err
- }
-
- // Due to the existence check, entry will only be nil if it's a create
- // operation, so just create a new one
- if entry == nil {
- entry = &tsRoleEntry{
- Name: name,
- }
- }
-
- // In this series of blocks, if we do not find a user-provided value and
- // it's a creation operation, we call data.Get to get the appropriate
- // default
-
- orphanInt, ok := data.GetOk("orphan")
- if ok {
- entry.Orphan = orphanInt.(bool)
- } else if req.Operation == logical.CreateOperation {
- entry.Orphan = data.Get("orphan").(bool)
- }
-
- periodInt, ok := data.GetOk("period")
- if ok {
- entry.Period = time.Second * time.Duration(periodInt.(int))
- } else if req.Operation == logical.CreateOperation {
- entry.Period = time.Second * time.Duration(data.Get("period").(int))
- }
-
- renewableInt, ok := data.GetOk("renewable")
- if ok {
- entry.Renewable = renewableInt.(bool)
- } else if req.Operation == logical.CreateOperation {
- entry.Renewable = data.Get("renewable").(bool)
- }
-
- boundCIDRsRaw, ok := data.GetOk("bound_cidrs")
- if ok {
- boundCIDRs := boundCIDRsRaw.([]string)
- if len(boundCIDRs) > 0 {
- var parsedCIDRs []*sockaddr.SockAddrMarshaler
- for _, v := range boundCIDRs {
- parsedCIDR, err := sockaddr.NewSockAddr(v)
- if err != nil {
- return logical.ErrorResponse(errwrap.Wrapf(fmt.Sprintf("invalid value %q when parsing bound cidrs: {{err}}", v), err).Error()), nil
- }
- parsedCIDRs = append(parsedCIDRs, &sockaddr.SockAddrMarshaler{SockAddr: parsedCIDR})
- }
- entry.BoundCIDRs = parsedCIDRs
- }
- }
-
- var resp *logical.Response
-
- explicitMaxTTLInt, ok := data.GetOk("explicit_max_ttl")
- if ok {
- entry.ExplicitMaxTTL = time.Second * time.Duration(explicitMaxTTLInt.(int))
- } else if req.Operation == logical.CreateOperation {
- entry.ExplicitMaxTTL = time.Second * time.Duration(data.Get("explicit_max_ttl").(int))
- }
- if entry.ExplicitMaxTTL != 0 {
- sysView := ts.System()
-
- if sysView.MaxLeaseTTL() != time.Duration(0) && entry.ExplicitMaxTTL > sysView.MaxLeaseTTL() {
- if resp == nil {
- resp = &logical.Response{}
- }
- resp.AddWarning(fmt.Sprintf(
- "Given explicit max TTL of %d is greater than system/mount allowed value of %d seconds; until this is fixed attempting to create tokens against this role will result in an error",
- int64(entry.ExplicitMaxTTL.Seconds()), int64(sysView.MaxLeaseTTL().Seconds())))
- }
- }
-
- pathSuffixInt, ok := data.GetOk("path_suffix")
- if ok {
- pathSuffix := pathSuffixInt.(string)
- if pathSuffix != "" {
- matched := pathSuffixSanitize.MatchString(pathSuffix)
- if !matched {
- return logical.ErrorResponse(fmt.Sprintf(
- "given role path suffix contains invalid characters; must match %s",
- pathSuffixSanitize.String())), nil
- }
- entry.PathSuffix = pathSuffix
- }
- } else if req.Operation == logical.CreateOperation {
- entry.PathSuffix = data.Get("path_suffix").(string)
- }
-
- if strings.Contains(entry.PathSuffix, "..") {
- return logical.ErrorResponse(fmt.Sprintf("error registering path suffix: %s", consts.ErrPathContainsParentReferences)), nil
- }
-
- allowedPoliciesRaw, ok := data.GetOk("allowed_policies")
- if ok {
- entry.AllowedPolicies = policyutil.SanitizePolicies(allowedPoliciesRaw.([]string), policyutil.DoNotAddDefaultPolicy)
- } else if req.Operation == logical.CreateOperation {
- entry.AllowedPolicies = policyutil.SanitizePolicies(data.Get("allowed_policies").([]string), policyutil.DoNotAddDefaultPolicy)
- }
-
- disallowedPoliciesRaw, ok := data.GetOk("disallowed_policies")
- if ok {
- entry.DisallowedPolicies = strutil.RemoveDuplicates(disallowedPoliciesRaw.([]string), true)
- } else if req.Operation == logical.CreateOperation {
- entry.DisallowedPolicies = strutil.RemoveDuplicates(data.Get("disallowed_policies").([]string), true)
- }
-
- tokenType := entry.TokenType
- if tokenType == logical.TokenTypeDefault {
- tokenType = logical.TokenTypeDefaultService
- }
- tokenTypeRaw, ok := data.GetOk("token_type")
- if ok {
- tokenTypeStr := tokenTypeRaw.(string)
- switch tokenTypeStr {
- case "service":
- tokenType = logical.TokenTypeService
- case "batch":
- tokenType = logical.TokenTypeBatch
- case "default-service":
- tokenType = logical.TokenTypeDefaultService
- case "default-batch":
- tokenType = logical.TokenTypeDefaultBatch
- default:
- return logical.ErrorResponse(fmt.Sprintf("invalid 'token_type' value %q", tokenTypeStr)), nil
- }
- } else if req.Operation == logical.CreateOperation {
- tokenType = logical.TokenTypeDefaultService
- }
- entry.TokenType = tokenType
-
- if entry.TokenType == logical.TokenTypeBatch {
- if !entry.Orphan {
- return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate non-orphan tokens"), nil
- }
- if entry.Period != 0 {
- return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate periodic tokens"), nil
- }
- if entry.Renewable {
- return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate renewable tokens"), nil
- }
- if entry.ExplicitMaxTTL != 0 {
- return logical.ErrorResponse("'token_type' cannot be 'batch' when role is set to generate tokens with an explicit max TTL"), nil
- }
- }
-
- ns, err := namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
-
- // Store it
- jsonEntry, err := logical.StorageEntryJSON(name, entry)
- if err != nil {
- return nil, err
- }
- if err := ts.rolesView(ns).Put(ctx, jsonEntry); err != nil {
- return nil, err
- }
-
- return resp, nil
-}
-
-const (
- tokenTidyHelp = `
-This endpoint performs cleanup tasks that can be run if certain error
-conditions have occurred.
-`
- tokenTidyDesc = `
-This endpoint performs cleanup tasks that can be run to clean up token and
-lease entries after certain error conditions. Usually running this is not
-necessary, and is only required if upgrade notes or support personnel suggest
-it.
-`
- tokenBackendHelp = `The token credential backend is always enabled and built into Vault.
-Client tokens are used to identify a client and to allow Vault to associate policies and ACLs
-which are enforced on every request. This backend also allows for generating sub-tokens as well
-as revocation of tokens. The tokens are renewable if associated with a lease.`
- tokenCreateHelp = `The token create path is used to create new tokens.`
- tokenCreateOrphanHelp = `The token create path is used to create new orphan tokens.`
- tokenCreateRoleHelp = `This token create path is used to create new tokens adhering to the given role.`
- tokenListRolesHelp = `This endpoint lists configured roles.`
- tokenLookupAccessorHelp = `This endpoint will look up the token associated with the given accessor and its properties. The response will not contain the token ID.`
- tokenLookupHelp = `This endpoint will look up a token and its properties.`
- tokenPathRolesHelp = `This endpoint allows creating, reading, and deleting roles.`
- tokenRevokeAccessorHelp = `This endpoint will delete the token associated with the accessor and all of its child tokens.`
- tokenRevokeHelp = `This endpoint will delete the given token and all of its child tokens.`
- tokenRevokeSelfHelp = `This endpoint will delete the token used to call it and all of its child tokens.`
- tokenRevokeOrphanHelp = `This endpoint will delete the token and orphan its child tokens.`
- tokenRenewHelp = `This endpoint will renew the given token and prevent expiration.`
- tokenRenewSelfHelp = `This endpoint will renew the token used to call it and prevent expiration.`
- tokenAllowedPoliciesHelp = `If set, tokens can be created with any subset of the policies in this
-list, rather than the normal semantics of tokens being a subset of the
-calling token's policies. The parameter is a comma-delimited string of
-policy names.`
- tokenDisallowedPoliciesHelp = `If set, successful token creation via this role will require that
-no policies in the given list are requested. The parameter is a comma-delimited string of policy names.`
- tokenOrphanHelp = `If true, tokens created via this role
-will be orphan tokens (have no parent).`
- tokenPeriodHelp = `If set, tokens created via this role
-will have no max lifetime; instead, their
-renewal period will be fixed to this value.
-This takes an integer number of seconds,
-or a string duration (e.g. "24h").`
- tokenPathSuffixHelp = `If set, tokens created via this role
-will contain the given suffix as a part of
-their path. This can be used to assist use
-of the 'revoke-prefix' endpoint later on.
-The given suffix must match the regular
-expression.`
- tokenExplicitMaxTTLHelp = `If set, tokens created via this role
-carry an explicit maximum TTL. During renewal,
-the current maximum TTL values of the role
-and the mount are not checked for changes,
-and any updates to these values will have
-no effect on the token being renewed.`
- tokenRenewableHelp = `Tokens created via this role will be
-renewable or not according to this value.
-Defaults to "true".`
- tokenListAccessorsHelp = `List token accessors, which can then
-be used to iterate and discover their properties
-or revoke them. Because this can be used to
-cause a denial of service, this endpoint
-requires 'sudo' capability in addition to
-'list'.`
-)
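
The create/update handler above applies the same three-way pattern to every field: use the client-supplied value when `GetOk` reports one, fall back to the schema default via `Get` only on a create operation, and on an update leave the stored value untouched. Below is a minimal standalone sketch of that pattern; the `fieldData` and `roleEntry` types are illustrative stand-ins, not Vault's `framework.FieldData`:

```go
package main

import "fmt"

// fieldData mimics just enough of framework.FieldData for the sketch:
// GetOk reports whether the client supplied a value; Get falls back to
// the schema default when it did not.
type fieldData struct {
	raw      map[string]interface{}
	defaults map[string]interface{}
}

func (d *fieldData) GetOk(name string) (interface{}, bool) {
	v, ok := d.raw[name]
	return v, ok
}

func (d *fieldData) Get(name string) interface{} {
	if v, ok := d.raw[name]; ok {
		return v
	}
	return d.defaults[name]
}

type roleEntry struct{ Orphan bool }

// applyOrphan has the shape of each block in tokenStoreRoleCreateUpdate.
func applyOrphan(entry *roleEntry, d *fieldData, isCreate bool) {
	if v, ok := d.GetOk("orphan"); ok {
		entry.Orphan = v.(bool) // explicit client value always wins
	} else if isCreate {
		entry.Orphan = d.Get("orphan").(bool) // default only on create
	}
	// On update with no value supplied, the stored field is untouched.
}

func main() {
	d := &fieldData{
		raw:      map[string]interface{}{}, // client sent nothing
		defaults: map[string]interface{}{"orphan": false},
	}

	created := &roleEntry{}
	applyOrphan(created, d, true)
	fmt.Println(created.Orphan) // false: schema default applied on create

	existing := &roleEntry{Orphan: true}
	applyOrphan(existing, d, false)
	fmt.Println(existing.Orphan) // true: update preserves the stored value
}
```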
diff --git a/vendor/github.com/hashicorp/vault/vault/token_store_util.go b/vendor/github.com/hashicorp/vault/vault/token_store_util.go
deleted file mode 100644
index ca1f39a1..00000000
--- a/vendor/github.com/hashicorp/vault/vault/token_store_util.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "github.com/hashicorp/vault/helper/namespace"
-)
-
-func (ts *TokenStore) baseView(ns *namespace.Namespace) *BarrierView {
- return ts.baseBarrierView
-}
-
-func (ts *TokenStore) idView(ns *namespace.Namespace) *BarrierView {
- return ts.idBarrierView
-}
-
-func (ts *TokenStore) accessorView(ns *namespace.Namespace) *BarrierView {
- return ts.accessorBarrierView
-}
-
-func (ts *TokenStore) parentView(ns *namespace.Namespace) *BarrierView {
- return ts.parentBarrierView
-}
-
-func (ts *TokenStore) rolesView(ns *namespace.Namespace) *BarrierView {
- return ts.rolesBarrierView
-}
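
These stubs exist so open-source builds compile without namespace plumbing: the `!enterprise` build tag selects this file, and an enterprise-tagged counterpart can supply namespace-aware implementations behind the same method set. A sketch of that build-tag pattern under illustrative names (not Vault's):

```go
// +build !enterprise

// Package store sketches the pattern used by token_store_util.go: this
// open-source file ignores the namespace argument, while a file tagged
// `// +build enterprise` in the enterprise tree provides a
// per-namespace implementation of the same method, so callers compile
// identically against either build.
package store

type View struct{ prefix string }

type Store struct{ base *View }

type Namespace struct{ ID string }

// view deliberately ignores ns: without namespace support there is
// only a single flat view to return.
func (s *Store) view(ns *Namespace) *View { return s.base }
```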
diff --git a/vendor/github.com/hashicorp/vault/vault/ui.go b/vendor/github.com/hashicorp/vault/vault/ui.go
deleted file mode 100644
index 7a637f20..00000000
--- a/vendor/github.com/hashicorp/vault/vault/ui.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package vault
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "strings"
- "sync"
-
- "github.com/hashicorp/vault/logical"
- "github.com/hashicorp/vault/physical"
-)
-
-const (
- uiConfigKey = "config"
- uiConfigPlaintextKey = "config_plaintext"
-)
-
-// UIConfig contains UI configuration. It takes both a physical view and a barrier view
-// because the configuration is stored both in plaintext and encrypted, allowing the header
-// values to be read before the barrier is unsealed.
-type UIConfig struct {
- l sync.RWMutex
- physicalStorage physical.Backend
- barrierStorage logical.Storage
-
- enabled bool
- defaultHeaders http.Header
-}
-
-// NewUIConfig creates a new UI config
-func NewUIConfig(enabled bool, physicalStorage physical.Backend, barrierStorage logical.Storage) *UIConfig {
- defaultHeaders := http.Header{}
- defaultHeaders.Set("Content-Security-Policy", "default-src 'none'; connect-src 'self'; img-src 'self' data:; script-src 'self'; style-src 'unsafe-inline' 'self'; form-action 'none'; frame-ancestors 'none'")
-
- return &UIConfig{
- physicalStorage: physicalStorage,
- barrierStorage: barrierStorage,
- enabled: enabled,
- defaultHeaders: defaultHeaders,
- }
-}
-
-// Enabled returns if the UI is enabled
-func (c *UIConfig) Enabled() bool {
- c.l.RLock()
- defer c.l.RUnlock()
- return c.enabled
-}
-
-// Headers returns the response headers that should be returned in the UI
-func (c *UIConfig) Headers(ctx context.Context) (http.Header, error) {
- c.l.RLock()
- defer c.l.RUnlock()
-
- config, err := c.get(ctx)
- if err != nil {
- return nil, err
- }
- headers := make(http.Header)
- if config != nil {
- headers = config.Headers
- }
-
- for k := range c.defaultHeaders {
- if headers.Get(k) == "" {
- v := c.defaultHeaders.Get(k)
- headers.Set(k, v)
- }
- }
- return headers, nil
-}
-
-// HeaderKeys returns the list of the configured headers
-func (c *UIConfig) HeaderKeys(ctx context.Context) ([]string, error) {
- c.l.RLock()
- defer c.l.RUnlock()
-
- config, err := c.get(ctx)
- if err != nil {
- return nil, err
- }
- if config == nil {
- return nil, nil
- }
- var keys []string
- for k := range config.Headers {
- keys = append(keys, k)
- }
- return keys, nil
-}
-
-// GetHeader retrieves the configured value for the given header
-func (c *UIConfig) GetHeader(ctx context.Context, header string) (string, error) {
- c.l.RLock()
- defer c.l.RUnlock()
-
- config, err := c.get(ctx)
- if err != nil {
- return "", err
- }
- if config == nil {
- return "", nil
- }
-
- value := config.Headers.Get(header)
- return value, nil
-}
-
-// SetHeader sets the value for the given header
-func (c *UIConfig) SetHeader(ctx context.Context, header, value string) error {
- c.l.Lock()
- defer c.l.Unlock()
-
- config, err := c.get(ctx)
- if err != nil {
- return err
- }
- if config == nil {
- config = &uiConfigEntry{
- Headers: http.Header{},
- }
- }
- config.Headers.Set(header, value)
- return c.save(ctx, config)
-}
-
-// DeleteHeader deletes the header configuration for the given header
-func (c *UIConfig) DeleteHeader(ctx context.Context, header string) error {
- c.l.Lock()
- defer c.l.Unlock()
-
- config, err := c.get(ctx)
- if err != nil {
- return err
- }
- if config == nil {
- return nil
- }
-
- config.Headers.Del(header)
- return c.save(ctx, config)
-}
-
-func (c *UIConfig) get(ctx context.Context) (*uiConfigEntry, error) {
- // Always read the plaintext value so it can be checked against the barrier value
- plaintextConfigRaw, err := c.physicalStorage.Get(ctx, uiConfigPlaintextKey)
- if err != nil {
- return nil, err
- }
-
- configRaw, err := c.barrierStorage.Get(ctx, uiConfigKey)
- if err == nil {
- if configRaw == nil {
- return nil, nil
- }
- config := new(uiConfigEntry)
- if err := json.Unmarshal(configRaw.Value, config); err != nil {
- return nil, err
- }
- // Check that the plaintext value matches the barrier value; if not, sync them
- if plaintextConfigRaw == nil || !bytes.Equal(plaintextConfigRaw.Value, configRaw.Value) {
- if err := c.save(ctx, config); err != nil {
- return nil, err
- }
- }
- return config, nil
- }
-
- // If the error is anything other than the barrier being sealed, return it
- if !strings.Contains(err.Error(), ErrBarrierSealed.Error()) {
- return nil, err
- }
-
- // Respond with the plaintext value
- if plaintextConfigRaw == nil {
- return nil, nil
- }
- config := new(uiConfigEntry)
- if err := json.Unmarshal(plaintextConfigRaw.Value, config); err != nil {
- return nil, err
- }
- return config, nil
-}
-
-func (c *UIConfig) save(ctx context.Context, config *uiConfigEntry) error {
- if len(config.Headers) == 0 {
- if err := c.physicalStorage.Delete(ctx, uiConfigPlaintextKey); err != nil {
- return err
- }
- return c.barrierStorage.Delete(ctx, uiConfigKey)
- }
-
- configRaw, err := json.Marshal(config)
- if err != nil {
- return err
- }
-
- entry := &physical.Entry{
- Key: uiConfigPlaintextKey,
- Value: configRaw,
- }
- if err := c.physicalStorage.Put(ctx, entry); err != nil {
- return err
- }
-
- barrEntry := &logical.StorageEntry{
- Key: uiConfigKey,
- Value: configRaw,
- }
- return c.barrierStorage.Put(ctx, barrEntry)
-}
-
-type uiConfigEntry struct {
- Headers http.Header `json:"headers"`
-}
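
The merge performed in `Headers` is the only subtle part of this file: configured headers win, and each default is applied only when nothing is configured for that key. The same logic restated as a standalone helper (the function name is mine, not Vault's):

```go
package main

import (
	"fmt"
	"net/http"
)

// mergeHeaders mirrors the loop in UIConfig.Headers: configured values
// win, and each default is applied only when the configured set has
// nothing for that key.
func mergeHeaders(configured, defaults http.Header) http.Header {
	merged := make(http.Header)
	for k, vs := range configured {
		merged[k] = vs
	}
	for k := range defaults {
		if merged.Get(k) == "" {
			merged.Set(k, defaults.Get(k))
		}
	}
	return merged
}

func main() {
	defaults := http.Header{}
	defaults.Set("Content-Security-Policy", "default-src 'none'")

	configured := http.Header{}
	configured.Set("X-Custom", "1")

	h := mergeHeaders(configured, defaults)
	fmt.Println(h.Get("Content-Security-Policy")) // default filled in
	fmt.Println(h.Get("X-Custom"))                // configured value kept
}
```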
diff --git a/vendor/github.com/hashicorp/vault/vault/util.go b/vendor/github.com/hashicorp/vault/vault/util.go
deleted file mode 100644
index 9e03afd2..00000000
--- a/vendor/github.com/hashicorp/vault/vault/util.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package vault
-
-import (
- "crypto/rand"
- "fmt"
-)
-
-// memzero is used to zero out a byte buffer. This specific format is optimized
-// by the compiler to use memclr to improve performance. See this code review:
-// https://codereview.appspot.com/137880043
-//
-// Use of memzero is not a guarantee against memory analysis as described in
-// the Vault threat model:
-// https://www.vaultproject.io/docs/internals/security.html . Vault does not
-// provide guarantees against memory analysis or raw memory dumping by
-// operators, however it does minimize this exposure by zeroing out buffers
-// that contain secrets as soon as they are no longer used. Starting with Go
-// 1.5, the garbage collector was changed to become a "generational copying
-// garbage collector." This change to the garbage collector makes it
-// impossible for Vault to guarantee a buffer with a secret has not been
-// copied during a garbage collection. It is therefore possible that secrets
-// may exist in memory that have not been wiped despite a pending memzero
-// call. Over time any copied data with a secret will be reused and the
-// memory overwritten, thereby mitigating some of the risk from this threat
-// vector.
-func memzero(b []byte) {
- if b == nil {
- return
- }
- for i := range b {
- b[i] = 0
- }
-}
-
-// randbytes is used to create a buffer of size n filled with random bytes
-func randbytes(n int) []byte {
- buf := make([]byte, n)
- if _, err := rand.Read(buf); err != nil {
- panic(fmt.Sprintf("failed to generate %d random bytes: %v", n, err))
- }
- return buf
-}
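
The intended calling pattern for `memzero` is to wipe a buffer in place as soon as the secret in it is no longer needed; the loop shape matters because the compiler recognizes exactly this form and lowers it to a memclr. A small demo, which re-declares the helper so it runs outside the vault package:

```go
package main

import (
	"crypto/rand"
	"fmt"
)

// memzero re-declares the helper above; the simple indexed loop is the
// form the compiler optimizes into an efficient memclr.
func memzero(b []byte) {
	for i := range b {
		b[i] = 0
	}
}

func main() {
	secret := make([]byte, 8)
	if _, err := rand.Read(secret); err != nil {
		panic(err)
	}

	// ... use secret ...

	memzero(secret)
	fmt.Println(secret) // [0 0 0 0 0 0 0 0]
}
```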
diff --git a/vendor/github.com/hashicorp/vault/vault/wrapping.go b/vendor/github.com/hashicorp/vault/vault/wrapping.go
deleted file mode 100644
index 81c750a0..00000000
--- a/vendor/github.com/hashicorp/vault/vault/wrapping.go
+++ /dev/null
@@ -1,376 +0,0 @@
-package vault
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "encoding/json"
- "fmt"
- "strings"
- "time"
-
- "github.com/SermoDigital/jose/crypto"
- "github.com/SermoDigital/jose/jws"
- "github.com/SermoDigital/jose/jwt"
- "github.com/hashicorp/errwrap"
- "github.com/hashicorp/vault/helper/consts"
- "github.com/hashicorp/vault/helper/jsonutil"
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/logical"
-)
-
-const (
- // The location of the key used to generate response-wrapping JWTs
- coreWrappingJWTKeyPath = "core/wrapping/jwtkey"
-)
-
-func (c *Core) ensureWrappingKey(ctx context.Context) error {
- entry, err := c.barrier.Get(ctx, coreWrappingJWTKeyPath)
- if err != nil {
- return err
- }
-
- var keyParams clusterKeyParams
-
- if entry == nil {
- key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
- if err != nil {
- return errwrap.Wrapf("failed to generate wrapping key: {{err}}", err)
- }
- keyParams.D = key.D
- keyParams.X = key.X
- keyParams.Y = key.Y
- keyParams.Type = corePrivateKeyTypeP521
- val, err := jsonutil.EncodeJSON(keyParams)
- if err != nil {
- return errwrap.Wrapf("failed to encode wrapping key: {{err}}", err)
- }
- entry = &Entry{
- Key: coreWrappingJWTKeyPath,
- Value: val,
- }
- if err = c.barrier.Put(ctx, entry); err != nil {
- return errwrap.Wrapf("failed to store wrapping key: {{err}}", err)
- }
- }
-
- // Redundant if we just created it, but in this case serves as a check anyways
- if err = jsonutil.DecodeJSON(entry.Value, &keyParams); err != nil {
- return errwrap.Wrapf("failed to decode wrapping key parameters: {{err}}", err)
- }
-
- c.wrappingJWTKey = &ecdsa.PrivateKey{
- PublicKey: ecdsa.PublicKey{
- Curve: elliptic.P521(),
- X: keyParams.X,
- Y: keyParams.Y,
- },
- D: keyParams.D,
- }
-
- c.logger.Info("loaded wrapping token key")
-
- return nil
-}
-
-func (c *Core) wrapInCubbyhole(ctx context.Context, req *logical.Request, resp *logical.Response, auth *logical.Auth) (*logical.Response, error) {
- if c.perfStandby {
- return forwardWrapRequest(ctx, c, req, resp, auth)
- }
-
- // Before wrapping, obey special rules for listing: if no entries are
- // found, return a 404. This prevents unwrapping only to find empty data.
- if req.Operation == logical.ListOperation {
- if resp == nil || (len(resp.Data) == 0 && len(resp.Warnings) == 0) {
- return nil, logical.ErrUnsupportedPath
- }
-
- keysRaw, ok := resp.Data["keys"]
- if !ok || keysRaw == nil {
- if len(resp.Data) > 0 || len(resp.Warnings) > 0 {
- // We could be returning extra metadata on a list, or returning
- // warnings with no data, so handle these cases
- goto DONELISTHANDLING
- }
- return nil, logical.ErrUnsupportedPath
- }
-
- keys, ok := keysRaw.([]string)
- if !ok {
- return nil, logical.ErrUnsupportedPath
- }
- if len(keys) == 0 {
- return nil, logical.ErrUnsupportedPath
- }
- }
-
-DONELISTHANDLING:
- var err error
- sealWrap := resp.WrapInfo.SealWrap
-
- var ns *namespace.Namespace
- // If we are creating a JWT wrapping token, we always want it to live in
- // the root namespace. These are only used for replication and plugin setup.
- switch resp.WrapInfo.Format {
- case "jwt":
- ns = namespace.RootNamespace
- ctx = namespace.ContextWithNamespace(ctx, ns)
- default:
- ns, err = namespace.FromContext(ctx)
- if err != nil {
- return nil, err
- }
- }
-
- // If we are wrapping, the first part (performed in this function) happens
- // before auditing so that resp.WrapInfo.Token can contain the HMAC'd
- // wrapping token ID in the audit logs, making it possible to determine
- // from the logs whether the token was ever actually used.
- creationTime := time.Now()
- te := logical.TokenEntry{
- Path: req.Path,
- Policies: []string{"response-wrapping"},
- CreationTime: creationTime.Unix(),
- TTL: resp.WrapInfo.TTL,
- NumUses: 1,
- ExplicitMaxTTL: resp.WrapInfo.TTL,
- NamespaceID: ns.ID,
- }
-
- if err := c.tokenStore.create(ctx, &te); err != nil {
- c.logger.Error("failed to create wrapping token", "error", err)
- return nil, ErrInternalError
- }
-
- resp.WrapInfo.Token = te.ID
- resp.WrapInfo.Accessor = te.Accessor
- resp.WrapInfo.CreationTime = creationTime
- // If this is not a rewrap, store the request path as creation_path
- if req.Path != "sys/wrapping/rewrap" {
- resp.WrapInfo.CreationPath = req.Path
- }
-
- if auth != nil && auth.EntityID != "" {
- resp.WrapInfo.WrappedEntityID = auth.EntityID
- }
-
- // This will only be non-nil if this response contains a token, so in that
- // case put the accessor in the wrap info.
- if resp.Auth != nil {
- resp.WrapInfo.WrappedAccessor = resp.Auth.Accessor
- }
-
- switch resp.WrapInfo.Format {
- case "jwt":
- // Create the JWT
- claims := jws.Claims{}
- // Map the JWT ID to the token ID for ease of use
- claims.SetJWTID(te.ID)
- // Set the issue time to the creation time
- claims.SetIssuedAt(creationTime)
- // Set the expiration to the TTL
- claims.SetExpiration(creationTime.Add(resp.WrapInfo.TTL))
- if resp.Auth != nil {
- claims.Set("accessor", resp.Auth.Accessor)
- }
- claims.Set("type", "wrapping")
- claims.Set("addr", c.redirectAddr)
- jwt := jws.NewJWT(claims, crypto.SigningMethodES512)
- serWebToken, err := jwt.Serialize(c.wrappingJWTKey)
- if err != nil {
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to serialize JWT", "error", err)
- return nil, ErrInternalError
- }
- resp.WrapInfo.Token = string(serWebToken)
- if c.redirectAddr == "" {
- resp.AddWarning("No redirect address set in Vault so none could be encoded in the token. You may need to supply Vault's API address when unwrapping the token.")
- }
- }
-
- cubbyReq := &logical.Request{
- Operation: logical.CreateOperation,
- Path: "cubbyhole/response",
- ClientToken: te.ID,
- }
- if sealWrap {
- cubbyReq.WrapInfo = &logical.RequestWrapInfo{
- SealWrap: true,
- }
- }
- cubbyReq.SetTokenEntry(&te)
-
- // During a rewrap, store the original response, don't wrap it again.
- if req.Path == "sys/wrapping/rewrap" {
- cubbyReq.Data = map[string]interface{}{
- "response": resp.Data["response"],
- }
- } else {
- httpResponse := logical.LogicalResponseToHTTPResponse(resp)
-
- // Add the unique identifier of the original request to the response
- httpResponse.RequestID = req.ID
-
- // Because of the way that JSON encodes (likely just in Go) we actually get
- // mixed-up values for ints if we simply put this object in the response
- // and encode the whole thing; so instead we marshal it first, then store
- // the string response. This actually ends up making it easier on the
- // client side, too, as it becomes a straight read-string-pass-to-unmarshal
- // operation.
-
- marshaledResponse, err := json.Marshal(httpResponse)
- if err != nil {
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to marshal wrapped response", "error", err)
- return nil, ErrInternalError
- }
-
- cubbyReq.Data = map[string]interface{}{
- "response": string(marshaledResponse),
- }
- }
-
- cubbyResp, err := c.router.Route(ctx, cubbyReq)
- if err != nil {
- // Revoke since it's not yet being tracked for expiration
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to store wrapped response information", "error", err)
- return nil, ErrInternalError
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to store wrapped response information", "error", cubbyResp.Data["error"])
- return cubbyResp, nil
- }
-
- // Store info for lookup
- cubbyReq.WrapInfo = nil
- cubbyReq.Path = "cubbyhole/wrapinfo"
- cubbyReq.Data = map[string]interface{}{
- "creation_ttl": resp.WrapInfo.TTL,
- "creation_time": creationTime,
- }
- // Store creation_path if not a rewrap
- if req.Path != "sys/wrapping/rewrap" {
- cubbyReq.Data["creation_path"] = req.Path
- } else {
- cubbyReq.Data["creation_path"] = resp.WrapInfo.CreationPath
- }
- cubbyResp, err = c.router.Route(ctx, cubbyReq)
- if err != nil {
- // Revoke since it's not yet being tracked for expiration
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to store wrapping information", "error", err)
- return nil, ErrInternalError
- }
- if cubbyResp != nil && cubbyResp.IsError() {
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to store wrapping information", "error", cubbyResp.Data["error"])
- return cubbyResp, nil
- }
-
- wAuth := &logical.Auth{
- ClientToken: te.ID,
- Policies: []string{"response-wrapping"},
- LeaseOptions: logical.LeaseOptions{
- TTL: te.TTL,
- Renewable: false,
- },
- }
-
- // Register the wrapped token with the expiration manager
- if err := c.expiration.RegisterAuth(ctx, &te, wAuth); err != nil {
- // Revoke since it's not yet being tracked for expiration
- c.tokenStore.revokeOrphan(ctx, te.ID)
- c.logger.Error("failed to register cubbyhole wrapping token lease", "request_path", req.Path, "error", err)
- return nil, ErrInternalError
- }
-
- return nil, nil
-}
-
-// ValidateWrappingToken checks whether a token is a wrapping token.
-func (c *Core) ValidateWrappingToken(ctx context.Context, req *logical.Request) (bool, error) {
- if req == nil {
- return false, fmt.Errorf("invalid request")
- }
-
- var err error
-
- var token string
- var thirdParty bool
- if req.Data != nil && req.Data["token"] != nil {
- thirdParty = true
- if tokenStr, ok := req.Data["token"].(string); !ok {
- return false, fmt.Errorf("could not decode token in request body")
- } else if tokenStr == "" {
- return false, fmt.Errorf("empty token in request body")
- } else {
- token = tokenStr
- }
- } else {
- token = req.ClientToken
- }
-
- // Check for it being a JWT. If it is, and it is valid, we extract the
- // internal client token from it and use that during lookup.
- if strings.Count(token, ".") == 2 {
- wt, err := jws.ParseJWT([]byte(token))
- // If there's an error we simply fall back to attempting to use it as a regular token
- if err == nil && wt != nil {
- validator := &jwt.Validator{}
- validator.SetClaim("type", "wrapping")
- if err = wt.Validate(&c.wrappingJWTKey.PublicKey, crypto.SigningMethodES512, []*jwt.Validator{validator}...); err != nil {
- return false, errwrap.Wrapf("wrapping token signature could not be validated: {{err}}", err)
- }
- token, _ = wt.Claims().JWTID()
- // We override the given request client token so that the rest of
- // Vault sees the real value. This also ensures audit logs are
- // consistent with the actual token that was issued.
- if !thirdParty {
- req.ClientToken = token
- } else {
- req.Data["token"] = token
- }
- }
- }
-
- if token == "" {
- return false, fmt.Errorf("token is empty")
- }
-
- if c.Sealed() {
- return false, consts.ErrSealed
- }
-
- c.stateLock.RLock()
- defer c.stateLock.RUnlock()
- if c.standby && !c.perfStandby {
- return false, consts.ErrStandby
- }
-
- te, err := c.tokenStore.Lookup(ctx, token)
- if err != nil {
- return false, err
- }
- if te == nil {
- return false, nil
- }
-
- if len(te.Policies) != 1 {
- return false, nil
- }
-
- if te.Policies[0] != responseWrappingPolicyName && te.Policies[0] != controlGroupPolicyName {
- return false, nil
- }
-
- if !thirdParty {
- req.ClientTokenAccessor = te.Accessor
- req.ClientTokenRemainingUses = te.NumUses
- req.SetTokenEntry(te)
- }
-
- return true, nil
-}
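
`ValidateWrappingToken` decides whether to attempt JWT parsing with a cheap structural test before doing any crypto: a compact JWS serialization always has exactly three dot-separated segments. The heuristic in isolation, with made-up token values:

```go
package main

import (
	"fmt"
	"strings"
)

// looksLikeJWT applies the same pre-check ValidateWrappingToken uses:
// a compact JWS has exactly two dots (header.payload.signature), so
// anything else is treated as an opaque token ID.
func looksLikeJWT(token string) bool {
	return strings.Count(token, ".") == 2
}

func main() {
	fmt.Println(looksLikeJWT("3gXyLN0mfmBkp6sJ"))           // false: opaque token
	fmt.Println(looksLikeJWT("eyJhbGci.eyJqdGki.c2lnbmF0")) // true: JWT-shaped
}
```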
diff --git a/vendor/github.com/hashicorp/vault/vault/wrapping_util.go b/vendor/github.com/hashicorp/vault/vault/wrapping_util.go
deleted file mode 100644
index 475fd35e..00000000
--- a/vendor/github.com/hashicorp/vault/vault/wrapping_util.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !enterprise
-
-package vault
-
-import (
- "context"
-
- "github.com/hashicorp/vault/logical"
-)
-
-func forwardWrapRequest(context.Context, *Core, *logical.Request, *logical.Response, *logical.Auth) (*logical.Response, error) {
- return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/vault/version/cgo.go b/vendor/github.com/hashicorp/vault/version/cgo.go
deleted file mode 100644
index 2ed493a1..00000000
--- a/vendor/github.com/hashicorp/vault/version/cgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build cgo
-
-package version
-
-func init() {
- CgoEnabled = true
-}
diff --git a/vendor/github.com/hashicorp/vault/version/version.go b/vendor/github.com/hashicorp/vault/version/version.go
deleted file mode 100644
index 0f819333..00000000
--- a/vendor/github.com/hashicorp/vault/version/version.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package version
-
-import (
- "bytes"
- "fmt"
-)
-
-var (
- // The git commit that was compiled. This will be filled in by the compiler.
- GitCommit string
- GitDescribe string
-
- // Whether cgo is enabled or not; set at build time
- CgoEnabled bool
-
- Version = "unknown"
- VersionPrerelease = "unknown"
- VersionMetadata = ""
-)
-
-// VersionInfo holds the components of a full version string.
-type VersionInfo struct {
- Revision string
- Version string
- VersionPrerelease string
- VersionMetadata string
-}
-
-func GetVersion() *VersionInfo {
- ver := Version
- rel := VersionPrerelease
- md := VersionMetadata
- if GitDescribe != "" {
- ver = GitDescribe
- }
- if GitDescribe == "" && rel == "" && VersionPrerelease != "" {
- rel = "dev"
- }
-
- return &VersionInfo{
- Revision: GitCommit,
- Version: ver,
- VersionPrerelease: rel,
- VersionMetadata: md,
- }
-}
-
-func (c *VersionInfo) VersionNumber() string {
- if Version == "unknown" && VersionPrerelease == "unknown" {
- return "(version unknown)"
- }
-
- version := c.Version
-
- if c.VersionPrerelease != "" {
- version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease)
- }
-
- if c.VersionMetadata != "" {
- version = fmt.Sprintf("%s+%s", version, c.VersionMetadata)
- }
-
- return version
-}
-
-func (c *VersionInfo) FullVersionNumber(rev bool) string {
- var versionString bytes.Buffer
-
- if Version == "unknown" && VersionPrerelease == "unknown" {
- return "Vault (version unknown)"
- }
-
- fmt.Fprintf(&versionString, "Vault v%s", c.Version)
- if c.VersionPrerelease != "" {
- fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease)
- }
-
- if c.VersionMetadata != "" {
- fmt.Fprintf(&versionString, "+%s", c.VersionMetadata)
- }
-
- if rev && c.Revision != "" {
- fmt.Fprintf(&versionString, " (%s)", c.Revision)
- }
-
- return versionString.String()
-}
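
`GitCommit`, `GitDescribe`, and friends are plain package variables precisely so that a build can stamp them at link time, typically via `go build -ldflags "-X github.com/hashicorp/vault/version.GitCommit=<sha>"`. Reading them back goes through `GetVersion`; a minimal consumer of the API defined in this file:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/version"
)

func main() {
	info := version.GetVersion()

	// With the defaults from version_base.go (further below) and no
	// ldflags overrides, this prints something like "Vault v1.0.0-rc1".
	fmt.Println(info.FullVersionNumber(true))
}
```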
diff --git a/vendor/github.com/hashicorp/vault/version/version_base.go b/vendor/github.com/hashicorp/vault/version/version_base.go
deleted file mode 100644
index b1a28c8e..00000000
--- a/vendor/github.com/hashicorp/vault/version/version_base.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package version
-
-func init() {
- // The main version number that is being run at the moment.
- Version = "1.0.0"
-
- // A pre-release marker for the version. If this is "" (empty string)
- // then it means that it is a final release. Otherwise, this is a pre-release
- // such as "dev" (in development), "beta", "rc1", etc.
- VersionPrerelease = "rc1"
-}
diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore
deleted file mode 100644
index 83656241..00000000
--- a/vendor/github.com/hashicorp/yamux/.gitignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE
deleted file mode 100644
index f0e5c79e..00000000
--- a/vendor/github.com/hashicorp/yamux/LICENSE
+++ /dev/null
@@ -1,362 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the terms of
- a Secondary License.
-
-1.6. "Executable Form"
-
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
- means a work that combines Covered Software with other material, in a
- separate file or files, that is not Covered Software.
-
-1.8. "License"
-
- means this document.
-
-1.9. "Licensable"
-
- means having the right to grant, to the maximum extent possible, whether
- at the time of the initial grant or subsequently, any and all of the
- rights conveyed by this License.
-
-1.10. "Modifications"
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. "Patent Claims" of a Contributor
-
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the License,
- by the making, using, selling, offering for sale, having made, import,
- or transfer of either its Contributions or its Contributor Version.
-
-1.12. "Secondary License"
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. "Source Code Form"
-
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, "control" means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution
- become effective for each Contribution on the date the Contributor first
- distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under
- this License. No additional rights or licenses will be implied from the
- distribution or licensing of Covered Software under this License.
- Notwithstanding Section 2.1(b) above, no patent license is granted by a
- Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
- This License does not grant any rights in the trademarks, service marks,
- or logos of any Contributor (except as may be necessary to comply with
- the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this
- License (see Section 10.2) or under the terms of a Secondary License (if
- permitted under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its
- Contributions are its original creation(s) or it has sufficient rights to
- grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under
- applicable copyright doctrines of fair use, fair dealing, or other
- equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under
- the terms of this License. You must inform recipients that the Source
- Code Form of the Covered Software is governed by the terms of this
- License, and how they can obtain a copy of this License. You may not
- attempt to alter or restrict the recipients' rights in the Source Code
- Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter the
- recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for
- the Covered Software. If the Larger Work is a combination of Covered
- Software with a work governed by one or more Secondary Licenses, and the
- Covered Software is not Incompatible With Secondary Licenses, this
- License permits You to additionally distribute such Covered Software
- under the terms of such Secondary License(s), so that the recipient of
- the Larger Work may, at their option, further distribute the Covered
- Software under the terms of either this License or such Secondary
- License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices
- (including copyright notices, patent notices, disclaimers of warranty, or
- limitations of liability) contained within the Source Code Form of the
- Covered Software, except that You may alter any license notices to the
- extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on
- behalf of any Contributor. You must make it absolutely clear that any
- such warranty, support, indemnity, or liability obligation is offered by
- You alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute,
- judicial order, or regulation then You must: (a) comply with the terms of
- this License to the maximum extent possible; and (b) describe the
- limitations and the code they affect. Such description must be placed in a
- text file included with all distributions of the Covered Software under
- this License. Except to the extent prohibited by statute or regulation,
- such description must be sufficiently detailed for a recipient of ordinary
- skill to be able to understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing
- basis, if such Contributor fails to notify You of the non-compliance by
- some reasonable means prior to 60 days after You have come back into
- compliance. Moreover, Your grants from a particular Contributor are
- reinstated on an ongoing basis if such Contributor notifies You of the
- non-compliance by some reasonable means, this is the first time You have
- received notice of non-compliance with this License from such
- Contributor, and You become compliant prior to 30 days after Your receipt
- of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions,
- counter-claims, and cross-claims) alleging that a Contributor Version
- directly or indirectly infringes any patent, then the rights granted to
- You by any and all Contributors for the Covered Software under Section
- 2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an "as is" basis,
- without warranty of any kind, either expressed, implied, or statutory,
- including, without limitation, warranties that the Covered Software is free
- of defects, merchantable, fit for a particular purpose or non-infringing.
- The entire risk as to the quality and performance of the Covered Software
- is with You. Should any Covered Software prove defective in any respect,
- You (not any Contributor) assume the cost of any necessary servicing,
- repair, or correction. This disclaimer of warranty constitutes an essential
- part of this License. No use of any Covered Software is authorized under
- this License except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from
- such party's negligence to the extent applicable law prohibits such
- limitation. Some jurisdictions do not allow the exclusion or limitation of
- incidental or consequential damages, so this exclusion and limitation may
- not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts
- of a jurisdiction where the defendant maintains its principal place of
- business and such litigation shall be governed by laws of that
- jurisdiction, without reference to its conflict-of-law provisions. Nothing
- in this Section shall prevent a party's ability to bring cross-claims or
- counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject
- matter hereof. If any provision of this License is held to be
- unenforceable, such provision shall be reformed only to the extent
- necessary to make it enforceable. Any law or regulation which provides that
- the language of a contract shall be construed against the drafter shall not
- be used to construe this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version
- of the License under which You originally received the Covered Software,
- or under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a
- modified version of this License if you rename the license and remove
- any references to the name of the license steward (except to note that
- such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
- Licenses If You choose to distribute Source Code Form that is
- Incompatible With Secondary Licenses under the terms of this version of
- the License, the notice described in Exhibit B of this License must be
- attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
- This Source Code Form is "Incompatible
- With Secondary Licenses", as defined by
- the Mozilla Public License, v. 2.0.
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md
deleted file mode 100644
index d4db7fc9..00000000
--- a/vendor/github.com/hashicorp/yamux/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Yamux
-
-Yamux (Yet another Multiplexer) is a multiplexing library for Golang.
-It relies on an underlying connection to provide reliability
-and ordering, such as TCP or Unix domain sockets, and provides
-stream-oriented multiplexing. It is inspired by SPDY but is not
-interoperable with it.
-
-Yamux features include:
-
-* Bi-directional streams
- * Streams can be opened by either client or server
- * Useful for NAT traversal
- * Server-side push support
-* Flow control
- * Avoid starvation
- * Back-pressure to prevent overwhelming a receiver
-* Keep Alives
- * Enables persistent connections over a load balancer
-* Efficient
- * Enables thousands of logical streams with low overhead
-
-## Documentation
-
-For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux).
-
-## Specification
-
-The full specification for Yamux is provided in the `spec.md` file.
-It can be used as a guide to implementors of interoperable libraries.
-
-## Usage
-
-Using Yamux is remarkably simple:
-
-```go
-
-func client() {
- // Get a TCP connection
- conn, err := net.Dial(...)
- if err != nil {
- panic(err)
- }
-
- // Setup client side of yamux
- session, err := yamux.Client(conn, nil)
- if err != nil {
- panic(err)
- }
-
- // Open a new stream
- stream, err := session.Open()
- if err != nil {
- panic(err)
- }
-
- // Stream implements net.Conn
- stream.Write([]byte("ping"))
-}
-
-func server() {
- // Accept a TCP connection
- conn, err := listener.Accept()
- if err != nil {
- panic(err)
- }
-
- // Setup server side of yamux
- session, err := yamux.Server(conn, nil)
- if err != nil {
- panic(err)
- }
-
- // Accept a stream
- stream, err := session.Accept()
- if err != nil {
- panic(err)
- }
-
- // Listen for a message
- buf := make([]byte, 4)
- stream.Read(buf)
-}
-
-```
-
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
deleted file mode 100644
index be6ebca9..00000000
--- a/vendor/github.com/hashicorp/yamux/addr.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package yamux
-
-import (
- "fmt"
- "net"
-)
-
-// hasAddr is used to get the address from the underlying connection
-type hasAddr interface {
- LocalAddr() net.Addr
- RemoteAddr() net.Addr
-}
-
-// yamuxAddr is used when we cannot get the underlying address
-type yamuxAddr struct {
- Addr string
-}
-
-func (*yamuxAddr) Network() string {
- return "yamux"
-}
-
-func (y *yamuxAddr) String() string {
- return fmt.Sprintf("yamux:%s", y.Addr)
-}
-
-// Addr is used to get the address of the listener.
-func (s *Session) Addr() net.Addr {
- return s.LocalAddr()
-}
-
-// LocalAddr is used to get the local address of the
-// underlying connection.
-func (s *Session) LocalAddr() net.Addr {
- addr, ok := s.conn.(hasAddr)
- if !ok {
- return &yamuxAddr{"local"}
- }
- return addr.LocalAddr()
-}
-
-// RemoteAddr is used to get the address of remote end
-// of the underlying connection
-func (s *Session) RemoteAddr() net.Addr {
- addr, ok := s.conn.(hasAddr)
- if !ok {
- return &yamuxAddr{"remote"}
- }
- return addr.RemoteAddr()
-}
-
-// LocalAddr returns the local address
-func (s *Stream) LocalAddr() net.Addr {
- return s.session.LocalAddr()
-}
-
-// RemoteAddr returns the remote address
-func (s *Stream) RemoteAddr() net.Addr {
- return s.session.RemoteAddr()
-}
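
addr.go uses a small optional-interface pattern: assert the wrapped connection to a narrower interface and fall back to a synthetic address when the assertion fails. The same shape, reduced to a standalone sketch with illustrative names:

```go
package main

import (
	"fmt"
	"net"
)

// hasAddr mirrors the optional interface from addr.go.
type hasAddr interface {
	LocalAddr() net.Addr
}

// fallbackAddr plays the role of yamuxAddr.
type fallbackAddr struct{ name string }

func (a *fallbackAddr) Network() string { return "yamux" }
func (a *fallbackAddr) String() string  { return "yamux:" + a.name }

// localAddr mirrors Session.LocalAddr: prefer the underlying
// connection's own address, fall back to a synthetic one.
func localAddr(conn interface{}) net.Addr {
	if a, ok := conn.(hasAddr); ok {
		return a.LocalAddr()
	}
	return &fallbackAddr{"local"}
}

func main() {
	c1, c2 := net.Pipe() // both ends implement net.Conn
	defer c1.Close()
	defer c2.Close()

	fmt.Println(localAddr(c1))         // "pipe": taken from the connection
	fmt.Println(localAddr(struct{}{})) // "yamux:local": fallback kicks in
}
```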
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
deleted file mode 100644
index 4f529382..00000000
--- a/vendor/github.com/hashicorp/yamux/const.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package yamux
-
-import (
- "encoding/binary"
- "fmt"
-)
-
-var (
- // ErrInvalidVersion means we received a frame with an
- // invalid version
- ErrInvalidVersion = fmt.Errorf("invalid protocol version")
-
- // ErrInvalidMsgType means we received a frame with an
- // invalid message type
- ErrInvalidMsgType = fmt.Errorf("invalid msg type")
-
- // ErrSessionShutdown is used if there is a shutdown during
- // an operation
- ErrSessionShutdown = fmt.Errorf("session shutdown")
-
- // ErrStreamsExhausted is returned if we have no more
- // stream ids to issue
- ErrStreamsExhausted = fmt.Errorf("streams exhausted")
-
- // ErrDuplicateStream is used if a duplicate stream is
- // opened inbound
- ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
-
- // ErrRecvWindowExceeded indicates the receive window was exceeded
- ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
-
- // ErrTimeout is used when we reach an IO deadline
- ErrTimeout = fmt.Errorf("i/o deadline reached")
-
- // ErrStreamClosed is returned when using a closed stream
- ErrStreamClosed = fmt.Errorf("stream closed")
-
- // ErrUnexpectedFlag is set when we get an unexpected flag
- ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
-
- // ErrRemoteGoAway is used when we get a go away from the other side
- ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
-
- // ErrConnectionReset is sent if a stream is reset. This can happen
- // if the backlog is exceeded, or if there was a remote GoAway.
- ErrConnectionReset = fmt.Errorf("connection reset")
-
- // ErrConnectionWriteTimeout indicates that we hit the "safety valve"
- // timeout writing to the underlying stream connection.
- ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
-
-	// ErrKeepAliveTimeout is sent if a missed keepalive causes the session to close
- ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
-)
-
-const (
- // protoVersion is the only version we support
- protoVersion uint8 = 0
-)
-
-const (
- // Data is used for data frames. They are followed
- // by length bytes worth of payload.
- typeData uint8 = iota
-
- // WindowUpdate is used to change the window of
- // a given stream. The length indicates the delta
- // update to the window.
- typeWindowUpdate
-
- // Ping is sent as a keep-alive or to measure
- // the RTT. The StreamID and Length value are echoed
- // back in the response.
- typePing
-
- // GoAway is sent to terminate a session. The StreamID
- // should be 0 and the length is an error code.
- typeGoAway
-)
-
-const (
- // SYN is sent to signal a new stream. May
- // be sent with a data payload
- flagSYN uint16 = 1 << iota
-
- // ACK is sent to acknowledge a new stream. May
- // be sent with a data payload
- flagACK
-
- // FIN is sent to half-close the given stream.
- // May be sent with a data payload.
- flagFIN
-
- // RST is used to hard close a given stream.
- flagRST
-)
-
-const (
- // initialStreamWindow is the initial stream window size
- initialStreamWindow uint32 = 256 * 1024
-)
-
-const (
- // goAwayNormal is sent on a normal termination
- goAwayNormal uint32 = iota
-
- // goAwayProtoErr sent on a protocol error
- goAwayProtoErr
-
- // goAwayInternalErr sent on an internal error
- goAwayInternalErr
-)
-
-const (
- sizeOfVersion = 1
- sizeOfType = 1
- sizeOfFlags = 2
- sizeOfStreamID = 4
- sizeOfLength = 4
- headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
- sizeOfStreamID + sizeOfLength
-)
-
-type header []byte
-
-func (h header) Version() uint8 {
- return h[0]
-}
-
-func (h header) MsgType() uint8 {
- return h[1]
-}
-
-func (h header) Flags() uint16 {
- return binary.BigEndian.Uint16(h[2:4])
-}
-
-func (h header) StreamID() uint32 {
- return binary.BigEndian.Uint32(h[4:8])
-}
-
-func (h header) Length() uint32 {
- return binary.BigEndian.Uint32(h[8:12])
-}
-
-func (h header) String() string {
- return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
- h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
-}
-
-func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
- h[0] = protoVersion
- h[1] = msgType
- binary.BigEndian.PutUint16(h[2:4], flags)
- binary.BigEndian.PutUint32(h[4:8], streamID)
- binary.BigEndian.PutUint32(h[8:12], length)
-}
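
A quick way to sanity-check the 12-byte wire layout is to round-trip a frame header by hand. The sketch below mirrors `encode` and the accessor methods using only the standard library; the constants are restated locally rather than imported from the package:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// headerSize restates the layout above: version (1) + type (1) +
// flags (2) + stream ID (4) + length (4), all big endian.
const headerSize = 12

func main() {
	hdr := make([]byte, headerSize)

	// Encode a Data frame (0x0) with the SYN flag (0x1) on stream 1,
	// announcing a 64-byte payload.
	hdr[0] = 0 // protoVersion
	hdr[1] = 0 // typeData
	binary.BigEndian.PutUint16(hdr[2:4], 0x1) // flagSYN
	binary.BigEndian.PutUint32(hdr[4:8], 1)   // stream ID
	binary.BigEndian.PutUint32(hdr[8:12], 64) // payload length

	// Decode it back, matching the accessor methods.
	fmt.Printf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d\n",
		hdr[0], hdr[1],
		binary.BigEndian.Uint16(hdr[2:4]),
		binary.BigEndian.Uint32(hdr[4:8]),
		binary.BigEndian.Uint32(hdr[8:12]))
}
```
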
diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod
deleted file mode 100644
index 672a0e58..00000000
--- a/vendor/github.com/hashicorp/yamux/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/hashicorp/yamux
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
deleted file mode 100644
index 18a078c8..00000000
--- a/vendor/github.com/hashicorp/yamux/mux.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package yamux
-
-import (
- "fmt"
- "io"
- "log"
- "os"
- "time"
-)
-
-// Config is used to tune the Yamux session
-type Config struct {
- // AcceptBacklog is used to limit how many streams may be
-	// waiting for an accept.
- AcceptBacklog int
-
-	// EnableKeepAlive is used to send periodic keep-alive
-	// messages using a ping.
- EnableKeepAlive bool
-
- // KeepAliveInterval is how often to perform the keep alive
- KeepAliveInterval time.Duration
-
-	// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
-	// which we will suspect a problem with the underlying connection and
-	// close it. This is only applied to writes, where there's generally
-	// an expectation that things will move along quickly.
- ConnectionWriteTimeout time.Duration
-
- // MaxStreamWindowSize is used to control the maximum
- // window size that we allow for a stream.
- MaxStreamWindowSize uint32
-
- // LogOutput is used to control the log destination. Either Logger or
- // LogOutput can be set, not both.
- LogOutput io.Writer
-
- // Logger is used to pass in the logger to be used. Either Logger or
- // LogOutput can be set, not both.
- Logger *log.Logger
-}
-
-// DefaultConfig is used to return a default configuration
-func DefaultConfig() *Config {
- return &Config{
- AcceptBacklog: 256,
- EnableKeepAlive: true,
- KeepAliveInterval: 30 * time.Second,
- ConnectionWriteTimeout: 10 * time.Second,
- MaxStreamWindowSize: initialStreamWindow,
- LogOutput: os.Stderr,
- }
-}
-
-// VerifyConfig is used to verify the sanity of configuration
-func VerifyConfig(config *Config) error {
- if config.AcceptBacklog <= 0 {
- return fmt.Errorf("backlog must be positive")
- }
- if config.KeepAliveInterval == 0 {
- return fmt.Errorf("keep-alive interval must be positive")
- }
- if config.MaxStreamWindowSize < initialStreamWindow {
- return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
- }
- if config.LogOutput != nil && config.Logger != nil {
- return fmt.Errorf("both Logger and LogOutput may not be set, select one")
- } else if config.LogOutput == nil && config.Logger == nil {
- return fmt.Errorf("one of Logger or LogOutput must be set, select one")
- }
- return nil
-}
-
-// Server is used to initialize a new server-side connection.
-// There must be at most one server-side connection. If a nil config is
-// provided, the DefaultConfig will be used.
-func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
- if config == nil {
- config = DefaultConfig()
- }
- if err := VerifyConfig(config); err != nil {
- return nil, err
- }
- return newSession(config, conn, false), nil
-}
-
-// Client is used to initialize a new client-side connection.
-// There must be at most one client-side connection.
-func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
- if config == nil {
- config = DefaultConfig()
- }
-
- if err := VerifyConfig(config); err != nil {
- return nil, err
- }
- return newSession(config, conn, true), nil
-}
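
For context, a minimal sketch of wiring the two constructors together over an in-memory `net.Pipe`, which satisfies the `io.ReadWriteCloser` the sessions need; error handling is elided for brevity:

```go
package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	// net.Pipe provides an in-memory, reliable, ordered connection,
	// which is all the transport yamux requires.
	clientConn, serverConn := net.Pipe()

	server, _ := yamux.Server(serverConn, nil) // nil config -> DefaultConfig()
	client, _ := yamux.Client(clientConn, nil)

	// Server side: accept the first stream and send a greeting.
	go func() {
		stream, _ := server.AcceptStream()
		fmt.Fprintln(stream, "hello from the server")
		stream.Close()
	}()

	// Client side: open a stream and read the greeting back.
	stream, _ := client.OpenStream()
	buf := make([]byte, 64)
	n, _ := stream.Read(buf)
	fmt.Printf("got: %s", buf[:n])
}
```
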
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
deleted file mode 100644
index a80ddec3..00000000
--- a/vendor/github.com/hashicorp/yamux/session.go
+++ /dev/null
@@ -1,653 +0,0 @@
-package yamux
-
-import (
- "bufio"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "math"
- "net"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Session is used to wrap a reliable ordered connection and to
-// multiplex it into multiple streams.
-type Session struct {
-	// remoteGoAway indicates the remote side does
-	// not want further connections. Must be first for alignment.
- remoteGoAway int32
-
-	// localGoAway indicates that we should stop
-	// accepting further connections. Must be first for alignment.
- localGoAway int32
-
-	// nextStreamID is the next stream ID we should
-	// issue. This depends on whether we are a client or server.
- nextStreamID uint32
-
- // config holds our configuration
- config *Config
-
- // logger is used for our logs
- logger *log.Logger
-
- // conn is the underlying connection
- conn io.ReadWriteCloser
-
- // bufRead is a buffered reader
- bufRead *bufio.Reader
-
- // pings is used to track inflight pings
- pings map[uint32]chan struct{}
- pingID uint32
- pingLock sync.Mutex
-
- // streams maps a stream id to a stream, and inflight has an entry
- // for any outgoing stream that has not yet been established. Both are
- // protected by streamLock.
- streams map[uint32]*Stream
- inflight map[uint32]struct{}
- streamLock sync.Mutex
-
-	// synCh acts like a semaphore. It is sized to the AcceptBacklog which
-	// is assumed to be symmetric between the client and server. This allows
-	// the client to avoid exceeding the backlog by blocking the open instead.
- synCh chan struct{}
-
- // acceptCh is used to pass ready streams to the client
- acceptCh chan *Stream
-
- // sendCh is used to mark a stream as ready to send,
- // or to send a header out directly.
- sendCh chan sendReady
-
- // recvDoneCh is closed when recv() exits to avoid a race
- // between stream registration and stream shutdown
- recvDoneCh chan struct{}
-
- // shutdown is used to safely close a session
- shutdown bool
- shutdownErr error
- shutdownCh chan struct{}
- shutdownLock sync.Mutex
-}
-
-// sendReady is used to either mark a stream as ready
-// or to directly send a header
-type sendReady struct {
- Hdr []byte
- Body io.Reader
- Err chan error
-}
-
-// newSession is used to construct a new session
-func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
- logger := config.Logger
- if logger == nil {
- logger = log.New(config.LogOutput, "", log.LstdFlags)
- }
-
- s := &Session{
- config: config,
- logger: logger,
- conn: conn,
- bufRead: bufio.NewReader(conn),
- pings: make(map[uint32]chan struct{}),
- streams: make(map[uint32]*Stream),
- inflight: make(map[uint32]struct{}),
- synCh: make(chan struct{}, config.AcceptBacklog),
- acceptCh: make(chan *Stream, config.AcceptBacklog),
- sendCh: make(chan sendReady, 64),
- recvDoneCh: make(chan struct{}),
- shutdownCh: make(chan struct{}),
- }
- if client {
- s.nextStreamID = 1
- } else {
- s.nextStreamID = 2
- }
- go s.recv()
- go s.send()
- if config.EnableKeepAlive {
- go s.keepalive()
- }
- return s
-}
-
-// IsClosed does a safe check to see if we have shutdown
-func (s *Session) IsClosed() bool {
- select {
- case <-s.shutdownCh:
- return true
- default:
- return false
- }
-}
-
-// CloseChan returns a read-only channel which is closed as
-// soon as the session is closed.
-func (s *Session) CloseChan() <-chan struct{} {
- return s.shutdownCh
-}
-
-// NumStreams returns the number of currently open streams
-func (s *Session) NumStreams() int {
- s.streamLock.Lock()
- num := len(s.streams)
- s.streamLock.Unlock()
- return num
-}
-
-// Open is used to create a new stream as a net.Conn
-func (s *Session) Open() (net.Conn, error) {
- conn, err := s.OpenStream()
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-// OpenStream is used to create a new stream
-func (s *Session) OpenStream() (*Stream, error) {
- if s.IsClosed() {
- return nil, ErrSessionShutdown
- }
- if atomic.LoadInt32(&s.remoteGoAway) == 1 {
- return nil, ErrRemoteGoAway
- }
-
- // Block if we have too many inflight SYNs
- select {
- case s.synCh <- struct{}{}:
- case <-s.shutdownCh:
- return nil, ErrSessionShutdown
- }
-
-GET_ID:
- // Get an ID, and check for stream exhaustion
- id := atomic.LoadUint32(&s.nextStreamID)
- if id >= math.MaxUint32-1 {
- return nil, ErrStreamsExhausted
- }
- if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
- goto GET_ID
- }
-
- // Register the stream
- stream := newStream(s, id, streamInit)
- s.streamLock.Lock()
- s.streams[id] = stream
- s.inflight[id] = struct{}{}
- s.streamLock.Unlock()
-
- // Send the window update to create
- if err := stream.sendWindowUpdate(); err != nil {
- select {
- case <-s.synCh:
- default:
- s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
- }
- return nil, err
- }
- return stream, nil
-}
-
-// Accept is used to block until the next available stream
-// is ready to be accepted.
-func (s *Session) Accept() (net.Conn, error) {
- conn, err := s.AcceptStream()
- if err != nil {
- return nil, err
- }
- return conn, err
-}
-
-// AcceptStream is used to block until the next available stream
-// is ready to be accepted.
-func (s *Session) AcceptStream() (*Stream, error) {
- select {
- case stream := <-s.acceptCh:
- if err := stream.sendWindowUpdate(); err != nil {
- return nil, err
- }
- return stream, nil
- case <-s.shutdownCh:
- return nil, s.shutdownErr
- }
-}
-
-// Close is used to close the session and all streams.
-// Attempts to send a GoAway before closing the connection.
-func (s *Session) Close() error {
- s.shutdownLock.Lock()
- defer s.shutdownLock.Unlock()
-
- if s.shutdown {
- return nil
- }
- s.shutdown = true
- if s.shutdownErr == nil {
- s.shutdownErr = ErrSessionShutdown
- }
- close(s.shutdownCh)
- s.conn.Close()
- <-s.recvDoneCh
-
- s.streamLock.Lock()
- defer s.streamLock.Unlock()
- for _, stream := range s.streams {
- stream.forceClose()
- }
- return nil
-}
-
-// exitErr is used to handle an error that is causing the
-// session to terminate.
-func (s *Session) exitErr(err error) {
- s.shutdownLock.Lock()
- if s.shutdownErr == nil {
- s.shutdownErr = err
- }
- s.shutdownLock.Unlock()
- s.Close()
-}
-
-// GoAway can be used to prevent accepting further
-// connections. It does not close the underlying conn.
-func (s *Session) GoAway() error {
- return s.waitForSend(s.goAway(goAwayNormal), nil)
-}
-
-// goAway is used to send a goAway message
-func (s *Session) goAway(reason uint32) header {
- atomic.SwapInt32(&s.localGoAway, 1)
- hdr := header(make([]byte, headerSize))
- hdr.encode(typeGoAway, 0, 0, reason)
- return hdr
-}
-
-// Ping is used to measure the RTT response time
-func (s *Session) Ping() (time.Duration, error) {
- // Get a channel for the ping
- ch := make(chan struct{})
-
- // Get a new ping id, mark as pending
- s.pingLock.Lock()
- id := s.pingID
- s.pingID++
- s.pings[id] = ch
- s.pingLock.Unlock()
-
- // Send the ping request
- hdr := header(make([]byte, headerSize))
- hdr.encode(typePing, flagSYN, 0, id)
- if err := s.waitForSend(hdr, nil); err != nil {
- return 0, err
- }
-
- // Wait for a response
- start := time.Now()
- select {
- case <-ch:
- case <-time.After(s.config.ConnectionWriteTimeout):
- s.pingLock.Lock()
- delete(s.pings, id) // Ignore it if a response comes later.
- s.pingLock.Unlock()
- return 0, ErrTimeout
- case <-s.shutdownCh:
- return 0, ErrSessionShutdown
- }
-
- // Compute the RTT
-	return time.Since(start), nil
-}
-
-// keepalive is a long running goroutine that periodically does
-// a ping to keep the connection alive.
-func (s *Session) keepalive() {
- for {
- select {
- case <-time.After(s.config.KeepAliveInterval):
- _, err := s.Ping()
- if err != nil {
- if err != ErrSessionShutdown {
- s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
- s.exitErr(ErrKeepAliveTimeout)
- }
- return
- }
- case <-s.shutdownCh:
- return
- }
- }
-}
-
-// waitForSend waits to send a header, checking for a potential shutdown
-func (s *Session) waitForSend(hdr header, body io.Reader) error {
- errCh := make(chan error, 1)
- return s.waitForSendErr(hdr, body, errCh)
-}
-
-// waitForSendErr waits to send a header with optional data, checking for a
-// potential shutdown. Since there's the expectation that sends can happen
-// in a timely manner, we enforce the connection write timeout here.
-func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
- t := timerPool.Get()
- timer := t.(*time.Timer)
- timer.Reset(s.config.ConnectionWriteTimeout)
- defer func() {
- timer.Stop()
- select {
- case <-timer.C:
- default:
- }
- timerPool.Put(t)
- }()
-
- ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
- select {
- case s.sendCh <- ready:
- case <-s.shutdownCh:
- return ErrSessionShutdown
- case <-timer.C:
- return ErrConnectionWriteTimeout
- }
-
- select {
- case err := <-errCh:
- return err
- case <-s.shutdownCh:
- return ErrSessionShutdown
- case <-timer.C:
- return ErrConnectionWriteTimeout
- }
-}
-
-// sendNoWait does a send without waiting. Since there's the expectation that
-// the send happens right here, we enforce the connection write timeout if we
-// can't queue the header to be sent.
-func (s *Session) sendNoWait(hdr header) error {
- t := timerPool.Get()
- timer := t.(*time.Timer)
- timer.Reset(s.config.ConnectionWriteTimeout)
- defer func() {
- timer.Stop()
- select {
- case <-timer.C:
- default:
- }
- timerPool.Put(t)
- }()
-
- select {
- case s.sendCh <- sendReady{Hdr: hdr}:
- return nil
- case <-s.shutdownCh:
- return ErrSessionShutdown
- case <-timer.C:
- return ErrConnectionWriteTimeout
- }
-}
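
Both send paths lean on the same pooled-timer discipline: reset on acquire, then stop and drain before returning the timer to the pool, so a stale tick can never leak to the next user. A standalone sketch of that idiom, assuming nothing beyond the standard library:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

var timerPool = sync.Pool{
	New: func() interface{} {
		t := time.NewTimer(time.Hour)
		t.Stop()
		return t
	},
}

// withTimeout runs fn with a pooled timer armed to d, reporting
// whether fn finished before the deadline.
func withTimeout(d time.Duration, fn func(done chan struct{})) bool {
	timer := timerPool.Get().(*time.Timer)
	timer.Reset(d)
	defer func() {
		// Stop the timer and drain a possibly pending tick so the
		// pooled timer is quiescent for its next user.
		if !timer.Stop() {
			select {
			case <-timer.C:
			default:
			}
		}
		timerPool.Put(timer)
	}()

	done := make(chan struct{})
	go fn(done)
	select {
	case <-done:
		return true
	case <-timer.C:
		return false
	}
}

func main() {
	ok := withTimeout(10*time.Millisecond, func(done chan struct{}) {
		close(done)
	})
	fmt.Println("finished in time:", ok)
}
```
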
-
-// send is a long running goroutine that sends data
-func (s *Session) send() {
- for {
- select {
- case ready := <-s.sendCh:
- // Send a header if ready
- if ready.Hdr != nil {
- sent := 0
- for sent < len(ready.Hdr) {
- n, err := s.conn.Write(ready.Hdr[sent:])
- if err != nil {
- s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
- asyncSendErr(ready.Err, err)
- s.exitErr(err)
- return
- }
- sent += n
- }
- }
-
- // Send data from a body if given
- if ready.Body != nil {
- _, err := io.Copy(s.conn, ready.Body)
- if err != nil {
- s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
- asyncSendErr(ready.Err, err)
- s.exitErr(err)
- return
- }
- }
-
- // No error, successful send
- asyncSendErr(ready.Err, nil)
- case <-s.shutdownCh:
- return
- }
- }
-}
-
-// recv is a long running goroutine that accepts new data
-func (s *Session) recv() {
- if err := s.recvLoop(); err != nil {
- s.exitErr(err)
- }
-}
-
-// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type
-var (
- handlers = []func(*Session, header) error{
- typeData: (*Session).handleStreamMessage,
- typeWindowUpdate: (*Session).handleStreamMessage,
- typePing: (*Session).handlePing,
- typeGoAway: (*Session).handleGoAway,
- }
-)
-
-// recvLoop continues to receive data until a fatal error is encountered
-func (s *Session) recvLoop() error {
- defer close(s.recvDoneCh)
- hdr := header(make([]byte, headerSize))
- for {
- // Read the header
- if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
- if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
- s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
- }
- return err
- }
-
- // Verify the version
- if hdr.Version() != protoVersion {
- s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
- return ErrInvalidVersion
- }
-
- mt := hdr.MsgType()
- if mt < typeData || mt > typeGoAway {
- return ErrInvalidMsgType
- }
-
- if err := handlers[mt](s, hdr); err != nil {
- return err
- }
- }
-}
-
-// handleStreamMessage handles either a data or window update frame
-func (s *Session) handleStreamMessage(hdr header) error {
- // Check for a new stream creation
- id := hdr.StreamID()
- flags := hdr.Flags()
- if flags&flagSYN == flagSYN {
- if err := s.incomingStream(id); err != nil {
- return err
- }
- }
-
- // Get the stream
- s.streamLock.Lock()
- stream := s.streams[id]
- s.streamLock.Unlock()
-
- // If we do not have a stream, likely we sent a RST
- if stream == nil {
- // Drain any data on the wire
- if hdr.MsgType() == typeData && hdr.Length() > 0 {
- s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
- if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
- s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
- return nil
- }
- } else {
- s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
- }
- return nil
- }
-
- // Check if this is a window update
- if hdr.MsgType() == typeWindowUpdate {
- if err := stream.incrSendWindow(hdr, flags); err != nil {
- if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
- s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
- }
- return err
- }
- return nil
- }
-
- // Read the new data
- if err := stream.readData(hdr, flags, s.bufRead); err != nil {
- if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
- s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
- }
- return err
- }
- return nil
-}
-
-// handlePing is invoked for a typePing frame
-func (s *Session) handlePing(hdr header) error {
- flags := hdr.Flags()
- pingID := hdr.Length()
-
- // Check if this is a query, respond back in a separate context so we
- // don't interfere with the receiving thread blocking for the write.
- if flags&flagSYN == flagSYN {
- go func() {
- hdr := header(make([]byte, headerSize))
- hdr.encode(typePing, flagACK, 0, pingID)
- if err := s.sendNoWait(hdr); err != nil {
- s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
- }
- }()
- return nil
- }
-
- // Handle a response
- s.pingLock.Lock()
- ch := s.pings[pingID]
- if ch != nil {
- delete(s.pings, pingID)
- close(ch)
- }
- s.pingLock.Unlock()
- return nil
-}
-
-// handleGoAway is invoked for a typeGoAway frame
-func (s *Session) handleGoAway(hdr header) error {
- code := hdr.Length()
- switch code {
- case goAwayNormal:
- atomic.SwapInt32(&s.remoteGoAway, 1)
- case goAwayProtoErr:
- s.logger.Printf("[ERR] yamux: received protocol error go away")
- return fmt.Errorf("yamux protocol error")
- case goAwayInternalErr:
- s.logger.Printf("[ERR] yamux: received internal error go away")
- return fmt.Errorf("remote yamux internal error")
- default:
- s.logger.Printf("[ERR] yamux: received unexpected go away")
- return fmt.Errorf("unexpected go away received")
- }
- return nil
-}
-
-// incomingStream is used to create a new incoming stream
-func (s *Session) incomingStream(id uint32) error {
- // Reject immediately if we are doing a go away
- if atomic.LoadInt32(&s.localGoAway) == 1 {
- hdr := header(make([]byte, headerSize))
- hdr.encode(typeWindowUpdate, flagRST, id, 0)
- return s.sendNoWait(hdr)
- }
-
- // Allocate a new stream
- stream := newStream(s, id, streamSYNReceived)
-
- s.streamLock.Lock()
- defer s.streamLock.Unlock()
-
- // Check if stream already exists
- if _, ok := s.streams[id]; ok {
- s.logger.Printf("[ERR] yamux: duplicate stream declared")
- if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
- s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
- }
- return ErrDuplicateStream
- }
-
- // Register the stream
- s.streams[id] = stream
-
- // Check if we've exceeded the backlog
- select {
- case s.acceptCh <- stream:
- return nil
- default:
- // Backlog exceeded! RST the stream
- s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
- delete(s.streams, id)
- stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
- return s.sendNoWait(stream.sendHdr)
- }
-}
-
-// closeStream is used to close a stream once both sides have
-// issued a close. If there was an in-flight SYN and the stream
-// was not yet established, then this will give the credit back.
-func (s *Session) closeStream(id uint32) {
- s.streamLock.Lock()
- if _, ok := s.inflight[id]; ok {
- select {
- case <-s.synCh:
- default:
- s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
- }
- }
- delete(s.streams, id)
- s.streamLock.Unlock()
-}
-
-// establishStream is used to mark a stream that was in the
-// SYN Sent state as established.
-func (s *Session) establishStream(id uint32) {
- s.streamLock.Lock()
- if _, ok := s.inflight[id]; ok {
- delete(s.inflight, id)
- } else {
- s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
- }
- select {
- case <-s.synCh:
- default:
- s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
- }
- s.streamLock.Unlock()
-}
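
To see the session-level plumbing in action, a small sketch that measures RTT with `Ping` over an in-memory transport (the same mechanism the keepalive goroutine uses on each interval); error handling on session setup is abbreviated:

```go
package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	clientConn, serverConn := net.Pipe()

	server, _ := yamux.Server(serverConn, nil)
	client, _ := yamux.Client(clientConn, nil)
	defer client.Close()
	defer server.Close()

	// Ping round-trips a typePing frame; the server's recv loop echoes
	// the opaque ping ID back with the ACK flag set.
	rtt, err := client.Ping()
	if err != nil {
		fmt.Println("ping failed:", err)
		return
	}
	fmt.Println("measured RTT:", rtt)
}
```
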
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
deleted file mode 100644
index 183d797b..00000000
--- a/vendor/github.com/hashicorp/yamux/spec.md
+++ /dev/null
@@ -1,140 +0,0 @@
-# Specification
-
-We use this document to detail the internal specification of Yamux.
-It serves both as a guide for implementing Yamux and as a reference
-for building alternative interoperable libraries.
-
-# Framing
-
-Yamux uses a streaming connection underneath, but imposes a message
-framing so that it can be shared between many logical streams. Each
-frame contains a header like:
-
-* Version (8 bits)
-* Type (8 bits)
-* Flags (16 bits)
-* StreamID (32 bits)
-* Length (32 bits)
-
-This means that each header has a 12 byte overhead.
-All fields are encoded in network order (big endian).
-Each field is described below:
-
-## Version Field
-
-The version field is used for future backward compatibility. At the
-current time, the field is always set to 0, to indicate the initial
-version.
-
-## Type Field
-
-The type field is used to switch the frame message type. The following
-message types are supported:
-
-* 0x0 Data - Used to transmit data. May transmit zero length payloads
- depending on the flags.
-
-* 0x1 Window Update - Used to update the sender's receive window size.
-  This is used to implement per-stream flow control.
-
-* 0x2 Ping - Used to measure RTT. It can also be used for heartbeats
-  and keep-alives over TCP.
-
-* 0x3 Go Away - Used to close a session.
-
-## Flag Field
-
-The flags field is used to provide additional information related
-to the message type. The following flags are supported:
-
-* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
- window update message. Also sent with a ping to indicate outbound.
-
-* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
- or window update message. Also sent with a ping to indicate response.
-
-* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
- message or window update.
-
-* 0x8 RST - Reset a stream immediately. May be sent with a data or
- window update message.
-
-## StreamID Field
-
-The StreamID field is used to identify the logical stream the frame
-is addressing. The client side should use odd IDs, and the server even.
-This prevents any collisions. Additionally, the 0 ID is reserved to represent
-the session.
-
-Both Ping and Go Away messages should always use the 0 StreamID.
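
The odd/even split means each side allocates IDs from a disjoint sequence by stepping in twos, exactly as `newSession` seeds `nextStreamID` with 1 or 2. A trivial illustration:

```go
package main

import "fmt"

func main() {
	// Client IDs start at 1 and stay odd; server IDs start at 2 and
	// stay even, so the two sides never collide. ID 0 is the session.
	clientID, serverID := uint32(1), uint32(2)
	for i := 0; i < 3; i++ {
		fmt.Println("client opens:", clientID, "server opens:", serverID)
		clientID += 2
		serverID += 2
	}
}
```
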
-
-## Length Field
-
-The meaning of the length field depends on the message type:
-
-* Data - provides the length of bytes following the header
-* Window update - provides a delta update to the window size
-* Ping - Contains an opaque value, echoed back
-* Go Away - Contains an error code
-
-# Message Flow
-
-There is no explicit connection setup, as Yamux relies on an underlying
-transport to be provided. However, there is a distinction between client
-and server side of the connection.
-
-## Opening a stream
-
-To open a stream, an initial data or window update frame is sent
-with a new StreamID. The SYN flag should be set to signal a new stream.
-
-The receiver must then reply with either a data or window update frame
-with the StreamID along with the ACK flag to accept the stream or with
-the RST flag to reject the stream.
-
-Because we are relying on the reliable stream underneath, a connection
-can begin sending data once the SYN flag is sent. The corresponding
-ACK does not need to be received. This is particularly well suited
-for an RPC system where a client wants to open a stream and immediately
-fire a request without waiting for the RTT of the ACK.
-
-This does introduce the possibility of a connection being rejected
-after data has been sent already. This is a slight semantic difference
-from TCP, where the connection cannot be refused after it is opened.
-Clients should be prepared to handle this by checking for an error
-that indicates a RST was received.
-
-## Closing a stream
-
-To close a stream, either side sends a data or window update frame
-along with the FIN flag. This does a half-close indicating the sender
-will send no further data.
-
-Once both sides have half-closed the stream, it is fully closed.
-
-Alternatively, if an error occurs, the RST flag can be used to
-hard close a stream immediately.
-
-## Flow Control
-
-Yamux initially starts each stream with a 256KB window size.
-There is no window size for the session.
-
-To prevent the streams from stalling, window update frames should be
-sent regularly. Yamux can be configured to provide a larger limit for
-window sizes. Both sides assume the initial 256KB window, but can
-immediately send a window update as part of the SYN/ACK indicating a
-larger window.
-
-Both sides should track the number of bytes sent in Data frames
-only, as only those count against the window size.
-
-## Session termination
-
-When a session is being terminated, the Go Away message should
-be sent. The Length should be set to one of the following to
-provide an error code:
-
-* 0x0 Normal termination
-* 0x1 Protocol error
-* 0x2 Internal error
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
deleted file mode 100644
index aa239197..00000000
--- a/vendor/github.com/hashicorp/yamux/stream.go
+++ /dev/null
@@ -1,470 +0,0 @@
-package yamux
-
-import (
- "bytes"
- "io"
- "sync"
- "sync/atomic"
- "time"
-)
-
-type streamState int
-
-const (
- streamInit streamState = iota
- streamSYNSent
- streamSYNReceived
- streamEstablished
- streamLocalClose
- streamRemoteClose
- streamClosed
- streamReset
-)
-
-// Stream is used to represent a logical stream
-// within a session.
-type Stream struct {
- recvWindow uint32
- sendWindow uint32
-
- id uint32
- session *Session
-
- state streamState
- stateLock sync.Mutex
-
- recvBuf *bytes.Buffer
- recvLock sync.Mutex
-
- controlHdr header
- controlErr chan error
- controlHdrLock sync.Mutex
-
- sendHdr header
- sendErr chan error
- sendLock sync.Mutex
-
- recvNotifyCh chan struct{}
- sendNotifyCh chan struct{}
-
- readDeadline atomic.Value // time.Time
- writeDeadline atomic.Value // time.Time
-}
-
-// newStream is used to construct a new stream within
-// a given session for an ID
-func newStream(session *Session, id uint32, state streamState) *Stream {
- s := &Stream{
- id: id,
- session: session,
- state: state,
- controlHdr: header(make([]byte, headerSize)),
- controlErr: make(chan error, 1),
- sendHdr: header(make([]byte, headerSize)),
- sendErr: make(chan error, 1),
- recvWindow: initialStreamWindow,
- sendWindow: initialStreamWindow,
- recvNotifyCh: make(chan struct{}, 1),
- sendNotifyCh: make(chan struct{}, 1),
- }
- s.readDeadline.Store(time.Time{})
- s.writeDeadline.Store(time.Time{})
- return s
-}
-
-// Session returns the associated stream session
-func (s *Stream) Session() *Session {
- return s.session
-}
-
-// StreamID returns the ID of this stream
-func (s *Stream) StreamID() uint32 {
- return s.id
-}
-
-// Read is used to read from the stream
-func (s *Stream) Read(b []byte) (n int, err error) {
- defer asyncNotify(s.recvNotifyCh)
-START:
- s.stateLock.Lock()
- switch s.state {
- case streamLocalClose:
- fallthrough
- case streamRemoteClose:
- fallthrough
- case streamClosed:
- s.recvLock.Lock()
- if s.recvBuf == nil || s.recvBuf.Len() == 0 {
- s.recvLock.Unlock()
- s.stateLock.Unlock()
- return 0, io.EOF
- }
- s.recvLock.Unlock()
- case streamReset:
- s.stateLock.Unlock()
- return 0, ErrConnectionReset
- }
- s.stateLock.Unlock()
-
- // If there is no data available, block
- s.recvLock.Lock()
- if s.recvBuf == nil || s.recvBuf.Len() == 0 {
- s.recvLock.Unlock()
- goto WAIT
- }
-
- // Read any bytes
- n, _ = s.recvBuf.Read(b)
- s.recvLock.Unlock()
-
- // Send a window update potentially
- err = s.sendWindowUpdate()
- return n, err
-
-WAIT:
- var timeout <-chan time.Time
- var timer *time.Timer
- readDeadline := s.readDeadline.Load().(time.Time)
- if !readDeadline.IsZero() {
-		delay := time.Until(readDeadline)
- timer = time.NewTimer(delay)
- timeout = timer.C
- }
- select {
- case <-s.recvNotifyCh:
- if timer != nil {
- timer.Stop()
- }
- goto START
- case <-timeout:
- return 0, ErrTimeout
- }
-}
-
-// Write is used to write to the stream
-func (s *Stream) Write(b []byte) (n int, err error) {
- s.sendLock.Lock()
- defer s.sendLock.Unlock()
- total := 0
- for total < len(b) {
- n, err := s.write(b[total:])
- total += n
- if err != nil {
- return total, err
- }
- }
- return total, nil
-}
-
-// write is used to write to the stream, may return on
-// a short write.
-func (s *Stream) write(b []byte) (n int, err error) {
- var flags uint16
- var max uint32
- var body io.Reader
-START:
- s.stateLock.Lock()
- switch s.state {
- case streamLocalClose:
- fallthrough
- case streamClosed:
- s.stateLock.Unlock()
- return 0, ErrStreamClosed
- case streamReset:
- s.stateLock.Unlock()
- return 0, ErrConnectionReset
- }
- s.stateLock.Unlock()
-
- // If there is no data available, block
- window := atomic.LoadUint32(&s.sendWindow)
- if window == 0 {
- goto WAIT
- }
-
- // Determine the flags if any
- flags = s.sendFlags()
-
- // Send up to our send window
- max = min(window, uint32(len(b)))
- body = bytes.NewReader(b[:max])
-
- // Send the header
- s.sendHdr.encode(typeData, flags, s.id, max)
- if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
- return 0, err
- }
-
- // Reduce our send window
- atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
-
- return int(max), err
-
-WAIT:
- var timeout <-chan time.Time
- writeDeadline := s.writeDeadline.Load().(time.Time)
- if !writeDeadline.IsZero() {
-		delay := time.Until(writeDeadline)
- timeout = time.After(delay)
- }
- select {
- case <-s.sendNotifyCh:
- goto START
- case <-timeout:
- return 0, ErrTimeout
- }
- return 0, nil
-}
-
-// sendFlags determines any flags that are appropriate
-// based on the current stream state
-func (s *Stream) sendFlags() uint16 {
- s.stateLock.Lock()
- defer s.stateLock.Unlock()
- var flags uint16
- switch s.state {
- case streamInit:
- flags |= flagSYN
- s.state = streamSYNSent
- case streamSYNReceived:
- flags |= flagACK
- s.state = streamEstablished
- }
- return flags
-}
-
-// sendWindowUpdate potentially sends a window update enabling
-// further writes to take place.
-func (s *Stream) sendWindowUpdate() error {
- s.controlHdrLock.Lock()
- defer s.controlHdrLock.Unlock()
-
- // Determine the delta update
- max := s.session.config.MaxStreamWindowSize
- var bufLen uint32
- s.recvLock.Lock()
- if s.recvBuf != nil {
- bufLen = uint32(s.recvBuf.Len())
- }
- delta := (max - bufLen) - s.recvWindow
-
- // Determine the flags if any
- flags := s.sendFlags()
-
- // Check if we can omit the update
- if delta < (max/2) && flags == 0 {
- s.recvLock.Unlock()
- return nil
- }
-
- // Update our window
- s.recvWindow += delta
- s.recvLock.Unlock()
-
- // Send the header
- s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
- if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
- return err
- }
- return nil
-}
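
The half-window heuristic above trades a little window slack for far fewer control frames: credit is only granted back once the deficit reaches half of the maximum window. A standalone sketch with concrete numbers (the 256 KB figure matches the default; the helper is illustrative):

```go
package main

import "fmt"

const maxWindow uint32 = 256 * 1024

// windowDelta reports how much credit the receiver could grant, and
// whether an update frame is worth sending under the half-window rule.
func windowDelta(recvWindow, buffered uint32) (delta uint32, send bool) {
	delta = (maxWindow - buffered) - recvWindow
	return delta, delta >= maxWindow/2
}

func main() {
	// After 100 KB consumed the remaining window is 156 KB: the 100 KB
	// delta is below half of 256 KB, so no update is sent yet.
	d, send := windowDelta(156*1024, 0)
	fmt.Println(d/1024, "KB delta, send:", send)

	// After 200 KB consumed the delta crosses the half-window mark.
	d, send = windowDelta(56*1024, 0)
	fmt.Println(d/1024, "KB delta, send:", send)
}
```
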
-
-// sendClose is used to send a FIN
-func (s *Stream) sendClose() error {
- s.controlHdrLock.Lock()
- defer s.controlHdrLock.Unlock()
-
- flags := s.sendFlags()
- flags |= flagFIN
- s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
- if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
- return err
- }
- return nil
-}
-
-// Close is used to close the stream
-func (s *Stream) Close() error {
- closeStream := false
- s.stateLock.Lock()
- switch s.state {
- // Opened means we need to signal a close
- case streamSYNSent:
- fallthrough
- case streamSYNReceived:
- fallthrough
- case streamEstablished:
- s.state = streamLocalClose
- goto SEND_CLOSE
-
- case streamLocalClose:
- case streamRemoteClose:
- s.state = streamClosed
- closeStream = true
- goto SEND_CLOSE
-
- case streamClosed:
- case streamReset:
- default:
- panic("unhandled state")
- }
- s.stateLock.Unlock()
- return nil
-SEND_CLOSE:
- s.stateLock.Unlock()
- s.sendClose()
- s.notifyWaiting()
- if closeStream {
- s.session.closeStream(s.id)
- }
- return nil
-}
-
-// forceClose is used for when the session is exiting
-func (s *Stream) forceClose() {
- s.stateLock.Lock()
- s.state = streamClosed
- s.stateLock.Unlock()
- s.notifyWaiting()
-}
-
-// processFlags is used to update the state of the stream
-// based on set flags, if any
-func (s *Stream) processFlags(flags uint16) error {
- // Close the stream without holding the state lock
- closeStream := false
- defer func() {
- if closeStream {
- s.session.closeStream(s.id)
- }
- }()
-
- s.stateLock.Lock()
- defer s.stateLock.Unlock()
- if flags&flagACK == flagACK {
- if s.state == streamSYNSent {
- s.state = streamEstablished
- }
- s.session.establishStream(s.id)
- }
- if flags&flagFIN == flagFIN {
- switch s.state {
- case streamSYNSent:
- fallthrough
- case streamSYNReceived:
- fallthrough
- case streamEstablished:
- s.state = streamRemoteClose
- s.notifyWaiting()
- case streamLocalClose:
- s.state = streamClosed
- closeStream = true
- s.notifyWaiting()
- default:
- s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
- return ErrUnexpectedFlag
- }
- }
- if flags&flagRST == flagRST {
- s.state = streamReset
- closeStream = true
- s.notifyWaiting()
- }
- return nil
-}
-
-// notifyWaiting notifies all the waiting channels
-func (s *Stream) notifyWaiting() {
- asyncNotify(s.recvNotifyCh)
- asyncNotify(s.sendNotifyCh)
-}
-
-// incrSendWindow updates the size of our send window
-func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
- if err := s.processFlags(flags); err != nil {
- return err
- }
-
- // Increase window, unblock a sender
- atomic.AddUint32(&s.sendWindow, hdr.Length())
- asyncNotify(s.sendNotifyCh)
- return nil
-}
-
-// readData is used to handle a data frame
-func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
- if err := s.processFlags(flags); err != nil {
- return err
- }
-
- // Check that our recv window is not exceeded
- length := hdr.Length()
- if length == 0 {
- return nil
- }
-
- // Wrap in a limited reader
- conn = &io.LimitedReader{R: conn, N: int64(length)}
-
- // Copy into buffer
- s.recvLock.Lock()
-
-	if length > s.recvWindow {
-		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
-		s.recvLock.Unlock()
-		return ErrRecvWindowExceeded
-	}
-
- if s.recvBuf == nil {
- // Allocate the receive buffer just-in-time to fit the full data frame.
- // This way we can read in the whole packet without further allocations.
- s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
- }
- if _, err := io.Copy(s.recvBuf, conn); err != nil {
- s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
- s.recvLock.Unlock()
- return err
- }
-
- // Decrement the receive window
- s.recvWindow -= length
- s.recvLock.Unlock()
-
- // Unblock any readers
- asyncNotify(s.recvNotifyCh)
- return nil
-}
-
-// SetDeadline sets the read and write deadlines
-func (s *Stream) SetDeadline(t time.Time) error {
- if err := s.SetReadDeadline(t); err != nil {
- return err
- }
- if err := s.SetWriteDeadline(t); err != nil {
- return err
- }
- return nil
-}
-
-// SetReadDeadline sets the deadline for future Read calls.
-func (s *Stream) SetReadDeadline(t time.Time) error {
- s.readDeadline.Store(t)
- return nil
-}
-
-// SetWriteDeadline sets the deadline for future Write calls
-func (s *Stream) SetWriteDeadline(t time.Time) error {
- s.writeDeadline.Store(t)
- return nil
-}
-
-// Shrink is used to compact the amount of buffers utilized
-// This is useful when using Yamux in a connection pool to reduce
-// the idle memory utilization.
-func (s *Stream) Shrink() {
- s.recvLock.Lock()
- if s.recvBuf != nil && s.recvBuf.Len() == 0 {
- s.recvBuf = nil
- }
- s.recvLock.Unlock()
-}
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
deleted file mode 100644
index 8a73e924..00000000
--- a/vendor/github.com/hashicorp/yamux/util.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package yamux
-
-import (
- "sync"
- "time"
-)
-
-var (
- timerPool = &sync.Pool{
- New: func() interface{} {
- timer := time.NewTimer(time.Hour * 1e6)
- timer.Stop()
- return timer
- },
- }
-)
-
-// asyncSendErr is used to try an async send of an error
-func asyncSendErr(ch chan error, err error) {
- if ch == nil {
- return
- }
- select {
- case ch <- err:
- default:
- }
-}
-
-// asyncNotify is used to signal a waiting goroutine
-func asyncNotify(ch chan struct{}) {
- select {
- case ch <- struct{}{}:
- default:
- }
-}
-
-// min computes the minimum of two values
-func min(a, b uint32) uint32 {
- if a < b {
- return a
- }
- return b
-}
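
`asyncNotify` depends on the notify channels being buffered with capacity 1: a send either deposits the single pending wake-up or is dropped because one is already pending, so notifications coalesce and the notifier never blocks. A tiny demonstration:

```go
package main

import "fmt"

// asyncNotify mirrors the helper above: best-effort, never blocks.
func asyncNotify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	ch := make(chan struct{}, 1)

	// Three rapid notifications coalesce into a single pending one.
	asyncNotify(ch)
	asyncNotify(ch)
	asyncNotify(ch)

	pending := 0
	for {
		select {
		case <-ch:
			pending++
		default:
			fmt.Println("pending notifications:", pending) // prints 1
			return
		}
	}
}
```
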
diff --git a/vendor/github.com/hpcloud/tail/.gitignore b/vendor/github.com/hpcloud/tail/.gitignore
deleted file mode 100644
index 6d9953c3..00000000
--- a/vendor/github.com/hpcloud/tail/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-.test
-.go
-
diff --git a/vendor/github.com/hpcloud/tail/.travis.yml b/vendor/github.com/hpcloud/tail/.travis.yml
deleted file mode 100644
index 9cf8bb7f..00000000
--- a/vendor/github.com/hpcloud/tail/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-script:
- - go test -race -v ./...
-
-go:
- - 1.4
- - 1.5
- - 1.6
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-install:
- - go get gopkg.in/fsnotify.v1
- - go get gopkg.in/tomb.v1
diff --git a/vendor/github.com/hpcloud/tail/CHANGES.md b/vendor/github.com/hpcloud/tail/CHANGES.md
deleted file mode 100644
index 422790c0..00000000
--- a/vendor/github.com/hpcloud/tail/CHANGES.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# API v1 (gopkg.in/hpcloud/tail.v1)
-
-## April, 2016
-
-* Migrated to godep, as depman is no longer supported
-* Introduced golang vendoring feature
-* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopening deleted files
-
-## July, 2015
-
-* Fix inotify watcher leak; remove `Cleanup` (#51)
-
-# API v0 (gopkg.in/hpcloud/tail.v0)
-
-## June, 2015
-
-* Don't return partial lines (PR #40)
-* Use stable version of fsnotify (#46)
-
-## July, 2014
-
-* Fix tail for Windows (PR #36)
-
-## May, 2014
-
-* Improved rate limiting using leaky bucket (PR #29)
-* Fix odd line splitting (PR #30)
-
-## Apr, 2014
-
-* LimitRate now discards read buffer (PR #28)
-* allow reading of longer lines if MaxLineSize is unset (PR #24)
-* updated deps.json to latest fsnotify (441bbc86b1)
-
-## Feb, 2014
-
-* added `Config.Logger` to suppress library logging
-
-## Nov, 2013
-
-* add Cleanup to remove leaky inotify watches (PR #20)
-
-## Aug, 2013
-
-* redesigned Location field (PR #12)
-* add tail.Tell (PR #14)
-
-## July, 2013
-
-* Rate limiting (PR #10)
-
-## May, 2013
-
-* Detect file deletions/renames in polling file watcher (PR #1)
-* Detect file truncation
-* Fix potential race condition when reopening the file (issue 5)
-* Fix potential blocking of `tail.Stop` (issue 4)
-* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
-* Support Follow=false
-
-## Feb, 2013
-
-* Initial open source release
diff --git a/vendor/github.com/hpcloud/tail/Dockerfile b/vendor/github.com/hpcloud/tail/Dockerfile
deleted file mode 100644
index cd297b94..00000000
--- a/vendor/github.com/hpcloud/tail/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM golang
-
-RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
-ADD . $GOPATH/src/github.com/hpcloud/tail/
-
-# expecting to fetch dependencies successfully.
-RUN go get -v github.com/hpcloud/tail
-
-# expecting to run the test successfully.
-RUN go test -v github.com/hpcloud/tail
-
-# expecting to install successfully
-RUN go install -v github.com/hpcloud/tail
-RUN go install -v github.com/hpcloud/tail/cmd/gotail
-
-RUN $GOPATH/bin/gotail -h || true
-
-ENV PATH $GOPATH/bin:$PATH
-CMD ["gotail"]
diff --git a/vendor/github.com/hpcloud/tail/LICENSE.txt b/vendor/github.com/hpcloud/tail/LICENSE.txt
deleted file mode 100644
index 818d802a..00000000
--- a/vendor/github.com/hpcloud/tail/LICENSE.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# The MIT License (MIT)
-
-# © Copyright 2015 Hewlett Packard Enterprise Development LP
-Copyright (c) 2014 ActiveState
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/hpcloud/tail/Makefile b/vendor/github.com/hpcloud/tail/Makefile
deleted file mode 100644
index 6591b24f..00000000
--- a/vendor/github.com/hpcloud/tail/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-default: test
-
-test: *.go
- go test -v -race ./...
-
-fmt:
- gofmt -w .
-
-# Run the test in an isolated environment.
-fulltest:
- docker build -t hpcloud/tail .
diff --git a/vendor/github.com/hpcloud/tail/README.md b/vendor/github.com/hpcloud/tail/README.md
deleted file mode 100644
index fb7fbc26..00000000
--- a/vendor/github.com/hpcloud/tail/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail)
-[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail)
-
-# Go package for tail-ing files
-
-A Go package striving to emulate the features of the BSD `tail` program.
-
-```Go
-t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
-for line := range t.Lines {
- fmt.Println(line.Text)
-}
-```
-
-See [API documentation](http://godoc.org/github.com/hpcloud/tail).
-
-## Log rotation
-
-Tail comes with full support for truncation/move detection as it is
-designed to work with log rotation tools.
-
-## Installing
-
- go get github.com/hpcloud/tail/...
-
-## Windows support
-
-This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.
diff --git a/vendor/github.com/hpcloud/tail/appveyor.yml b/vendor/github.com/hpcloud/tail/appveyor.yml
deleted file mode 100644
index d370055b..00000000
--- a/vendor/github.com/hpcloud/tail/appveyor.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-version: 0.{build}
-skip_tags: true
-cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
-build_script:
-- SET GOPATH=c:\workspace
-- go test -v -race ./...
-test: off
-clone_folder: c:\workspace\src\github.com\hpcloud\tail
-branches:
- only:
- - master
diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/vendor/github.com/hpcloud/tail/ratelimiter/Licence
deleted file mode 100644
index 434aab19..00000000
--- a/vendor/github.com/hpcloud/tail/ratelimiter/Licence
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright (C) 2013 99designs
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
deleted file mode 100644
index 358b69e7..00000000
--- a/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
-package ratelimiter
-
-import (
- "time"
-)
-
-type LeakyBucket struct {
- Size uint16
- Fill float64
- LeakInterval time.Duration // time.Duration for 1 unit of size to leak
- Lastupdate time.Time
- Now func() time.Time
-}
-
-func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
- bucket := LeakyBucket{
- Size: size,
- Fill: 0,
- LeakInterval: leakInterval,
- Now: time.Now,
- Lastupdate: time.Now(),
- }
-
- return &bucket
-}
-
-func (b *LeakyBucket) updateFill() {
- now := b.Now()
- if b.Fill > 0 {
- elapsed := now.Sub(b.Lastupdate)
-
- b.Fill -= float64(elapsed) / float64(b.LeakInterval)
- if b.Fill < 0 {
- b.Fill = 0
- }
- }
- b.Lastupdate = now
-}
-
-func (b *LeakyBucket) Pour(amount uint16) bool {
- b.updateFill()
-
- var newfill float64 = b.Fill + float64(amount)
-
- if newfill > float64(b.Size) {
- return false
- }
-
- b.Fill = newfill
-
- return true
-}
-
-// The time at which this bucket will be completely drained
-func (b *LeakyBucket) DrainedAt() time.Time {
- return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
-}
-
-// The duration until this bucket is completely drained
-func (b *LeakyBucket) TimeToDrain() time.Duration {
- return b.DrainedAt().Sub(b.Now())
-}
-
-func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
- return b.Now().Sub(b.Lastupdate)
-}
-
-type LeakyBucketSer struct {
- Size uint16
- Fill float64
- LeakInterval time.Duration // time.Duration for 1 unit of size to leak
- Lastupdate time.Time
-}
-
-func (b *LeakyBucket) Serialise() *LeakyBucketSer {
- bucket := LeakyBucketSer{
- Size: b.Size,
- Fill: b.Fill,
- LeakInterval: b.LeakInterval,
- Lastupdate: b.Lastupdate,
- }
-
- return &bucket
-}
-
-func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
- bucket := LeakyBucket{
- Size: b.Size,
- Fill: b.Fill,
- LeakInterval: b.LeakInterval,
- Lastupdate: b.Lastupdate,
- Now: time.Now,
- }
-
- return &bucket
-}
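
A brief usage sketch of the bucket as exposed here: pours that would overflow the capacity are rejected, and fill leaks away over time (the numbers are arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hpcloud/tail/ratelimiter"
)

func main() {
	// Capacity 10, with one unit of fill leaking away every 100ms.
	bucket := ratelimiter.NewLeakyBucket(10, 100*time.Millisecond)

	fmt.Println(bucket.Pour(8)) // true: fill 8 fits under the size of 10
	fmt.Println(bucket.Pour(5)) // false: 8+5 would overflow, fill unchanged

	time.Sleep(300 * time.Millisecond) // roughly 3 units leak away
	fmt.Println(bucket.Pour(5))        // true again: ~5+5 fits
}
```
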
diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/vendor/github.com/hpcloud/tail/ratelimiter/memory.go
deleted file mode 100644
index 8f6a5784..00000000
--- a/vendor/github.com/hpcloud/tail/ratelimiter/memory.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package ratelimiter
-
-import (
- "errors"
- "time"
-)
-
-const GC_SIZE int = 100
-
-type Memory struct {
- store map[string]LeakyBucket
- lastGCCollected time.Time
-}
-
-func NewMemory() *Memory {
- m := new(Memory)
- m.store = make(map[string]LeakyBucket)
- m.lastGCCollected = time.Now()
- return m
-}
-
-func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
-
- bucket, ok := m.store[key]
- if !ok {
- return nil, errors.New("miss")
- }
-
- return &bucket, nil
-}
-
-func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
-
- if len(m.store) > GC_SIZE {
- m.GarbageCollect()
- }
-
- m.store[key] = bucket
-
- return nil
-}
-
-func (m *Memory) GarbageCollect() {
- now := time.Now()
-
-	// rate limit GC to once per minute
-	if now.Unix() > m.lastGCCollected.Add(60*time.Second).Unix() {
-
-		for key, bucket := range m.store {
-			// if the bucket has fully drained, then GC it
-			if bucket.DrainedAt().Unix() <= now.Unix() {
- delete(m.store, key)
- }
- }
-
- m.lastGCCollected = now
- }
-}
diff --git a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/vendor/github.com/hpcloud/tail/ratelimiter/storage.go
deleted file mode 100644
index 89b2fe88..00000000
--- a/vendor/github.com/hpcloud/tail/ratelimiter/storage.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package ratelimiter
-
-type Storage interface {
- GetBucketFor(string) (*LeakyBucket, error)
- SetBucketFor(string, LeakyBucket) error
-}
diff --git a/vendor/github.com/hpcloud/tail/tail.go b/vendor/github.com/hpcloud/tail/tail.go
deleted file mode 100644
index 2d252d60..00000000
--- a/vendor/github.com/hpcloud/tail/tail.go
+++ /dev/null
@@ -1,438 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package tail
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "strings"
- "sync"
- "time"
-
- "github.com/hpcloud/tail/ratelimiter"
- "github.com/hpcloud/tail/util"
- "github.com/hpcloud/tail/watch"
- "gopkg.in/tomb.v1"
-)
-
-var (
- ErrStop = fmt.Errorf("tail should now stop")
-)
-
-type Line struct {
- Text string
- Time time.Time
- Err error // Error from tail
-}
-
-// NewLine returns a Line with the current time.
-func NewLine(text string) *Line {
- return &Line{text, time.Now(), nil}
-}
-
-// SeekInfo represents arguments to `os.File.Seek`
-type SeekInfo struct {
- Offset int64
- Whence int // os.SEEK_*
-}
-
-type logger interface {
- Fatal(v ...interface{})
- Fatalf(format string, v ...interface{})
- Fatalln(v ...interface{})
- Panic(v ...interface{})
- Panicf(format string, v ...interface{})
- Panicln(v ...interface{})
- Print(v ...interface{})
- Printf(format string, v ...interface{})
- Println(v ...interface{})
-}
-
-// Config is used to specify how a file must be tailed.
-type Config struct {
-	// File-specific
- Location *SeekInfo // Seek to this location before tailing
- ReOpen bool // Reopen recreated files (tail -F)
- MustExist bool // Fail early if the file does not exist
- Poll bool // Poll for file changes instead of using inotify
- Pipe bool // Is a named pipe (mkfifo)
- RateLimiter *ratelimiter.LeakyBucket
-
- // Generic IO
- Follow bool // Continue looking for new lines (tail -f)
- MaxLineSize int // If non-zero, split longer lines into multiple lines
-
- // Logger, when nil, is set to tail.DefaultLogger
- // To disable logging: set field to tail.DiscardingLogger
- Logger logger
-}
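
As a rough usage sketch, the combination of Follow plus ReOpen approximates `tail -F`; the log path and the choice of DiscardingLogger below are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hpcloud/tail"
)

func main() {
	// Follow + ReOpen approximates `tail -F`: keep waiting for new
	// lines and survive rotation of the (hypothetical) log path.
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		Follow: true,
		ReOpen: true,
		Logger: tail.DiscardingLogger, // silence library logging
	})
	if err != nil {
		panic(err)
	}
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}
```
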
-
-type Tail struct {
- Filename string
- Lines chan *Line
- Config
-
- file *os.File
- reader *bufio.Reader
-
- watcher watch.FileWatcher
- changes *watch.FileChanges
-
- tomb.Tomb // provides: Done, Kill, Dying
-
- lk sync.Mutex
-}
-
-var (
- // DefaultLogger is used when Config.Logger == nil
- DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
- // DiscardingLogger can be used to disable logging output
- DiscardingLogger = log.New(ioutil.Discard, "", 0)
-)
-
-// TailFile begins tailing the file. Output stream is made available
-// via the `Tail.Lines` channel. To handle errors during tailing,
-// invoke the `Wait` or `Err` method after finishing reading from the
-// `Lines` channel.
-func TailFile(filename string, config Config) (*Tail, error) {
- if config.ReOpen && !config.Follow {
- util.Fatal("cannot set ReOpen without Follow.")
- }
-
- t := &Tail{
- Filename: filename,
- Lines: make(chan *Line),
- Config: config,
- }
-
- // when Logger was not specified in config, use default logger
-	// when Logger was not specified in config, use the default logger
-	if t.Logger == nil {
-		t.Logger = DefaultLogger
-	}
-
- if t.Poll {
- t.watcher = watch.NewPollingFileWatcher(filename)
- } else {
- t.watcher = watch.NewInotifyFileWatcher(filename)
- }
-
- if t.MustExist {
- var err error
- t.file, err = OpenFile(t.Filename)
- if err != nil {
- return nil, err
- }
- }
-
- go t.tailFileSync()
-
- return t, nil
-}
-
-// Tell returns the file's current position, like stdio's ftell().
-// Note that the value may be inaccurate: a line may already have been
-// read into the Lines channel but not yet consumed, so the offset can
-// be one line ahead of the caller's position.
-func (tail *Tail) Tell() (offset int64, err error) {
- if tail.file == nil {
- return
- }
- offset, err = tail.file.Seek(0, os.SEEK_CUR)
- if err != nil {
- return
- }
-
- tail.lk.Lock()
- defer tail.lk.Unlock()
- if tail.reader == nil {
- return
- }
-
- offset -= int64(tail.reader.Buffered())
- return
-}
-
-// Stop stops the tailing activity.
-func (tail *Tail) Stop() error {
- tail.Kill(nil)
- return tail.Wait()
-}
-
-// StopAtEOF stops tailing as soon as the end of the file is reached.
-func (tail *Tail) StopAtEOF() error {
- tail.Kill(errStopAtEOF)
- return tail.Wait()
-}
-
-var errStopAtEOF = errors.New("tail: stop at eof")
-
-func (tail *Tail) close() {
- close(tail.Lines)
- tail.closeFile()
-}
-
-func (tail *Tail) closeFile() {
- if tail.file != nil {
- tail.file.Close()
- tail.file = nil
- }
-}
-
-func (tail *Tail) reopen() error {
- tail.closeFile()
- for {
- var err error
- tail.file, err = OpenFile(tail.Filename)
- if err != nil {
- if os.IsNotExist(err) {
- tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
- if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
- if err == tomb.ErrDying {
- return err
- }
- return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
- }
- continue
- }
- return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
- }
- break
- }
- return nil
-}
-
-func (tail *Tail) readLine() (string, error) {
- tail.lk.Lock()
- line, err := tail.reader.ReadString('\n')
- tail.lk.Unlock()
- if err != nil {
- // Note ReadString "returns the data read before the error" in
- // case of an error, including EOF, so we return it as is. The
- // caller is expected to process it if err is EOF.
- return line, err
- }
-
- line = strings.TrimRight(line, "\n")
-
- return line, err
-}
-
-func (tail *Tail) tailFileSync() {
- defer tail.Done()
- defer tail.close()
-
- if !tail.MustExist {
- // deferred first open.
- err := tail.reopen()
- if err != nil {
- if err != tomb.ErrDying {
- tail.Kill(err)
- }
- return
- }
- }
-
- // Seek to requested location on first open of the file.
- if tail.Location != nil {
-		_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
-		if err != nil {
-			tail.Killf("Seek error on %s: %s", tail.Filename, err)
-			return
-		}
-		tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
- }
-
- tail.openReader()
-
- var offset int64 = 0
- var err error
-
- // Read line by line.
- for {
- // do not seek in named pipes
- if !tail.Pipe {
- // grab the position in case we need to back up in the event of a half-line
- offset, err = tail.Tell()
- if err != nil {
- tail.Kill(err)
- return
- }
- }
-
- line, err := tail.readLine()
-
- // Process `line` even if err is EOF.
- if err == nil {
- cooloff := !tail.sendLine(line)
- if cooloff {
-				// Wait a second before seeking to the end of
-				// the file when the rate limit is reached.
-				msg := "Too much log activity; waiting a second " +
-					"before resuming tailing"
-				tail.Lines <- &Line{msg, time.Now(), errors.New(msg)}
- select {
- case <-time.After(time.Second):
- case <-tail.Dying():
- return
- }
- if err := tail.seekEnd(); err != nil {
- tail.Kill(err)
- return
- }
- }
- } else if err == io.EOF {
- if !tail.Follow {
- if line != "" {
- tail.sendLine(line)
- }
- return
- }
-
- if tail.Follow && line != "" {
- // this has the potential to never return the last line if
- // it's not followed by a newline; seems a fair trade here
- err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
- if err != nil {
- tail.Kill(err)
- return
- }
- }
-
- // When EOF is reached, wait for more data to become
- // available. Wait strategy is based on the `tail.watcher`
- // implementation (inotify or polling).
- err := tail.waitForChanges()
- if err != nil {
- if err != ErrStop {
- tail.Kill(err)
- }
- return
- }
- } else {
- // non-EOF error
- tail.Killf("Error reading %s: %s", tail.Filename, err)
- return
- }
-
- select {
- case <-tail.Dying():
- if tail.Err() == errStopAtEOF {
- continue
- }
- return
- default:
- }
- }
-}
-
-// waitForChanges waits until the file has been appended to, deleted,
-// moved, or truncated. When the file is moved or deleted, it is
-// reopened if ReOpen is true. Truncated files are always reopened.
-func (tail *Tail) waitForChanges() error {
- if tail.changes == nil {
- pos, err := tail.file.Seek(0, os.SEEK_CUR)
- if err != nil {
- return err
- }
- tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
- if err != nil {
- return err
- }
- }
-
- select {
- case <-tail.changes.Modified:
- return nil
- case <-tail.changes.Deleted:
- tail.changes = nil
- if tail.ReOpen {
- // XXX: we must not log from a library.
- tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
- if err := tail.reopen(); err != nil {
- return err
- }
- tail.Logger.Printf("Successfully reopened %s", tail.Filename)
- tail.openReader()
- return nil
- } else {
- tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
- return ErrStop
- }
- case <-tail.changes.Truncated:
- // Always reopen truncated files (Follow is true)
- tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
- if err := tail.reopen(); err != nil {
- return err
- }
- tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
- tail.openReader()
- return nil
- case <-tail.Dying():
- return ErrStop
- }
- panic("unreachable")
-}
-
-func (tail *Tail) openReader() {
- if tail.MaxLineSize > 0 {
- // add 2 to account for newline characters
- tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
- } else {
- tail.reader = bufio.NewReader(tail.file)
- }
-}
-
-func (tail *Tail) seekEnd() error {
- return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
-}
-
-func (tail *Tail) seekTo(pos SeekInfo) error {
- _, err := tail.file.Seek(pos.Offset, pos.Whence)
- if err != nil {
- return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
- }
-	// Reset the read buffer whenever the file is re-seeked.
- tail.reader.Reset(tail.file)
- return nil
-}
-
-// sendLine sends the line(s) to the Lines channel, splitting longer lines
-// if necessary. It returns false if the rate limit is reached.
-func (tail *Tail) sendLine(line string) bool {
- now := time.Now()
- lines := []string{line}
-
- // Split longer lines
- if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
- lines = util.PartitionString(line, tail.MaxLineSize)
- }
-
- for _, line := range lines {
- tail.Lines <- &Line{line, now, nil}
- }
-
- if tail.Config.RateLimiter != nil {
- ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
- if !ok {
- tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
- tail.Filename)
- return false
- }
- }
-
- return true
-}
-
-// Cleanup removes inotify watches added by the tail package. This function is
-// meant to be invoked from a process's exit handler. Linux kernel may not
-// automatically remove inotify watches after the process exits.
-func (tail *Tail) Cleanup() {
- watch.Cleanup(tail.Filename)
-}
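
For reference, a minimal sketch of how the package deleted above is consumed.
The log path and config values are illustrative, and it assumes Line exposes
Text/Time/Err fields as in the upstream library:

package main

import (
	"fmt"

	"github.com/hpcloud/tail"
)

func main() {
	// Follow the file like `tail -f`, reopening it after rotation
	// (ReOpen requires Follow, as enforced by TailFile).
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		Follow: true,
		ReOpen: true,
	})
	if err != nil {
		panic(err)
	}
	// Lines is closed when tailing stops; check the error afterwards.
	for line := range t.Lines {
		fmt.Println(line.Text)
	}
	if err := t.Wait(); err != nil {
		fmt.Println("tail stopped:", err)
	}
}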
diff --git a/vendor/github.com/hpcloud/tail/tail_posix.go b/vendor/github.com/hpcloud/tail/tail_posix.go
deleted file mode 100644
index bc4dc335..00000000
--- a/vendor/github.com/hpcloud/tail/tail_posix.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux darwin freebsd netbsd openbsd
-
-package tail
-
-import (
- "os"
-)
-
-func OpenFile(name string) (file *os.File, err error) {
- return os.Open(name)
-}
diff --git a/vendor/github.com/hpcloud/tail/tail_windows.go b/vendor/github.com/hpcloud/tail/tail_windows.go
deleted file mode 100644
index ef2cfca1..00000000
--- a/vendor/github.com/hpcloud/tail/tail_windows.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build windows
-
-package tail
-
-import (
- "github.com/hpcloud/tail/winfile"
- "os"
-)
-
-func OpenFile(name string) (file *os.File, err error) {
- return winfile.OpenFile(name, os.O_RDONLY, 0)
-}
diff --git a/vendor/github.com/hpcloud/tail/util/util.go b/vendor/github.com/hpcloud/tail/util/util.go
deleted file mode 100644
index 54151fe3..00000000
--- a/vendor/github.com/hpcloud/tail/util/util.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package util
-
-import (
- "fmt"
- "log"
- "os"
- "runtime/debug"
-)
-
-type Logger struct {
- *log.Logger
-}
-
-var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
-
-// Fatal is like panic except it displays only the current goroutine's stack.
-func Fatal(format string, v ...interface{}) {
- // https://github.com/hpcloud/log/blob/master/log.go#L45
- LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
- os.Exit(1)
-}
-
-// PartitionString partitions the string into chunks of given size,
-// with the last chunk of variable size.
-func PartitionString(s string, chunkSize int) []string {
- if chunkSize <= 0 {
- panic("invalid chunkSize")
- }
- length := len(s)
- chunks := 1 + length/chunkSize
- start := 0
- end := chunkSize
- parts := make([]string, 0, chunks)
- for {
- if end > length {
- end = length
- }
- parts = append(parts, s[start:end])
- if end == length {
- break
- }
- start, end = end, end+chunkSize
- }
- return parts
-}
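
PartitionString's chunking is easiest to see on a concrete input; a small
sketch, assuming the package were still importable:

package main

import (
	"fmt"

	"github.com/hpcloud/tail/util"
)

func main() {
	// Eight characters in chunks of three: the last chunk is shorter.
	fmt.Println(util.PartitionString("abcdefgh", 3)) // [abc def gh]
}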
diff --git a/vendor/github.com/hpcloud/tail/watch/filechanges.go b/vendor/github.com/hpcloud/tail/watch/filechanges.go
deleted file mode 100644
index 3ce5dcec..00000000
--- a/vendor/github.com/hpcloud/tail/watch/filechanges.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package watch
-
-type FileChanges struct {
- Modified chan bool // Channel to get notified of modifications
- Truncated chan bool // Channel to get notified of truncations
- Deleted chan bool // Channel to get notified of deletions/renames
-}
-
-func NewFileChanges() *FileChanges {
- return &FileChanges{
- make(chan bool), make(chan bool), make(chan bool)}
-}
-
-func (fc *FileChanges) NotifyModified() {
- sendOnlyIfEmpty(fc.Modified)
-}
-
-func (fc *FileChanges) NotifyTruncated() {
- sendOnlyIfEmpty(fc.Truncated)
-}
-
-func (fc *FileChanges) NotifyDeleted() {
- sendOnlyIfEmpty(fc.Deleted)
-}
-
-// sendOnlyIfEmpty sends on a bool channel only if the channel has no
-// backlog to be read by other goroutines. This concurrency pattern
-// can be used to notify other goroutines if and only if they are
-// looking for it (i.e., subsequent notifications can be compressed
-// into one).
-func sendOnlyIfEmpty(ch chan bool) {
- select {
- case ch <- true:
- default:
- }
-}
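
The sendOnlyIfEmpty idiom above is a general coalescing-notification pattern:
with an unbuffered channel, a send inside a select with a default case only
lands when a receiver is already waiting, so bursts collapse into a single
wake-up. A standalone sketch (all names below are mine, not the library's):

package main

import (
	"fmt"
	"time"
)

// notify delivers on ch only if a receiver is currently blocked on it;
// otherwise the notification is silently dropped.
func notify(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}

func main() {
	ch := make(chan bool)
	go func() {
		for range ch {
			fmt.Println("woke up")
			time.Sleep(50 * time.Millisecond) // simulate slow work
		}
	}()
	for i := 0; i < 10; i++ {
		notify(ch) // most of these are dropped while the worker is busy
		time.Sleep(10 * time.Millisecond)
	}
	time.Sleep(100 * time.Millisecond)
}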
diff --git a/vendor/github.com/hpcloud/tail/watch/inotify.go b/vendor/github.com/hpcloud/tail/watch/inotify.go
deleted file mode 100644
index 4478f1e1..00000000
--- a/vendor/github.com/hpcloud/tail/watch/inotify.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/hpcloud/tail/util"
-
- "gopkg.in/fsnotify.v1"
- "gopkg.in/tomb.v1"
-)
-
-// InotifyFileWatcher uses inotify to monitor file changes.
-type InotifyFileWatcher struct {
- Filename string
- Size int64
-}
-
-func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
- fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
- return fw
-}
-
-func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
- err := WatchCreate(fw.Filename)
- if err != nil {
- return err
- }
- defer RemoveWatchCreate(fw.Filename)
-
-	// Do a real check now as the file might have been created before
-	// calling `WatchCreate` above.
- if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
- // file exists, or stat returned an error.
- return err
- }
-
- events := Events(fw.Filename)
-
- for {
- select {
- case evt, ok := <-events:
- if !ok {
- return fmt.Errorf("inotify watcher has been closed")
- }
- evtName, err := filepath.Abs(evt.Name)
- if err != nil {
- return err
- }
- fwFilename, err := filepath.Abs(fw.Filename)
- if err != nil {
- return err
- }
- if evtName == fwFilename {
- return nil
- }
- case <-t.Dying():
- return tomb.ErrDying
- }
- }
- panic("unreachable")
-}
-
-func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
- err := Watch(fw.Filename)
- if err != nil {
- return nil, err
- }
-
- changes := NewFileChanges()
- fw.Size = pos
-
- go func() {
- defer RemoveWatch(fw.Filename)
-
- events := Events(fw.Filename)
-
- for {
- prevSize := fw.Size
-
- var evt fsnotify.Event
- var ok bool
-
- select {
- case evt, ok = <-events:
- if !ok {
- return
- }
- case <-t.Dying():
- return
- }
-
- switch {
- case evt.Op&fsnotify.Remove == fsnotify.Remove:
- fallthrough
-
- case evt.Op&fsnotify.Rename == fsnotify.Rename:
- changes.NotifyDeleted()
- return
-
- case evt.Op&fsnotify.Write == fsnotify.Write:
- fi, err := os.Stat(fw.Filename)
- if err != nil {
- if os.IsNotExist(err) {
- changes.NotifyDeleted()
- return
- }
- // XXX: report this error back to the user
- util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
- }
- fw.Size = fi.Size()
-
- if prevSize > 0 && prevSize > fw.Size {
- changes.NotifyTruncated()
- } else {
- changes.NotifyModified()
- }
- prevSize = fw.Size
- }
- }
- }()
-
- return changes, nil
-}
diff --git a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
deleted file mode 100644
index 03be4275..00000000
--- a/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "log"
- "os"
- "path/filepath"
- "sync"
- "syscall"
-
- "github.com/hpcloud/tail/util"
-
- "gopkg.in/fsnotify.v1"
-)
-
-type InotifyTracker struct {
- mux sync.Mutex
- watcher *fsnotify.Watcher
- chans map[string]chan fsnotify.Event
- done map[string]chan bool
- watchNums map[string]int
- watch chan *watchInfo
- remove chan *watchInfo
- error chan error
-}
-
-type watchInfo struct {
- op fsnotify.Op
- fname string
-}
-
-func (winfo *watchInfo) isCreate() bool {
-	return winfo.op == fsnotify.Create
-}
-
-var (
- // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
- shared *InotifyTracker
-
- // these are used to ensure the shared InotifyTracker is run exactly once
- once = sync.Once{}
- goRun = func() {
- shared = &InotifyTracker{
- mux: sync.Mutex{},
- chans: make(map[string]chan fsnotify.Event),
- done: make(map[string]chan bool),
- watchNums: make(map[string]int),
- watch: make(chan *watchInfo),
- remove: make(chan *watchInfo),
- error: make(chan error),
- }
- go shared.run()
- }
-
- logger = log.New(os.Stderr, "", log.LstdFlags)
-)
-
-// Watch signals the run goroutine to begin watching the input filename
-func Watch(fname string) error {
- return watch(&watchInfo{
- fname: fname,
- })
-}
-
-// WatchCreate signals the run goroutine to begin watching for creation of the
-// input filename. If you call WatchCreate, call RemoveWatchCreate, not Cleanup.
-func WatchCreate(fname string) error {
- return watch(&watchInfo{
- op: fsnotify.Create,
- fname: fname,
- })
-}
-
-func watch(winfo *watchInfo) error {
- // start running the shared InotifyTracker if not already running
- once.Do(goRun)
-
- winfo.fname = filepath.Clean(winfo.fname)
- shared.watch <- winfo
- return <-shared.error
-}
-
-// RemoveWatch signals the run goroutine to remove the watch for the input filename
-func RemoveWatch(fname string) {
- remove(&watchInfo{
- fname: fname,
- })
-}
-
-// RemoveWatchCreate signals the run goroutine to remove the creation watch for the input filename.
-func RemoveWatchCreate(fname string) {
- remove(&watchInfo{
- op: fsnotify.Create,
- fname: fname,
- })
-}
-
-func remove(winfo *watchInfo) {
- // start running the shared InotifyTracker if not already running
- once.Do(goRun)
-
- winfo.fname = filepath.Clean(winfo.fname)
- shared.mux.Lock()
- done := shared.done[winfo.fname]
- if done != nil {
- delete(shared.done, winfo.fname)
- close(done)
- }
-
- fname := winfo.fname
- if winfo.isCreate() {
- // Watch for new files to be created in the parent directory.
- fname = filepath.Dir(fname)
- }
- shared.watchNums[fname]--
- watchNum := shared.watchNums[fname]
- if watchNum == 0 {
- delete(shared.watchNums, fname)
- }
- shared.mux.Unlock()
-
- // If we were the last ones to watch this file, unsubscribe from inotify.
- // This needs to happen after releasing the lock because fsnotify waits
- // synchronously for the kernel to acknowledge the removal of the watch
- // for this file, which causes us to deadlock if we still held the lock.
- if watchNum == 0 {
- shared.watcher.Remove(fname)
- }
- shared.remove <- winfo
-}
-
-// Events returns a channel to which FileEvents corresponding to the input
-// filename will be sent. The channel is closed when RemoveWatch is called
-// on this filename.
-func Events(fname string) <-chan fsnotify.Event {
- shared.mux.Lock()
- defer shared.mux.Unlock()
-
- return shared.chans[fname]
-}
-
-// Cleanup removes the watch for the input filename if necessary.
-func Cleanup(fname string) {
- RemoveWatch(fname)
-}
-
-// addWatch registers the input filename with the shared fsnotify.Watcher,
-// reference-counting watches so each file is only added to inotify once.
-func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
- shared.mux.Lock()
- defer shared.mux.Unlock()
-
- if shared.chans[winfo.fname] == nil {
- shared.chans[winfo.fname] = make(chan fsnotify.Event)
- shared.done[winfo.fname] = make(chan bool)
- }
-
- fname := winfo.fname
- if winfo.isCreate() {
- // Watch for new files to be created in the parent directory.
- fname = filepath.Dir(fname)
- }
-
- // already in inotify watch
- if shared.watchNums[fname] > 0 {
- shared.watchNums[fname]++
- if winfo.isCreate() {
- shared.watchNums[winfo.fname]++
- }
- return nil
- }
-
- err := shared.watcher.Add(fname)
- if err == nil {
- shared.watchNums[fname]++
- if winfo.isCreate() {
- shared.watchNums[winfo.fname]++
- }
- }
- return err
-}
-
-// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
-// corresponding events channel.
-func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
- shared.mux.Lock()
- defer shared.mux.Unlock()
-
- ch := shared.chans[winfo.fname]
- if ch == nil {
- return
- }
-
- delete(shared.chans, winfo.fname)
- close(ch)
-
- if !winfo.isCreate() {
- return
- }
-
- shared.watchNums[winfo.fname]--
- if shared.watchNums[winfo.fname] == 0 {
- delete(shared.watchNums, winfo.fname)
- }
-}
-
-// sendEvent sends the input event to the appropriate Tail.
-func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
- name := filepath.Clean(event.Name)
-
- shared.mux.Lock()
- ch := shared.chans[name]
- done := shared.done[name]
- shared.mux.Unlock()
-
- if ch != nil && done != nil {
- select {
- case ch <- event:
- case <-done:
- }
- }
-}
-
-// run starts the goroutine in which the shared struct reads events from its
-// Watcher's Event channel and sends the events to the appropriate Tail.
-func (shared *InotifyTracker) run() {
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- util.Fatal("failed to create Watcher")
- }
- shared.watcher = watcher
-
- for {
- select {
- case winfo := <-shared.watch:
- shared.error <- shared.addWatch(winfo)
-
- case winfo := <-shared.remove:
- shared.removeWatch(winfo)
-
- case event, open := <-shared.watcher.Events:
- if !open {
- return
- }
- shared.sendEvent(event)
-
- case err, open := <-shared.watcher.Errors:
- if !open {
- return
- } else if err != nil {
- sysErr, ok := err.(*os.SyscallError)
- if !ok || sysErr.Err != syscall.EINTR {
- logger.Printf("Error in Watcher Error channel: %s", err)
- }
- }
- }
- }
-}
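
The once.Do(goRun) construction above, a lazily started process-wide worker,
reduces to a small standalone pattern (names below are mine, not the
library's):

package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	once  sync.Once
	reqCh chan string
	// goRun starts the single shared worker; once.Do guarantees it runs
	// exactly once no matter how many goroutines call watch concurrently.
	goRun = func() {
		reqCh = make(chan string)
		go func() {
			for name := range reqCh {
				fmt.Println("watching", name)
			}
		}()
	}
)

func watch(name string) {
	once.Do(goRun)
	reqCh <- name
}

func main() {
	watch("a.log")
	watch("b.log")
	time.Sleep(10 * time.Millisecond) // let the worker print
}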
diff --git a/vendor/github.com/hpcloud/tail/watch/polling.go b/vendor/github.com/hpcloud/tail/watch/polling.go
deleted file mode 100644
index 49491f21..00000000
--- a/vendor/github.com/hpcloud/tail/watch/polling.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "os"
- "runtime"
- "time"
-
- "github.com/hpcloud/tail/util"
- "gopkg.in/tomb.v1"
-)
-
-// PollingFileWatcher polls the file for changes.
-type PollingFileWatcher struct {
- Filename string
- Size int64
-}
-
-func NewPollingFileWatcher(filename string) *PollingFileWatcher {
- fw := &PollingFileWatcher{filename, 0}
- return fw
-}
-
-var POLL_DURATION time.Duration
-
-func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
- for {
- if _, err := os.Stat(fw.Filename); err == nil {
- return nil
- } else if !os.IsNotExist(err) {
- return err
- }
- select {
- case <-time.After(POLL_DURATION):
- continue
- case <-t.Dying():
- return tomb.ErrDying
- }
- }
- panic("unreachable")
-}
-
-func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
- origFi, err := os.Stat(fw.Filename)
- if err != nil {
- return nil, err
- }
-
- changes := NewFileChanges()
- var prevModTime time.Time
-
-	// XXX: use tomb.Tomb to cleanly manage these goroutines. Replace
-	// the fatal (below) with tomb's Kill.
-
- fw.Size = pos
-
- go func() {
- prevSize := fw.Size
- for {
- select {
- case <-t.Dying():
- return
- default:
- }
-
- time.Sleep(POLL_DURATION)
- fi, err := os.Stat(fw.Filename)
- if err != nil {
- // Windows cannot delete a file if a handle is still open (tail keeps one open)
- // so it gives access denied to anything trying to read it until all handles are released.
- if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
- // File does not exist (has been deleted).
- changes.NotifyDeleted()
- return
- }
-
- // XXX: report this error back to the user
- util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
- }
-
- // File got moved/renamed?
- if !os.SameFile(origFi, fi) {
- changes.NotifyDeleted()
- return
- }
-
- // File got truncated?
- fw.Size = fi.Size()
- if prevSize > 0 && prevSize > fw.Size {
- changes.NotifyTruncated()
- prevSize = fw.Size
- continue
- }
- // File got bigger?
- if prevSize > 0 && prevSize < fw.Size {
- changes.NotifyModified()
- prevSize = fw.Size
- continue
- }
- prevSize = fw.Size
-
- // File was appended to (changed)?
- modTime := fi.ModTime()
- if modTime != prevModTime {
- prevModTime = modTime
- changes.NotifyModified()
- }
- }
- }()
-
- return changes, nil
-}
-
-func init() {
- POLL_DURATION = 250 * time.Millisecond
-}
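
The truncation heuristic in the polling loop above (previous size greater
than current size means truncated, smaller means appended) can be exercised
in isolation; a sketch with hypothetical names:

package main

import "fmt"

// classify mirrors the polling watcher's size comparison.
func classify(prevSize, curSize int64) string {
	switch {
	case prevSize > 0 && prevSize > curSize:
		return "truncated"
	case prevSize > 0 && prevSize < curSize:
		return "modified"
	default:
		return "unchanged" // the watcher falls back to comparing mod times
	}
}

func main() {
	fmt.Println(classify(100, 0))   // truncated (e.g. logrotate copytruncate)
	fmt.Println(classify(100, 150)) // modified
	fmt.Println(classify(100, 100)) // unchanged
}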
diff --git a/vendor/github.com/hpcloud/tail/watch/watch.go b/vendor/github.com/hpcloud/tail/watch/watch.go
deleted file mode 100644
index 2e1783ef..00000000
--- a/vendor/github.com/hpcloud/tail/watch/watch.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import "gopkg.in/tomb.v1"
-
-// FileWatcher monitors file-level events.
-type FileWatcher interface {
- // BlockUntilExists blocks until the file comes into existence.
- BlockUntilExists(*tomb.Tomb) error
-
- // ChangeEvents reports on changes to a file, be it modification,
- // deletion, renames or truncations. Returned FileChanges group of
- // channels will be closed, thus become unusable, after a deletion
- // or truncation event.
- // In order to properly report truncations, ChangeEvents requires
- // the caller to pass their current offset in the file.
- ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
-}
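
A consumer of the FileWatcher interface looks roughly like the following
sketch (the helper and file name are mine; the polling watcher is used here
because it needs no inotify support):

package main

import (
	"fmt"

	"github.com/hpcloud/tail/watch"
	"gopkg.in/tomb.v1"
)

// nextEvent blocks until the watched file changes and labels the event.
func nextEvent(fw watch.FileWatcher, t *tomb.Tomb, pos int64) (string, error) {
	changes, err := fw.ChangeEvents(t, pos)
	if err != nil {
		return "", err
	}
	select {
	case <-changes.Modified:
		return "modified", nil
	case <-changes.Truncated:
		return "truncated", nil
	case <-changes.Deleted:
		return "deleted", nil
	}
}

func main() {
	var t tomb.Tomb
	evt, err := nextEvent(watch.NewPollingFileWatcher("app.log"), &t, 0)
	fmt.Println(evt, err)
}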
diff --git a/vendor/github.com/hpcloud/tail/winfile/winfile.go b/vendor/github.com/hpcloud/tail/winfile/winfile.go
deleted file mode 100644
index aa7e7bc5..00000000
--- a/vendor/github.com/hpcloud/tail/winfile/winfile.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// +build windows
-
-package winfile
-
-import (
- "os"
- "syscall"
- "unsafe"
-)
-
-// The issue is also described here:
-// https://codereview.appspot.com/8203043/
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
-func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
- if len(path) == 0 {
- return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
- }
- pathp, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return syscall.InvalidHandle, err
- }
- var access uint32
- switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
- case syscall.O_RDONLY:
- access = syscall.GENERIC_READ
- case syscall.O_WRONLY:
- access = syscall.GENERIC_WRITE
- case syscall.O_RDWR:
- access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
- }
- if mode&syscall.O_CREAT != 0 {
- access |= syscall.GENERIC_WRITE
- }
- if mode&syscall.O_APPEND != 0 {
- access &^= syscall.GENERIC_WRITE
- access |= syscall.FILE_APPEND_DATA
- }
- sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
- var sa *syscall.SecurityAttributes
- if mode&syscall.O_CLOEXEC == 0 {
- sa = makeInheritSa()
- }
- var createmode uint32
- switch {
- case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
- createmode = syscall.CREATE_NEW
- case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
- createmode = syscall.CREATE_ALWAYS
- case mode&syscall.O_CREAT == syscall.O_CREAT:
- createmode = syscall.OPEN_ALWAYS
- case mode&syscall.O_TRUNC == syscall.O_TRUNC:
- createmode = syscall.TRUNCATE_EXISTING
- default:
- createmode = syscall.OPEN_EXISTING
- }
- h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
- return h, e
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
-func makeInheritSa() *syscall.SecurityAttributes {
- var sa syscall.SecurityAttributes
- sa.Length = uint32(unsafe.Sizeof(sa))
- sa.InheritHandle = 1
- return &sa
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
-func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
- r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
- if e != nil {
- return nil, e
- }
- return os.NewFile(uintptr(r), name), nil
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
-func syscallMode(i os.FileMode) (o uint32) {
- o |= uint32(i.Perm())
- if i&os.ModeSetuid != 0 {
- o |= syscall.S_ISUID
- }
- if i&os.ModeSetgid != 0 {
- o |= syscall.S_ISGID
- }
- if i&os.ModeSticky != 0 {
- o |= syscall.S_ISVTX
- }
- // No mapping for Go's ModeTemporary (plan9 only).
- return
-}
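
The point of this shim is the share mode: the handle is opened with
FILE_SHARE_DELETE, so another process can rotate or delete the file while it
is being tailed, which a default Windows open does not allow. A usage sketch
(the file name is illustrative):

// +build windows

package main

import (
	"fmt"
	"os"

	"github.com/hpcloud/tail/winfile"
)

func main() {
	// The returned *os.File behaves normally but does not block deletes
	// or renames by other processes.
	f, err := winfile.OpenFile("app.log", os.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Println("opened:", f.Name())
}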
diff --git a/vendor/github.com/jefferai/jsonx/LICENSE b/vendor/github.com/jefferai/jsonx/LICENSE
deleted file mode 100644
index a612ad98..00000000
--- a/vendor/github.com/jefferai/jsonx/LICENSE
+++ /dev/null
@@ -1,373 +0,0 @@
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/jefferai/jsonx/README.md b/vendor/github.com/jefferai/jsonx/README.md
deleted file mode 100644
index a7bb5bac..00000000
--- a/vendor/github.com/jefferai/jsonx/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-JSONx
-========
-
-[![GoDoc](https://godoc.org/github.com/jefferai/jsonx?status.svg)](https://godoc.org/github.com/jefferai/jsonx)
-
-A Go (Golang) library to transform an object or existing JSON bytes into
-[JSONx](https://www.ibm.com/support/knowledgecenter/SS9H2Y_7.5.0/com.ibm.dp.doc/json_jsonxconversionrules.html).
-Because sometimes your luck runs out.
-
-This follows the "standard" except for the handling of special and escaped
-characters. Names and values are properly XML-escaped but there is no special
-handling of values already escaped in JSON if they are valid in XML.
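
A usage sketch of the API described above (input values are illustrative):

package main

import (
	"fmt"

	"github.com/jefferai/jsonx"
)

func main() {
	out, err := jsonx.Marshal(map[string]interface{}{
		"name":  "vault",
		"count": 3,
	})
	if err != nil {
		panic(err)
	}
	// Each JSON value becomes a json:* element with the key as its name
	// attribute, e.g. <json:number name="count">3</json:number>.
	fmt.Println(out)
}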
diff --git a/vendor/github.com/jefferai/jsonx/go.mod b/vendor/github.com/jefferai/jsonx/go.mod
deleted file mode 100644
index eaf7062a..00000000
--- a/vendor/github.com/jefferai/jsonx/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/jefferai/jsonx
-
-require github.com/Jeffail/gabs v1.1.1
diff --git a/vendor/github.com/jefferai/jsonx/go.sum b/vendor/github.com/jefferai/jsonx/go.sum
deleted file mode 100644
index 4169e3d0..00000000
--- a/vendor/github.com/jefferai/jsonx/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E=
-github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
diff --git a/vendor/github.com/jefferai/jsonx/jsonx.go b/vendor/github.com/jefferai/jsonx/jsonx.go
deleted file mode 100644
index 93d24a9b..00000000
--- a/vendor/github.com/jefferai/jsonx/jsonx.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package jsonx
-
-import (
- "bytes"
- "encoding/json"
- "encoding/xml"
- "fmt"
- "sort"
-
- "github.com/Jeffail/gabs"
-)
-
-const (
-	XMLHeader = `<?xml version="1.0" encoding="UTF-8"?>`
-	Header    = `<json:object xsi:schemaLocation="http://www.datapower.com/schemas/json jsonx.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:json="http://www.ibm.com/xmlns/prod/2009/jsonx">`
-	Footer    = `</json:object>`
-)
-
-// namedContainer wraps a gabs.Container to carry name information with it
-type namedContainer struct {
- name string
- *gabs.Container
-}
-
-// Marshal marshals the input data into JSONx.
-func Marshal(input interface{}) (string, error) {
- jsonBytes, err := json.Marshal(input)
- if err != nil {
- return "", err
- }
- xmlBytes, err := EncodeJSONBytes(jsonBytes)
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%s%s%s%s", XMLHeader, Header, string(xmlBytes), Footer), nil
-}
-
-// EncodeJSONBytes encodes JSON-formatted bytes into JSONx. It is designed to
-// be used for multiple entries so does not prepend the JSONx header tag or
-// append the JSONx footer tag. You can use jsonx.Header and jsonx.Footer to
-// easily add these when necessary.
-func EncodeJSONBytes(input []byte) ([]byte, error) {
- o := bytes.NewBuffer(nil)
- reader := bytes.NewReader(input)
- dec := json.NewDecoder(reader)
- dec.UseNumber()
-
- cont, err := gabs.ParseJSONDecoder(dec)
- if err != nil {
- return nil, err
- }
-
- if err := sortAndTransformObject(o, &namedContainer{Container: cont}); err != nil {
- return nil, err
- }
-
- return o.Bytes(), nil
-}
-
-func transformContainer(o *bytes.Buffer, cont *namedContainer) error {
- var printName string
-
- if cont.name != "" {
- escapedNameBuf := bytes.NewBuffer(nil)
- err := xml.EscapeText(escapedNameBuf, []byte(cont.name))
- if err != nil {
- return err
- }
- printName = fmt.Sprintf(" name=\"%s\"", escapedNameBuf.String())
- }
-
-	data := cont.Data()
-	switch data.(type) {
-	case nil:
-		o.WriteString(fmt.Sprintf("<json:null%s />", printName))
-
-	case bool:
-		o.WriteString(fmt.Sprintf("<json:boolean%s>%t</json:boolean>", printName, data))
-
-	case json.Number:
-		o.WriteString(fmt.Sprintf("<json:number%s>%v</json:number>", printName, data))
-
-	case string:
-		o.WriteString(fmt.Sprintf("<json:string%s>%v</json:string>", printName, data))
-
-	case []interface{}:
-		o.WriteString(fmt.Sprintf("<json:array%s>", printName))
-		arrayChildren, err := cont.Children()
-		if err != nil {
-			return err
-		}
-		for _, child := range arrayChildren {
-			if err := transformContainer(o, &namedContainer{Container: child}); err != nil {
-				return err
-			}
-		}
-		o.WriteString("</json:array>")
-
-	case map[string]interface{}:
-		o.WriteString(fmt.Sprintf("<json:object%s>", printName))
-
-		if err := sortAndTransformObject(o, cont); err != nil {
-			return err
-		}
-
-		o.WriteString("</json:object>")
-	}
-
- return nil
-}
-
-// sortAndTransformObject sorts object keys to make the output predictable so
-// the package can be tested; logic is here to prevent code duplication
-func sortAndTransformObject(o *bytes.Buffer, cont *namedContainer) error {
- objectChildren, err := cont.ChildrenMap()
- if err != nil {
- return err
- }
-
- sortedNames := make([]string, 0, len(objectChildren))
-	for name := range objectChildren {
- sortedNames = append(sortedNames, name)
- }
- sort.Strings(sortedNames)
- for _, name := range sortedNames {
- if err := transformContainer(o, &namedContainer{name: name, Container: objectChildren[name]}); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/keybase/go-crypto/AUTHORS b/vendor/github.com/keybase/go-crypto/AUTHORS
deleted file mode 100644
index 15167cd7..00000000
--- a/vendor/github.com/keybase/go-crypto/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/keybase/go-crypto/CONTRIBUTORS b/vendor/github.com/keybase/go-crypto/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9..00000000
--- a/vendor/github.com/keybase/go-crypto/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/keybase/go-crypto/LICENSE b/vendor/github.com/keybase/go-crypto/LICENSE
deleted file mode 100644
index 6a66aea5..00000000
--- a/vendor/github.com/keybase/go-crypto/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/keybase/go-crypto/PATENTS b/vendor/github.com/keybase/go-crypto/PATENTS
deleted file mode 100644
index 73309904..00000000
--- a/vendor/github.com/keybase/go-crypto/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go b/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go
deleted file mode 100644
index 77fb8b9a..00000000
--- a/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Package brainpool implements the Brainpool elliptic curves.
-// The rcurve implementation is from github.com/ebfe/brainpool.
-// Note that these curves use naive, non-constant-time operations and are
-// likely not suitable for environments where timing attacks are a concern.
-package brainpool
-
-import (
- "crypto/elliptic"
- "math/big"
- "sync"
-)
-
-var (
- once sync.Once
- p256t1, p384t1, p512t1 *elliptic.CurveParams
- p256r1, p384r1, p512r1 *rcurve
-)
-
-func initAll() {
- initP256t1()
- initP384t1()
- initP512t1()
- initP256r1()
- initP384r1()
- initP512r1()
-}
-
-func initP256t1() {
- p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"}
- p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16)
- p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16)
- p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16)
- p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16)
- p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16)
- p256t1.BitSize = 256
-}
-
-func initP256r1() {
- twisted := p256t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP256r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16)
- params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16)
- z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16)
- p256r1 = newrcurve(twisted, params, z)
-}
-
-func initP384t1() {
- p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"}
- p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16)
- p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16)
- p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16)
- p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16)
- p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16)
- p384t1.BitSize = 384
-}
-
-func initP384r1() {
- twisted := p384t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP384r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16)
- params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16)
- z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16)
- p384r1 = newrcurve(twisted, params, z)
-}
-
-func initP512t1() {
- p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"}
- p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16)
- p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16)
- p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16)
- p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16)
- p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16)
- p512t1.BitSize = 512
-}
-
-func initP512r1() {
- twisted := p512t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP512r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16)
- params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16)
- z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16)
- p512r1 = newrcurve(twisted, params, z)
-}
-
-// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4)
-func P256t1() elliptic.Curve {
- once.Do(initAll)
- return p256t1
-}
-
-// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4)
-func P256r1() elliptic.Curve {
- once.Do(initAll)
- return p256r1
-}
-
-// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6)
-func P384t1() elliptic.Curve {
- once.Do(initAll)
- return p384t1
-}
-
-// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6)
-func P384r1() elliptic.Curve {
- once.Do(initAll)
- return p384r1
-}
-
-// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7)
-func P512t1() elliptic.Curve {
- once.Do(initAll)
- return p512t1
-}
-
-// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7)
-func P512r1() elliptic.Curve {
- once.Do(initAll)
- return p512r1
-}
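
Since these curves implement crypto/elliptic.Curve, they plug directly into
the standard library's ECDSA; a brief sketch (message and hash choice are
illustrative):

package main

import (
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/keybase/go-crypto/brainpool"
)

func main() {
	key, err := ecdsa.GenerateKey(brainpool.P256r1(), rand.Reader)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("hello"))
	r, s, err := ecdsa.Sign(rand.Reader, key, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(ecdsa.Verify(&key.PublicKey, digest[:], r, s)) // true
}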
diff --git a/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go b/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go
deleted file mode 100644
index 7e291d6a..00000000
--- a/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package brainpool
-
-import (
- "crypto/elliptic"
- "math/big"
-)
-
-var _ elliptic.Curve = (*rcurve)(nil)
-
-type rcurve struct {
- twisted elliptic.Curve
- params *elliptic.CurveParams
- z *big.Int
- zinv *big.Int
- z2 *big.Int
- z3 *big.Int
- zinv2 *big.Int
- zinv3 *big.Int
-}
-
-var (
- two = big.NewInt(2)
- three = big.NewInt(3)
-)
-
-func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve {
- zinv := new(big.Int).ModInverse(z, params.P)
- return &rcurve{
- twisted: twisted,
- params: params,
- z: z,
- zinv: zinv,
- z2: new(big.Int).Exp(z, two, params.P),
- z3: new(big.Int).Exp(z, three, params.P),
- zinv2: new(big.Int).Exp(zinv, two, params.P),
- zinv3: new(big.Int).Exp(zinv, three, params.P),
- }
-}
-
-func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) {
- var tx, ty big.Int
- tx.Mul(x, curve.z2)
- tx.Mod(&tx, curve.params.P)
- ty.Mul(y, curve.z3)
- ty.Mod(&ty, curve.params.P)
- return &tx, &ty
-}
-
-func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) {
- var x, y big.Int
- x.Mul(tx, curve.zinv2)
- x.Mod(&x, curve.params.P)
- y.Mul(ty, curve.zinv3)
- y.Mod(&y, curve.params.P)
- return &x, &y
-}
-
-func (curve *rcurve) Params() *elliptic.CurveParams {
- return curve.params
-}
-
-func (curve *rcurve) IsOnCurve(x, y *big.Int) bool {
- return curve.twisted.IsOnCurve(curve.toTwisted(x, y))
-}
-
-func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) {
- tx1, ty1 := curve.toTwisted(x1, y1)
- tx2, ty2 := curve.toTwisted(x2, y2)
- return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2))
-}
-
-func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) {
- return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1)))
-}
-
-func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) {
- tx1, ty1 := curve.toTwisted(x1, y1)
- return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar))
-}
-
-func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
- return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar))
-}
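
rcurve implements each Brainpool "r" curve in terms of its isomorphic twisted "t" curve: points are mapped onto the twisted curve with x' = z^2 * x mod p and y' = z^3 * y mod p, the group operation runs there, and the result is mapped back with the precomputed powers of z^-1. A small sketch of the two maps and their round trip, using toy numbers rather than real curve parameters:

```go
package main

import (
	"fmt"
	"math/big"
)

// toTwisted mirrors the coordinate map above: the r-curve and t-curve
// are isomorphic via (x, y) -> (z^2 * x, z^3 * y) mod p.
func toTwisted(x, y, z, p *big.Int) (*big.Int, *big.Int) {
	z2 := new(big.Int).Exp(z, big.NewInt(2), p)
	z3 := new(big.Int).Exp(z, big.NewInt(3), p)
	tx := new(big.Int).Mod(new(big.Int).Mul(x, z2), p)
	ty := new(big.Int).Mod(new(big.Int).Mul(y, z3), p)
	return tx, ty
}

// fromTwisted is the inverse map: the same formula with z^-1 in place of z.
func fromTwisted(tx, ty, z, p *big.Int) (*big.Int, *big.Int) {
	zinv := new(big.Int).ModInverse(z, p)
	return toTwisted(tx, ty, zinv, p)
}

func main() {
	// Toy values, just to show the maps invert each other; the real
	// code uses the curve prime P and the per-curve constant z.
	p := big.NewInt(97)
	z := big.NewInt(5)
	x, y := big.NewInt(10), big.NewInt(20)
	tx, ty := toTwisted(x, y, z, p)
	rx, ry := fromTwisted(tx, ty, z, p)
	fmt.Println(rx, ry) // 10 20
}
```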
diff --git a/vendor/github.com/keybase/go-crypto/cast5/cast5.go b/vendor/github.com/keybase/go-crypto/cast5/cast5.go
deleted file mode 100644
index e0207352..00000000
--- a/vendor/github.com/keybase/go-crypto/cast5/cast5.go
+++ /dev/null
@@ -1,526 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
-// OpenPGP cipher.
-package cast5 // import "github.com/keybase/go-crypto/cast5"
-
-import "errors"
-
-const BlockSize = 8
-const KeySize = 16
-
-type Cipher struct {
- masking [16]uint32
- rotate [16]uint8
-}
-
-func NewCipher(key []byte) (c *Cipher, err error) {
- if len(key) != KeySize {
- return nil, errors.New("CAST5: keys must be 16 bytes")
- }
-
- c = new(Cipher)
- c.keySchedule(key)
- return
-}
-
-func (c *Cipher) BlockSize() int {
- return BlockSize
-}
-
-func (c *Cipher) Encrypt(dst, src []byte) {
- l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
- r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
-
- l, r = r, l^f1(r, c.masking[0], c.rotate[0])
- l, r = r, l^f2(r, c.masking[1], c.rotate[1])
- l, r = r, l^f3(r, c.masking[2], c.rotate[2])
- l, r = r, l^f1(r, c.masking[3], c.rotate[3])
-
- l, r = r, l^f2(r, c.masking[4], c.rotate[4])
- l, r = r, l^f3(r, c.masking[5], c.rotate[5])
- l, r = r, l^f1(r, c.masking[6], c.rotate[6])
- l, r = r, l^f2(r, c.masking[7], c.rotate[7])
-
- l, r = r, l^f3(r, c.masking[8], c.rotate[8])
- l, r = r, l^f1(r, c.masking[9], c.rotate[9])
- l, r = r, l^f2(r, c.masking[10], c.rotate[10])
- l, r = r, l^f3(r, c.masking[11], c.rotate[11])
-
- l, r = r, l^f1(r, c.masking[12], c.rotate[12])
- l, r = r, l^f2(r, c.masking[13], c.rotate[13])
- l, r = r, l^f3(r, c.masking[14], c.rotate[14])
- l, r = r, l^f1(r, c.masking[15], c.rotate[15])
-
- dst[0] = uint8(r >> 24)
- dst[1] = uint8(r >> 16)
- dst[2] = uint8(r >> 8)
- dst[3] = uint8(r)
- dst[4] = uint8(l >> 24)
- dst[5] = uint8(l >> 16)
- dst[6] = uint8(l >> 8)
- dst[7] = uint8(l)
-}
-
-func (c *Cipher) Decrypt(dst, src []byte) {
- l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
- r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
-
- l, r = r, l^f1(r, c.masking[15], c.rotate[15])
- l, r = r, l^f3(r, c.masking[14], c.rotate[14])
- l, r = r, l^f2(r, c.masking[13], c.rotate[13])
- l, r = r, l^f1(r, c.masking[12], c.rotate[12])
-
- l, r = r, l^f3(r, c.masking[11], c.rotate[11])
- l, r = r, l^f2(r, c.masking[10], c.rotate[10])
- l, r = r, l^f1(r, c.masking[9], c.rotate[9])
- l, r = r, l^f3(r, c.masking[8], c.rotate[8])
-
- l, r = r, l^f2(r, c.masking[7], c.rotate[7])
- l, r = r, l^f1(r, c.masking[6], c.rotate[6])
- l, r = r, l^f3(r, c.masking[5], c.rotate[5])
- l, r = r, l^f2(r, c.masking[4], c.rotate[4])
-
- l, r = r, l^f1(r, c.masking[3], c.rotate[3])
- l, r = r, l^f3(r, c.masking[2], c.rotate[2])
- l, r = r, l^f2(r, c.masking[1], c.rotate[1])
- l, r = r, l^f1(r, c.masking[0], c.rotate[0])
-
- dst[0] = uint8(r >> 24)
- dst[1] = uint8(r >> 16)
- dst[2] = uint8(r >> 8)
- dst[3] = uint8(r)
- dst[4] = uint8(l >> 24)
- dst[5] = uint8(l >> 16)
- dst[6] = uint8(l >> 8)
- dst[7] = uint8(l)
-}
-
-type keyScheduleA [4][7]uint8
-type keyScheduleB [4][5]uint8
-
-// keyScheduleRound contains the magic values for a round of the key schedule.
-// The keyScheduleA deals with the lines like:
-// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
-// Conceptually, both x and z are in the same array, x first. The first
-// element describes which word of this array gets written to and the
-// second, which word gets read. So, for the line above, it's "4, 0", because
-// it's writing to the first word of z, which, being after x, is word 4, and
-// reading from the first word of x: word 0.
-//
-// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
-// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
-// that it's z that we're indexing.
-//
-// keyScheduleB deals with lines like:
-// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
-// "K1" is ignored because key words are always written in order. So the five
-// elements are the S-box indexes. They use the same form as in keyScheduleA,
-// above.
-
-type keyScheduleRound struct{}
-type keySchedule []keyScheduleRound
-
-var schedule = []struct {
- a keyScheduleA
- b keyScheduleB
-}{
- {
- keyScheduleA{
- {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
- {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
- {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
- {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
- },
- keyScheduleB{
- {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
- {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
- {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
- {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
- },
- },
- {
- keyScheduleA{
- {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
- {1, 4, 0, 2, 1, 3, 16 + 2},
- {2, 5, 7, 6, 5, 4, 16 + 1},
- {3, 7, 0xa, 9, 0xb, 8, 16 + 3},
- },
- keyScheduleB{
- {3, 2, 0xc, 0xd, 8},
- {1, 0, 0xe, 0xf, 0xd},
- {7, 6, 8, 9, 3},
- {5, 4, 0xa, 0xb, 7},
- },
- },
- {
- keyScheduleA{
- {4, 0, 0xd, 0xf, 0xc, 0xe, 8},
- {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
- {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
- {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
- },
- keyScheduleB{
- {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
- {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
- {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
- {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
- },
- },
- {
- keyScheduleA{
- {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
- {1, 4, 0, 2, 1, 3, 16 + 2},
- {2, 5, 7, 6, 5, 4, 16 + 1},
- {3, 7, 0xa, 9, 0xb, 8, 16 + 3},
- },
- keyScheduleB{
- {8, 9, 7, 6, 3},
- {0xa, 0xb, 5, 4, 7},
- {0xc, 0xd, 3, 2, 8},
- {0xe, 0xf, 1, 0, 0xd},
- },
- },
-}
-
-func (c *Cipher) keySchedule(in []byte) {
- var t [8]uint32
- var k [32]uint32
-
- for i := 0; i < 4; i++ {
- j := i * 4
- t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
- }
-
- x := []byte{6, 7, 4, 5}
- ki := 0
-
- for half := 0; half < 2; half++ {
- for _, round := range schedule {
- for j := 0; j < 4; j++ {
- var a [7]uint8
- copy(a[:], round.a[j][:])
- w := t[a[1]]
- w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
- w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
- w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
- w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
- w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
- t[a[0]] = w
- }
-
- for j := 0; j < 4; j++ {
- var b [5]uint8
- copy(b[:], round.b[j][:])
- w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
- w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
- w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
- w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
- w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
- k[ki] = w
- ki++
- }
- }
- }
-
- for i := 0; i < 16; i++ {
- c.masking[i] = k[i]
- c.rotate[i] = uint8(k[16+i] & 0x1f)
- }
-}
-
-// These are the three 'f' functions. See RFC 2144, section 2.2.
-func f1(d, m uint32, r uint8) uint32 {
- t := m + d
- I := (t << r) | (t >> (32 - r))
- return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
-}
-
-func f2(d, m uint32, r uint8) uint32 {
- t := m ^ d
- I := (t << r) | (t >> (32 - r))
- return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
-}
-
-func f3(d, m uint32, r uint8) uint32 {
- t := m - d
- I := (t << r) | (t >> (32 - r))
- return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
-}
-
-var sBox = [8][256]uint32{
- {
- 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
- 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
- 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
- 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
- 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
- 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
- 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
- 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
- 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
- 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
- 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
- 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
- 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
- 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
- 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
- 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
- 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
- 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
- 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
- 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
- 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
- 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
- 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
- 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
- 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
- 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
- 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
- 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
- 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
- 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
- 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
- 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
- },
- {
- 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
- 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
- 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
- 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
- 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
- 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
- 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
- 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
- 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
- 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
- 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
- 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
- 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
- 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
- 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
- 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
- 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
- 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
- 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
- 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
- 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
- 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
- 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
- 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
- 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
- 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
- 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
- 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
- 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
- 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
- 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
- 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
- },
- {
- 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
- 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
- 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
- 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
- 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
- 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
- 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
- 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
- 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
- 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
- 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
- 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
- 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
- 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
- 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
- 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
- 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
- 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
- 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
- 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
- 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
- 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
- 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
- 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
- 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
- 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
- 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
- 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
- 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
- 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
- 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
- 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
- },
- {
- 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
- 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
- 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
- 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
- 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
- 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
- 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
- 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
- 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
- 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
- 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
- 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
- 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
- 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
- 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
- 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
- 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
- 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
- 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
- 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
- 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
- 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
- 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
- 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
- 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
- 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
- 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
- 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
- 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
- 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
- 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
- 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
- },
- {
- 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
- 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
- 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
- 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
- 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
- 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
- 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
- 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
- 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
- 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
- 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
- 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
- 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
- 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
- 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
- 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
- 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
- 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
- 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
- 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
- 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
- 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
- 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
- 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
- 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
- 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
- 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
- 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
- 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
- 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
- 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
- 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
- },
- {
- 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
- 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
- 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
- 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
- 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
- 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
- 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
- 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
- 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
- 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
- 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
- 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
- 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
- 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
- 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
- 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
- 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
- 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
- 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
- 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
- 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
- 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
- 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
- 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
- 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
- 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
- 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
- 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
- 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
- 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
- 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
- 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
- },
- {
- 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
- 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
- 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
- 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
- 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
- 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
- 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
- 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
- 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
- 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
- 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
- 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
- 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
- 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
- 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
- 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
- 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
- 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
- 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
- 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
- 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
- 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
- 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
- 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
- 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
- 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
- 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
- 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
- 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
- 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
- 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
- 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
- },
- {
- 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
- 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
- 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
- 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
- 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
- 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
- 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
- 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
- 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
- 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
- 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
- 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
- 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
- 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
- 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
- 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
- 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
- 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
- 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
- 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
- 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
- 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
- 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
- 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
- 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
- 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
- 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
- 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
- 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
- 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
- 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
- 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
- },
-}
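
As the declarations above show, the package exposes the standard block-cipher shape: NewCipher rejects anything but a 16-byte key, and Encrypt/Decrypt each process one 8-byte block. A minimal single-block round trip, assuming the vendored import path:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/keybase/go-crypto/cast5"
)

func main() {
	key := []byte("0123456789abcdef") // exactly cast5.KeySize (16) bytes
	c, err := cast5.NewCipher(key)
	if err != nil {
		panic(err)
	}

	src := []byte("8bytes!!") // exactly cast5.BlockSize (8) bytes
	dst := make([]byte, cast5.BlockSize)
	out := make([]byte, cast5.BlockSize)

	c.Encrypt(dst, src)
	c.Decrypt(out, dst)
	fmt.Println(bytes.Equal(src, out)) // true
}
```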
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h b/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h
deleted file mode 100644
index b3f74162..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.h
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-#define REDMASK51 0x0007FFFFFFFFFFFF
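
REDMASK51 is 2^51 - 1: the assembly stores a field element of GF(2^255 - 19) as five 64-bit limbs of 51 bits each, and the mask extracts a limb's low 51 bits during carry propagation. A rough Go rendering of that reduction step, with illustrative limb names (the real work happens in the assembly files that follow):

```go
const redMask51 = 0x0007FFFFFFFFFFFF // 2^51 - 1, as in REDMASK51

// carryPropagate pushes each limb's overflow into the next limb. The
// carry out of the top limb wraps around multiplied by 19, because
// 2^255 = 19 (mod 2^255 - 19).
func carryPropagate(h *[5]uint64) {
	c := h[4] >> 51
	h[4] &= redMask51
	h[0] += c * 19
	for i := 0; i < 4; i++ {
		c = h[i] >> 51
		h[i] &= redMask51
		h[i+1] += c
	}
}
```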
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s
deleted file mode 100644
index ee7b4bd5..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/const_amd64.s
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-// These constants cannot be encoded in non-MOVQ immediates.
-// We access them directly from memory instead.
-
-DATA ·_121666_213(SB)/8, $996687872
-GLOBL ·_121666_213(SB), 8, $8
-
-DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
-GLOBL ·_2P0(SB), 8, $8
-
-DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
-GLOBL ·_2P1234(SB), 8, $8
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s
deleted file mode 100644
index cd793a5b..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/cswap_amd64.s
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64,!gccgo,!appengine
-
-// func cswap(inout *[4][5]uint64, v uint64)
-TEXT ·cswap(SB),7,$0
- MOVQ inout+0(FP),DI
- MOVQ v+8(FP),SI
-
- SUBQ $1, SI
- NOTQ SI
- MOVQ SI, X15
- PSHUFD $0x44, X15, X15
-
- MOVOU 0(DI), X0
- MOVOU 16(DI), X2
- MOVOU 32(DI), X4
- MOVOU 48(DI), X6
- MOVOU 64(DI), X8
- MOVOU 80(DI), X1
- MOVOU 96(DI), X3
- MOVOU 112(DI), X5
- MOVOU 128(DI), X7
- MOVOU 144(DI), X9
-
- MOVO X1, X10
- MOVO X3, X11
- MOVO X5, X12
- MOVO X7, X13
- MOVO X9, X14
-
- PXOR X0, X10
- PXOR X2, X11
- PXOR X4, X12
- PXOR X6, X13
- PXOR X8, X14
- PAND X15, X10
- PAND X15, X11
- PAND X15, X12
- PAND X15, X13
- PAND X15, X14
- PXOR X10, X0
- PXOR X10, X1
- PXOR X11, X2
- PXOR X11, X3
- PXOR X12, X4
- PXOR X12, X5
- PXOR X13, X6
- PXOR X13, X7
- PXOR X14, X8
- PXOR X14, X9
-
- MOVOU X0, 0(DI)
- MOVOU X2, 16(DI)
- MOVOU X4, 32(DI)
- MOVOU X6, 48(DI)
- MOVOU X8, 64(DI)
- MOVOU X1, 80(DI)
- MOVOU X3, 96(DI)
- MOVOU X5, 112(DI)
- MOVOU X7, 128(DI)
- MOVOU X9, 144(DI)
- RET
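
The SUBQ $1 / NOTQ pair turns v into a full-width mask: all ones when v == 1, all zeros when v == 0. The PAND/PXOR sequence then swaps the two point representations without a branch, so the memory access pattern is independent of the secret bit. The same trick in scalar Go:

```go
// mask is all ones when v == 1 and zero when v == 0,
// mirroring the SUBQ $1 / NOTQ sequence above.
func mask(v uint64) uint64 { return ^(v - 1) }

// cswap conditionally swaps a and b without branching.
func cswap(a, b *uint64, v uint64) {
	m := mask(v)
	t := m & (*a ^ *b)
	*a ^= t
	*b ^= t
}
```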
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go b/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go
deleted file mode 100644
index cb8fbc57..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/curve25519.go
+++ /dev/null
@@ -1,834 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// We have an implementation in amd64 assembly so this code is only run on
-// non-amd64 platforms. The amd64 assembly does not support gccgo.
-// +build !amd64 gccgo appengine
-
-package curve25519
-
-import (
- "encoding/binary"
-)
-
-// This code is a port of the public domain, "ref10" implementation of
-// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
-
-// fieldElement represents an element of the field GF(2^255 - 19). An element
-// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
-// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
-// context.
-type fieldElement [10]int32
-
-func feZero(fe *fieldElement) {
- for i := range fe {
- fe[i] = 0
- }
-}
-
-func feOne(fe *fieldElement) {
- feZero(fe)
- fe[0] = 1
-}
-
-func feAdd(dst, a, b *fieldElement) {
- for i := range dst {
- dst[i] = a[i] + b[i]
- }
-}
-
-func feSub(dst, a, b *fieldElement) {
- for i := range dst {
- dst[i] = a[i] - b[i]
- }
-}
-
-func feCopy(dst, src *fieldElement) {
- for i := range dst {
- dst[i] = src[i]
- }
-}
-
-// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
-//
-// Preconditions: b in {0,1}.
-func feCSwap(f, g *fieldElement, b int32) {
- b = -b
- for i := range f {
- t := b & (f[i] ^ g[i])
- f[i] ^= t
- g[i] ^= t
- }
-}
-
-// load3 reads a 24-bit, little-endian value from in.
-func load3(in []byte) int64 {
- var r int64
- r = int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- return r
-}
-
-// load4 reads a 32-bit, little-endian value from in.
-func load4(in []byte) int64 {
- return int64(binary.LittleEndian.Uint32(in))
-}
-
-func feFromBytes(dst *fieldElement, src *[32]byte) {
- h0 := load4(src[:])
- h1 := load3(src[4:]) << 6
- h2 := load3(src[7:]) << 5
- h3 := load3(src[10:]) << 3
- h4 := load3(src[13:]) << 2
- h5 := load4(src[16:])
- h6 := load3(src[20:]) << 7
- h7 := load3(src[23:]) << 5
- h8 := load3(src[26:]) << 4
- h9 := load3(src[29:]) << 2
-
- var carry [10]int64
- carry[9] = (h9 + 1<<24) >> 25
- h0 += carry[9] * 19
- h9 -= carry[9] << 25
- carry[1] = (h1 + 1<<24) >> 25
- h2 += carry[1]
- h1 -= carry[1] << 25
- carry[3] = (h3 + 1<<24) >> 25
- h4 += carry[3]
- h3 -= carry[3] << 25
- carry[5] = (h5 + 1<<24) >> 25
- h6 += carry[5]
- h5 -= carry[5] << 25
- carry[7] = (h7 + 1<<24) >> 25
- h8 += carry[7]
- h7 -= carry[7] << 25
-
- carry[0] = (h0 + 1<<25) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- carry[2] = (h2 + 1<<25) >> 26
- h3 += carry[2]
- h2 -= carry[2] << 26
- carry[4] = (h4 + 1<<25) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- carry[6] = (h6 + 1<<25) >> 26
- h7 += carry[6]
- h6 -= carry[6] << 26
- carry[8] = (h8 + 1<<25) >> 26
- h9 += carry[8]
- h8 -= carry[8] << 26
-
- dst[0] = int32(h0)
- dst[1] = int32(h1)
- dst[2] = int32(h2)
- dst[3] = int32(h3)
- dst[4] = int32(h4)
- dst[5] = int32(h5)
- dst[6] = int32(h6)
- dst[7] = int32(h7)
- dst[8] = int32(h8)
- dst[9] = int32(h9)
-}
-
-// feToBytes marshals h to s.
-// Preconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Write p=2^255-19; q=floor(h/p).
-// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
-//
-// Proof:
-// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
-// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
-//
-// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
-// Then 0<y<1.
-//
-// Write r=h-pq.
-// Have 0<=r<=p-1=2^255-20.
-// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
-//
-// Write x=r+19(2^-255)r+y.
-// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
-//
-// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
-// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
-func feToBytes(s *[32]byte, h *fieldElement) {
- var carry [10]int32
-
- q := (19*h[9] + (1 << 24)) >> 25
- q = (h[0] + q) >> 26
- q = (h[1] + q) >> 25
- q = (h[2] + q) >> 26
- q = (h[3] + q) >> 25
- q = (h[4] + q) >> 26
- q = (h[5] + q) >> 25
- q = (h[6] + q) >> 26
- q = (h[7] + q) >> 25
- q = (h[8] + q) >> 26
- q = (h[9] + q) >> 25
-
- // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
- h[0] += 19 * q
- // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
-
- carry[0] = h[0] >> 26
- h[1] += carry[0]
- h[0] -= carry[0] << 26
- carry[1] = h[1] >> 25
- h[2] += carry[1]
- h[1] -= carry[1] << 25
- carry[2] = h[2] >> 26
- h[3] += carry[2]
- h[2] -= carry[2] << 26
- carry[3] = h[3] >> 25
- h[4] += carry[3]
- h[3] -= carry[3] << 25
- carry[4] = h[4] >> 26
- h[5] += carry[4]
- h[4] -= carry[4] << 26
- carry[5] = h[5] >> 25
- h[6] += carry[5]
- h[5] -= carry[5] << 25
- carry[6] = h[6] >> 26
- h[7] += carry[6]
- h[6] -= carry[6] << 26
- carry[7] = h[7] >> 25
- h[8] += carry[7]
- h[7] -= carry[7] << 25
- carry[8] = h[8] >> 26
- h[9] += carry[8]
- h[8] -= carry[8] << 26
- carry[9] = h[9] >> 25
- h[9] -= carry[9] << 25
- // h10 = carry9
-
- // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
- // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
- // evidently 2^255 h10-2^255 q = 0.
- // Goal: Output h[0]+...+2^230 h[9].
-
- s[0] = byte(h[0] >> 0)
- s[1] = byte(h[0] >> 8)
- s[2] = byte(h[0] >> 16)
- s[3] = byte((h[0] >> 24) | (h[1] << 2))
- s[4] = byte(h[1] >> 6)
- s[5] = byte(h[1] >> 14)
- s[6] = byte((h[1] >> 22) | (h[2] << 3))
- s[7] = byte(h[2] >> 5)
- s[8] = byte(h[2] >> 13)
- s[9] = byte((h[2] >> 21) | (h[3] << 5))
- s[10] = byte(h[3] >> 3)
- s[11] = byte(h[3] >> 11)
- s[12] = byte((h[3] >> 19) | (h[4] << 6))
- s[13] = byte(h[4] >> 2)
- s[14] = byte(h[4] >> 10)
- s[15] = byte(h[4] >> 18)
- s[16] = byte(h[5] >> 0)
- s[17] = byte(h[5] >> 8)
- s[18] = byte(h[5] >> 16)
- s[19] = byte((h[5] >> 24) | (h[6] << 1))
- s[20] = byte(h[6] >> 7)
- s[21] = byte(h[6] >> 15)
- s[22] = byte((h[6] >> 23) | (h[7] << 3))
- s[23] = byte(h[7] >> 5)
- s[24] = byte(h[7] >> 13)
- s[25] = byte((h[7] >> 21) | (h[8] << 4))
- s[26] = byte(h[8] >> 4)
- s[27] = byte(h[8] >> 12)
- s[28] = byte((h[8] >> 20) | (h[9] << 6))
- s[29] = byte(h[9] >> 2)
- s[30] = byte(h[9] >> 10)
- s[31] = byte(h[9] >> 18)
-}
-
-// feMul calculates h = f * g
-// Can overlap h with f or g.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Notes on implementation strategy:
-//
-// Using schoolbook multiplication.
-// Karatsuba would save a little in some cost models.
-//
-// Most multiplications by 2 and 19 are 32-bit precomputations;
-// cheaper than 64-bit postcomputations.
-//
-// There is one remaining multiplication by 19 in the carry chain;
-// one *19 precomputation can be merged into this,
-// but the resulting data flow is considerably less clean.
-//
-// There are 12 carries below.
-// 10 of them are 2-way parallelizable and vectorizable.
-// Can get away with 11 carries, but then data flow is much deeper.
-//
-// With tighter constraints on inputs can squeeze carries into int32.
-func feMul(h, f, g *fieldElement) {
- f0 := f[0]
- f1 := f[1]
- f2 := f[2]
- f3 := f[3]
- f4 := f[4]
- f5 := f[5]
- f6 := f[6]
- f7 := f[7]
- f8 := f[8]
- f9 := f[9]
- g0 := g[0]
- g1 := g[1]
- g2 := g[2]
- g3 := g[3]
- g4 := g[4]
- g5 := g[5]
- g6 := g[6]
- g7 := g[7]
- g8 := g[8]
- g9 := g[9]
- g1_19 := 19 * g1 // 1.4*2^29
- g2_19 := 19 * g2 // 1.4*2^30; still ok
- g3_19 := 19 * g3
- g4_19 := 19 * g4
- g5_19 := 19 * g5
- g6_19 := 19 * g6
- g7_19 := 19 * g7
- g8_19 := 19 * g8
- g9_19 := 19 * g9
- f1_2 := 2 * f1
- f3_2 := 2 * f3
- f5_2 := 2 * f5
- f7_2 := 2 * f7
- f9_2 := 2 * f9
- f0g0 := int64(f0) * int64(g0)
- f0g1 := int64(f0) * int64(g1)
- f0g2 := int64(f0) * int64(g2)
- f0g3 := int64(f0) * int64(g3)
- f0g4 := int64(f0) * int64(g4)
- f0g5 := int64(f0) * int64(g5)
- f0g6 := int64(f0) * int64(g6)
- f0g7 := int64(f0) * int64(g7)
- f0g8 := int64(f0) * int64(g8)
- f0g9 := int64(f0) * int64(g9)
- f1g0 := int64(f1) * int64(g0)
- f1g1_2 := int64(f1_2) * int64(g1)
- f1g2 := int64(f1) * int64(g2)
- f1g3_2 := int64(f1_2) * int64(g3)
- f1g4 := int64(f1) * int64(g4)
- f1g5_2 := int64(f1_2) * int64(g5)
- f1g6 := int64(f1) * int64(g6)
- f1g7_2 := int64(f1_2) * int64(g7)
- f1g8 := int64(f1) * int64(g8)
- f1g9_38 := int64(f1_2) * int64(g9_19)
- f2g0 := int64(f2) * int64(g0)
- f2g1 := int64(f2) * int64(g1)
- f2g2 := int64(f2) * int64(g2)
- f2g3 := int64(f2) * int64(g3)
- f2g4 := int64(f2) * int64(g4)
- f2g5 := int64(f2) * int64(g5)
- f2g6 := int64(f2) * int64(g6)
- f2g7 := int64(f2) * int64(g7)
- f2g8_19 := int64(f2) * int64(g8_19)
- f2g9_19 := int64(f2) * int64(g9_19)
- f3g0 := int64(f3) * int64(g0)
- f3g1_2 := int64(f3_2) * int64(g1)
- f3g2 := int64(f3) * int64(g2)
- f3g3_2 := int64(f3_2) * int64(g3)
- f3g4 := int64(f3) * int64(g4)
- f3g5_2 := int64(f3_2) * int64(g5)
- f3g6 := int64(f3) * int64(g6)
- f3g7_38 := int64(f3_2) * int64(g7_19)
- f3g8_19 := int64(f3) * int64(g8_19)
- f3g9_38 := int64(f3_2) * int64(g9_19)
- f4g0 := int64(f4) * int64(g0)
- f4g1 := int64(f4) * int64(g1)
- f4g2 := int64(f4) * int64(g2)
- f4g3 := int64(f4) * int64(g3)
- f4g4 := int64(f4) * int64(g4)
- f4g5 := int64(f4) * int64(g5)
- f4g6_19 := int64(f4) * int64(g6_19)
- f4g7_19 := int64(f4) * int64(g7_19)
- f4g8_19 := int64(f4) * int64(g8_19)
- f4g9_19 := int64(f4) * int64(g9_19)
- f5g0 := int64(f5) * int64(g0)
- f5g1_2 := int64(f5_2) * int64(g1)
- f5g2 := int64(f5) * int64(g2)
- f5g3_2 := int64(f5_2) * int64(g3)
- f5g4 := int64(f5) * int64(g4)
- f5g5_38 := int64(f5_2) * int64(g5_19)
- f5g6_19 := int64(f5) * int64(g6_19)
- f5g7_38 := int64(f5_2) * int64(g7_19)
- f5g8_19 := int64(f5) * int64(g8_19)
- f5g9_38 := int64(f5_2) * int64(g9_19)
- f6g0 := int64(f6) * int64(g0)
- f6g1 := int64(f6) * int64(g1)
- f6g2 := int64(f6) * int64(g2)
- f6g3 := int64(f6) * int64(g3)
- f6g4_19 := int64(f6) * int64(g4_19)
- f6g5_19 := int64(f6) * int64(g5_19)
- f6g6_19 := int64(f6) * int64(g6_19)
- f6g7_19 := int64(f6) * int64(g7_19)
- f6g8_19 := int64(f6) * int64(g8_19)
- f6g9_19 := int64(f6) * int64(g9_19)
- f7g0 := int64(f7) * int64(g0)
- f7g1_2 := int64(f7_2) * int64(g1)
- f7g2 := int64(f7) * int64(g2)
- f7g3_38 := int64(f7_2) * int64(g3_19)
- f7g4_19 := int64(f7) * int64(g4_19)
- f7g5_38 := int64(f7_2) * int64(g5_19)
- f7g6_19 := int64(f7) * int64(g6_19)
- f7g7_38 := int64(f7_2) * int64(g7_19)
- f7g8_19 := int64(f7) * int64(g8_19)
- f7g9_38 := int64(f7_2) * int64(g9_19)
- f8g0 := int64(f8) * int64(g0)
- f8g1 := int64(f8) * int64(g1)
- f8g2_19 := int64(f8) * int64(g2_19)
- f8g3_19 := int64(f8) * int64(g3_19)
- f8g4_19 := int64(f8) * int64(g4_19)
- f8g5_19 := int64(f8) * int64(g5_19)
- f8g6_19 := int64(f8) * int64(g6_19)
- f8g7_19 := int64(f8) * int64(g7_19)
- f8g8_19 := int64(f8) * int64(g8_19)
- f8g9_19 := int64(f8) * int64(g9_19)
- f9g0 := int64(f9) * int64(g0)
- f9g1_38 := int64(f9_2) * int64(g1_19)
- f9g2_19 := int64(f9) * int64(g2_19)
- f9g3_38 := int64(f9_2) * int64(g3_19)
- f9g4_19 := int64(f9) * int64(g4_19)
- f9g5_38 := int64(f9_2) * int64(g5_19)
- f9g6_19 := int64(f9) * int64(g6_19)
- f9g7_38 := int64(f9_2) * int64(g7_19)
- f9g8_19 := int64(f9) * int64(g8_19)
- f9g9_38 := int64(f9_2) * int64(g9_19)
- h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
- h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
- h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
- h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
- h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
- h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
- h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
- h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
- h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
- h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
- var carry [10]int64
-
- // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
- // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
- // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
- // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- // |h0| <= 2^25
- // |h4| <= 2^25
- // |h1| <= 1.51*2^58
- // |h5| <= 1.51*2^58
-
- carry[1] = (h1 + (1 << 24)) >> 25
- h2 += carry[1]
- h1 -= carry[1] << 25
- carry[5] = (h5 + (1 << 24)) >> 25
- h6 += carry[5]
- h5 -= carry[5] << 25
- // |h1| <= 2^24; from now on fits into int32
- // |h5| <= 2^24; from now on fits into int32
- // |h2| <= 1.21*2^59
- // |h6| <= 1.21*2^59
-
- carry[2] = (h2 + (1 << 25)) >> 26
- h3 += carry[2]
- h2 -= carry[2] << 26
- carry[6] = (h6 + (1 << 25)) >> 26
- h7 += carry[6]
- h6 -= carry[6] << 26
- // |h2| <= 2^25; from now on fits into int32 unchanged
- // |h6| <= 2^25; from now on fits into int32 unchanged
- // |h3| <= 1.51*2^58
- // |h7| <= 1.51*2^58
-
- carry[3] = (h3 + (1 << 24)) >> 25
- h4 += carry[3]
- h3 -= carry[3] << 25
- carry[7] = (h7 + (1 << 24)) >> 25
- h8 += carry[7]
- h7 -= carry[7] << 25
- // |h3| <= 2^24; from now on fits into int32 unchanged
- // |h7| <= 2^24; from now on fits into int32 unchanged
- // |h4| <= 1.52*2^33
- // |h8| <= 1.52*2^33
-
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- carry[8] = (h8 + (1 << 25)) >> 26
- h9 += carry[8]
- h8 -= carry[8] << 26
- // |h4| <= 2^25; from now on fits into int32 unchanged
- // |h8| <= 2^25; from now on fits into int32 unchanged
- // |h5| <= 1.01*2^24
- // |h9| <= 1.51*2^58
-
- carry[9] = (h9 + (1 << 24)) >> 25
- h0 += carry[9] * 19
- h9 -= carry[9] << 25
- // |h9| <= 2^24; from now on fits into int32 unchanged
- // |h0| <= 1.8*2^37
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- // |h0| <= 2^25; from now on fits into int32 unchanged
- // |h1| <= 1.01*2^24
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
-// feSquare calculates h = f*f. Can overlap h with f.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func feSquare(h, f *fieldElement) {
- f0 := f[0]
- f1 := f[1]
- f2 := f[2]
- f3 := f[3]
- f4 := f[4]
- f5 := f[5]
- f6 := f[6]
- f7 := f[7]
- f8 := f[8]
- f9 := f[9]
- f0_2 := 2 * f0
- f1_2 := 2 * f1
- f2_2 := 2 * f2
- f3_2 := 2 * f3
- f4_2 := 2 * f4
- f5_2 := 2 * f5
- f6_2 := 2 * f6
- f7_2 := 2 * f7
- f5_38 := 38 * f5 // 1.31*2^30
- f6_19 := 19 * f6 // 1.31*2^30
- f7_38 := 38 * f7 // 1.31*2^30
- f8_19 := 19 * f8 // 1.31*2^30
- f9_38 := 38 * f9 // 1.31*2^30
- f0f0 := int64(f0) * int64(f0)
- f0f1_2 := int64(f0_2) * int64(f1)
- f0f2_2 := int64(f0_2) * int64(f2)
- f0f3_2 := int64(f0_2) * int64(f3)
- f0f4_2 := int64(f0_2) * int64(f4)
- f0f5_2 := int64(f0_2) * int64(f5)
- f0f6_2 := int64(f0_2) * int64(f6)
- f0f7_2 := int64(f0_2) * int64(f7)
- f0f8_2 := int64(f0_2) * int64(f8)
- f0f9_2 := int64(f0_2) * int64(f9)
- f1f1_2 := int64(f1_2) * int64(f1)
- f1f2_2 := int64(f1_2) * int64(f2)
- f1f3_4 := int64(f1_2) * int64(f3_2)
- f1f4_2 := int64(f1_2) * int64(f4)
- f1f5_4 := int64(f1_2) * int64(f5_2)
- f1f6_2 := int64(f1_2) * int64(f6)
- f1f7_4 := int64(f1_2) * int64(f7_2)
- f1f8_2 := int64(f1_2) * int64(f8)
- f1f9_76 := int64(f1_2) * int64(f9_38)
- f2f2 := int64(f2) * int64(f2)
- f2f3_2 := int64(f2_2) * int64(f3)
- f2f4_2 := int64(f2_2) * int64(f4)
- f2f5_2 := int64(f2_2) * int64(f5)
- f2f6_2 := int64(f2_2) * int64(f6)
- f2f7_2 := int64(f2_2) * int64(f7)
- f2f8_38 := int64(f2_2) * int64(f8_19)
- f2f9_38 := int64(f2) * int64(f9_38)
- f3f3_2 := int64(f3_2) * int64(f3)
- f3f4_2 := int64(f3_2) * int64(f4)
- f3f5_4 := int64(f3_2) * int64(f5_2)
- f3f6_2 := int64(f3_2) * int64(f6)
- f3f7_76 := int64(f3_2) * int64(f7_38)
- f3f8_38 := int64(f3_2) * int64(f8_19)
- f3f9_76 := int64(f3_2) * int64(f9_38)
- f4f4 := int64(f4) * int64(f4)
- f4f5_2 := int64(f4_2) * int64(f5)
- f4f6_38 := int64(f4_2) * int64(f6_19)
- f4f7_38 := int64(f4) * int64(f7_38)
- f4f8_38 := int64(f4_2) * int64(f8_19)
- f4f9_38 := int64(f4) * int64(f9_38)
- f5f5_38 := int64(f5) * int64(f5_38)
- f5f6_38 := int64(f5_2) * int64(f6_19)
- f5f7_76 := int64(f5_2) * int64(f7_38)
- f5f8_38 := int64(f5_2) * int64(f8_19)
- f5f9_76 := int64(f5_2) * int64(f9_38)
- f6f6_19 := int64(f6) * int64(f6_19)
- f6f7_38 := int64(f6) * int64(f7_38)
- f6f8_38 := int64(f6_2) * int64(f8_19)
- f6f9_38 := int64(f6) * int64(f9_38)
- f7f7_38 := int64(f7) * int64(f7_38)
- f7f8_38 := int64(f7_2) * int64(f8_19)
- f7f9_76 := int64(f7_2) * int64(f9_38)
- f8f8_19 := int64(f8) * int64(f8_19)
- f8f9_38 := int64(f8) * int64(f9_38)
- f9f9_38 := int64(f9) * int64(f9_38)
- h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
- h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
- h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
- h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
- h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
- h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
- h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
- h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
- h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
- h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
- var carry [10]int64
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
-
- carry[1] = (h1 + (1 << 24)) >> 25
- h2 += carry[1]
- h1 -= carry[1] << 25
- carry[5] = (h5 + (1 << 24)) >> 25
- h6 += carry[5]
- h5 -= carry[5] << 25
-
- carry[2] = (h2 + (1 << 25)) >> 26
- h3 += carry[2]
- h2 -= carry[2] << 26
- carry[6] = (h6 + (1 << 25)) >> 26
- h7 += carry[6]
- h6 -= carry[6] << 26
-
- carry[3] = (h3 + (1 << 24)) >> 25
- h4 += carry[3]
- h3 -= carry[3] << 25
- carry[7] = (h7 + (1 << 24)) >> 25
- h8 += carry[7]
- h7 -= carry[7] << 25
-
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- carry[8] = (h8 + (1 << 25)) >> 26
- h9 += carry[8]
- h8 -= carry[8] << 26
-
- carry[9] = (h9 + (1 << 24)) >> 25
- h0 += carry[9] * 19
- h9 -= carry[9] << 25
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
-// feMul121666 calculates h = f * 121666. Can overlap h with f.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func feMul121666(h, f *fieldElement) {
- h0 := int64(f[0]) * 121666
- h1 := int64(f[1]) * 121666
- h2 := int64(f[2]) * 121666
- h3 := int64(f[3]) * 121666
- h4 := int64(f[4]) * 121666
- h5 := int64(f[5]) * 121666
- h6 := int64(f[6]) * 121666
- h7 := int64(f[7]) * 121666
- h8 := int64(f[8]) * 121666
- h9 := int64(f[9]) * 121666
- var carry [10]int64
-
- carry[9] = (h9 + (1 << 24)) >> 25
- h0 += carry[9] * 19
- h9 -= carry[9] << 25
- carry[1] = (h1 + (1 << 24)) >> 25
- h2 += carry[1]
- h1 -= carry[1] << 25
- carry[3] = (h3 + (1 << 24)) >> 25
- h4 += carry[3]
- h3 -= carry[3] << 25
- carry[5] = (h5 + (1 << 24)) >> 25
- h6 += carry[5]
- h5 -= carry[5] << 25
- carry[7] = (h7 + (1 << 24)) >> 25
- h8 += carry[7]
- h7 -= carry[7] << 25
-
- carry[0] = (h0 + (1 << 25)) >> 26
- h1 += carry[0]
- h0 -= carry[0] << 26
- carry[2] = (h2 + (1 << 25)) >> 26
- h3 += carry[2]
- h2 -= carry[2] << 26
- carry[4] = (h4 + (1 << 25)) >> 26
- h5 += carry[4]
- h4 -= carry[4] << 26
- carry[6] = (h6 + (1 << 25)) >> 26
- h7 += carry[6]
- h6 -= carry[6] << 26
- carry[8] = (h8 + (1 << 25)) >> 26
- h9 += carry[8]
- h8 -= carry[8] << 26
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
-// feInvert sets out = z^-1.
-func feInvert(out, z *fieldElement) {
- var t0, t1, t2, t3 fieldElement
- var i int
-
- feSquare(&t0, z)
- for i = 1; i < 1; i++ {
- feSquare(&t0, &t0)
- }
- feSquare(&t1, &t0)
- for i = 1; i < 2; i++ {
- feSquare(&t1, &t1)
- }
- feMul(&t1, z, &t1)
- feMul(&t0, &t0, &t1)
- feSquare(&t2, &t0)
- for i = 1; i < 1; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t1, &t1, &t2)
- feSquare(&t2, &t1)
- for i = 1; i < 5; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t1, &t2, &t1)
- feSquare(&t2, &t1)
- for i = 1; i < 10; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t2, &t2, &t1)
- feSquare(&t3, &t2)
- for i = 1; i < 20; i++ {
- feSquare(&t3, &t3)
- }
- feMul(&t2, &t3, &t2)
- feSquare(&t2, &t2)
- for i = 1; i < 10; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t1, &t2, &t1)
- feSquare(&t2, &t1)
- for i = 1; i < 50; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t2, &t2, &t1)
- feSquare(&t3, &t2)
- for i = 1; i < 100; i++ {
- feSquare(&t3, &t3)
- }
- feMul(&t2, &t3, &t2)
- feSquare(&t2, &t2)
- for i = 1; i < 50; i++ {
- feSquare(&t2, &t2)
- }
- feMul(&t1, &t2, &t1)
- feSquare(&t1, &t1)
- for i = 1; i < 5; i++ {
- feSquare(&t1, &t1)
- }
- feMul(out, &t1, &t0)
-}
-
-func scalarMult(out, in, base *[32]byte) {
- var e [32]byte
-
- copy(e[:], in[:])
- e[0] &= 248
- e[31] &= 127
- e[31] |= 64
-
- var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
- feFromBytes(&x1, base)
- feOne(&x2)
- feCopy(&x3, &x1)
- feOne(&z3)
-
- swap := int32(0)
- for pos := 254; pos >= 0; pos-- {
- b := e[pos/8] >> uint(pos&7)
- b &= 1
- swap ^= int32(b)
- feCSwap(&x2, &x3, swap)
- feCSwap(&z2, &z3, swap)
- swap = int32(b)
-
- feSub(&tmp0, &x3, &z3)
- feSub(&tmp1, &x2, &z2)
- feAdd(&x2, &x2, &z2)
- feAdd(&z2, &x3, &z3)
- feMul(&z3, &tmp0, &x2)
- feMul(&z2, &z2, &tmp1)
- feSquare(&tmp0, &tmp1)
- feSquare(&tmp1, &x2)
- feAdd(&x3, &z3, &z2)
- feSub(&z2, &z3, &z2)
- feMul(&x2, &tmp1, &tmp0)
- feSub(&tmp1, &tmp1, &tmp0)
- feSquare(&z2, &z2)
- feMul121666(&z3, &tmp1)
- feSquare(&x3, &x3)
- feAdd(&tmp0, &tmp0, &z3)
- feMul(&z3, &x1, &z2)
- feMul(&z2, &tmp1, &tmp0)
- }
-
- feCSwap(&x2, &x3, swap)
- feCSwap(&z2, &z3, swap)
-
- feInvert(&z2, &z2)
- feMul(&x2, &x2, &z2)
- feToBytes(out, &x2)
-}
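
The generic scalarMult above first clamps the scalar in place — clearing the three
low bits, clearing bit 255, and setting bit 254 — before walking all 255 scalar bits
through a constant-time Montgomery ladder built from the fe* helpers. A minimal Go
sketch of just that clamping step (the helper name is ours, for illustration):

    // clampScalar applies the X25519 scalar clamping performed at the
    // top of scalarMult: clear the 3 low bits (making the scalar a
    // multiple of the cofactor 8), clear bit 255, and set bit 254.
    func clampScalar(in [32]byte) [32]byte {
        in[0] &= 248
        in[31] &= 127
        in[31] |= 64
        return in
    }
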
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go b/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go
deleted file mode 100644
index a3d3a3d9..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/curve_impl.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package curve25519
-
-import (
- "crypto/elliptic"
- "math/big"
- "sync"
-)
-
-var cv25519 cv25519Curve
-
-type cv25519Curve struct {
- *elliptic.CurveParams
-}
-
-func copyReverse(dst []byte, src []byte) {
- // Curve 25519 multiplication functions expect scalars in the reverse
- // byte order from PGP's. To keep the curve25519Curve type consistent
- // with other curves, we reverse it here.
- for i, j := 0, len(src)-1; j >= 0 && i < len(dst); i, j = i+1, j-1 {
- dst[i] = src[j]
- }
-}
-
-func copyTruncate(dst []byte, src []byte) {
- lenDst, lenSrc := len(dst), len(src)
- if lenDst == lenSrc {
- copy(dst, src)
- } else if lenDst > lenSrc {
- copy(dst[lenDst-lenSrc:lenDst], src)
- } else if lenDst < lenSrc {
- copy(dst, src[:lenDst])
- }
-}
-
-func (cv25519Curve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) {
- // Assume y1 is 0 with cv25519.
- var dst [32]byte
- var x1Bytes [32]byte
- var scalarBytes [32]byte
-
- copyTruncate(x1Bytes[:], x1.Bytes())
- copyReverse(scalarBytes[:], scalar)
-
- scalarMult(&dst, &scalarBytes, &x1Bytes)
-
- x = new(big.Int).SetBytes(dst[:])
- y = new(big.Int)
- return x, y
-}
-
-func (cv25519Curve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
- var dst [32]byte
- var scalarBytes [32]byte
- copyReverse(scalarBytes[:], scalar[:32])
- scalarMult(&dst, &scalarBytes, &basePoint)
- x = new(big.Int).SetBytes(dst[:])
- y = new(big.Int)
- return x, y
-}
-
-func (cv25519Curve) IsOnCurve(bigX, bigY *big.Int) bool {
- return bigY.Sign() == 0 // bigY == 0 ?
-}
-
-// More information about 0x40 point format:
-// https://tools.ietf.org/html/draft-koch-eddsa-for-openpgp-00#section-3
-// In addition to uncompressed point format described here:
-// https://tools.ietf.org/html/rfc6637#section-6
-
-func (cv25519Curve) MarshalType40(x, y *big.Int) []byte {
- byteLen := 32
-
- ret := make([]byte, 1+byteLen)
- ret[0] = 0x40
-
- xBytes := x.Bytes()
- copyTruncate(ret[1:], xBytes)
- return ret
-}
-
-func (cv25519Curve) UnmarshalType40(data []byte) (x, y *big.Int) {
- if len(data) != 1+32 {
- return nil, nil
- }
- if data[0] != 0x40 {
- return nil, nil
- }
- x = new(big.Int).SetBytes(data[1:])
- // Any x is a valid curve point.
- return x, new(big.Int)
-}
-
-// ToCurve25519 casts the given elliptic.Curve to the Curve25519 type; the
-// second return value reports whether the cast succeeded.
-func ToCurve25519(cv elliptic.Curve) (cv25519Curve, bool) {
- cv2, ok := cv.(cv25519Curve)
- return cv2, ok
-}
-
-func initCv25519() {
- cv25519.CurveParams = &elliptic.CurveParams{Name: "Curve 25519"}
- // Some code relies on these parameters being available for
- // checking Curve coordinate length. They should not be used
- // directly for any calculations.
- cv25519.P, _ = new(big.Int).SetString("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed", 16)
- cv25519.N, _ = new(big.Int).SetString("1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed", 16)
- cv25519.Gx, _ = new(big.Int).SetString("9", 16)
- cv25519.Gy, _ = new(big.Int).SetString("20ae19a1b8a086b4e01edd2c7748d14c923d4d7e6d7c61b229e9c5a27eced3d9", 16)
- cv25519.BitSize = 256
-}
-
-var initonce sync.Once
-
-// Cv25519 returns a Curve which (partially) implements Cv25519. Only
-// ScalarMult and ScalarBaseMult are valid for this curve. Add and
-// Double should not be used.
-func Cv25519() elliptic.Curve {
- initonce.Do(initCv25519)
- return cv25519
-}
-
-func (curve cv25519Curve) Params() *elliptic.CurveParams {
- return curve.CurveParams
-}
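
curve_impl.go adapted the x-only scalar multiplication to Go's elliptic.Curve
interface so the OpenPGP layer could treat Curve25519 like the NIST curves, using
the 0x40-prefixed point encoding from draft-koch-eddsa-for-openpgp. A sketch of a
round trip through that encoding, written against the removed API (secret is an
assumed 32-byte scalar):

    c, _ := curve25519.ToCurve25519(curve25519.Cv25519())
    x, y := c.ScalarBaseMult(secret) // y is always zero on this curve
    wire := c.MarshalType40(x, y)    // 33 bytes: 0x40, then the 32-byte x
    x2, _ := c.UnmarshalType40(wire) // nil, nil on a bad length or prefix
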
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/doc.go b/vendor/github.com/keybase/go-crypto/curve25519/doc.go
deleted file mode 100644
index 78bd9fc0..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package curve25519 provides an implementation of scalar multiplication on
-// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html
-package curve25519 // import "github.com/keybase/go-crypto/curve25519"
-
-// basePoint is the x coordinate of the generator of the curve.
-var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
-
-// ScalarMult sets dst to the product in*base where dst and base are the x
-// coordinates of group points and all values are in little-endian form.
-func ScalarMult(dst, in, base *[32]byte) {
- scalarMult(dst, in, base)
-}
-
-// ScalarBaseMult sets dst to the product in*base where dst and base are the x
-// coordinates of group points, base is the standard generator and all values
-// are in little-endian form.
-func ScalarBaseMult(dst, in *[32]byte) {
- ScalarMult(dst, in, &basePoint)
-}
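
doc.go was the entire public surface: two x-only multiplications, which is exactly
the Diffie-Hellman shape. A short sketch of a shared-secret agreement with this API
(the private scalars would come from crypto/rand in real use); both sides end up
with the same 32 bytes:

    var aPriv, bPriv [32]byte // random private scalars
    var aPub, bPub, s1, s2 [32]byte
    curve25519.ScalarBaseMult(&aPub, &aPriv)
    curve25519.ScalarBaseMult(&bPub, &bPriv)
    curve25519.ScalarMult(&s1, &aPriv, &bPub)
    curve25519.ScalarMult(&s2, &bPriv, &aPub)
    // s1 == s2: the shared X25519 secret
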
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s
deleted file mode 100644
index 39081610..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/freeze_amd64.s
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-#include "const_amd64.h"
-
-// func freeze(inout *[5]uint64)
-TEXT ·freeze(SB),7,$0-8
- MOVQ inout+0(FP), DI
-
- MOVQ 0(DI),SI
- MOVQ 8(DI),DX
- MOVQ 16(DI),CX
- MOVQ 24(DI),R8
- MOVQ 32(DI),R9
- MOVQ $REDMASK51,AX
- MOVQ AX,R10
- SUBQ $18,R10
- MOVQ $3,R11
-REDUCELOOP:
- MOVQ SI,R12
- SHRQ $51,R12
- ANDQ AX,SI
- ADDQ R12,DX
- MOVQ DX,R12
- SHRQ $51,R12
- ANDQ AX,DX
- ADDQ R12,CX
- MOVQ CX,R12
- SHRQ $51,R12
- ANDQ AX,CX
- ADDQ R12,R8
- MOVQ R8,R12
- SHRQ $51,R12
- ANDQ AX,R8
- ADDQ R12,R9
- MOVQ R9,R12
- SHRQ $51,R12
- ANDQ AX,R9
- IMUL3Q $19,R12,R12
- ADDQ R12,SI
- SUBQ $1,R11
- JA REDUCELOOP
- MOVQ $1,R12
- CMPQ R10,SI
- CMOVQLT R11,R12
- CMPQ AX,DX
- CMOVQNE R11,R12
- CMPQ AX,CX
- CMOVQNE R11,R12
- CMPQ AX,R8
- CMOVQNE R11,R12
- CMPQ AX,R9
- CMOVQNE R11,R12
- NEGQ R12
- ANDQ R12,AX
- ANDQ R12,R10
- SUBQ R10,SI
- SUBQ AX,DX
- SUBQ AX,CX
- SUBQ AX,R8
- SUBQ AX,R9
- MOVQ SI,0(DI)
- MOVQ DX,8(DI)
- MOVQ CX,16(DI)
- MOVQ R8,24(DI)
- MOVQ R9,32(DI)
- RET
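
freeze reduces a value held in five 51-bit limbs to its unique canonical
representative mod p = 2^255 - 19: three carry passes, then a conditional
subtraction of p selected with CMOV instructions rather than a branch, keeping the
routine constant-time. A rough Go rendering of one carry pass (our own
illustration, not part of the package):

    const redMask51 = (1 << 51) - 1 // REDMASK51 in const_amd64.h

    // carryPass propagates each limb's overflow into the next limb; the
    // top limb's carry wraps around to limb 0 with a factor of 19,
    // because 2^255 = 19 mod p.
    func carryPass(l *[5]uint64) {
        for i := 0; i < 4; i++ {
            l[i+1] += l[i] >> 51
            l[i] &= redMask51
        }
        l[0] += 19 * (l[4] >> 51)
        l[4] &= redMask51
    }
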
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s
deleted file mode 100644
index 9e9040b2..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/ladderstep_amd64.s
+++ /dev/null
@@ -1,1377 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-#include "const_amd64.h"
-
-// func ladderstep(inout *[5][5]uint64)
-TEXT ·ladderstep(SB),0,$296-8
- MOVQ inout+0(FP),DI
-
- MOVQ 40(DI),SI
- MOVQ 48(DI),DX
- MOVQ 56(DI),CX
- MOVQ 64(DI),R8
- MOVQ 72(DI),R9
- MOVQ SI,AX
- MOVQ DX,R10
- MOVQ CX,R11
- MOVQ R8,R12
- MOVQ R9,R13
- ADDQ ·_2P0(SB),AX
- ADDQ ·_2P1234(SB),R10
- ADDQ ·_2P1234(SB),R11
- ADDQ ·_2P1234(SB),R12
- ADDQ ·_2P1234(SB),R13
- ADDQ 80(DI),SI
- ADDQ 88(DI),DX
- ADDQ 96(DI),CX
- ADDQ 104(DI),R8
- ADDQ 112(DI),R9
- SUBQ 80(DI),AX
- SUBQ 88(DI),R10
- SUBQ 96(DI),R11
- SUBQ 104(DI),R12
- SUBQ 112(DI),R13
- MOVQ SI,0(SP)
- MOVQ DX,8(SP)
- MOVQ CX,16(SP)
- MOVQ R8,24(SP)
- MOVQ R9,32(SP)
- MOVQ AX,40(SP)
- MOVQ R10,48(SP)
- MOVQ R11,56(SP)
- MOVQ R12,64(SP)
- MOVQ R13,72(SP)
- MOVQ 40(SP),AX
- MULQ 40(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 40(SP),AX
- SHLQ $1,AX
- MULQ 48(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 40(SP),AX
- SHLQ $1,AX
- MULQ 56(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 40(SP),AX
- SHLQ $1,AX
- MULQ 64(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 40(SP),AX
- SHLQ $1,AX
- MULQ 72(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 48(SP),AX
- MULQ 48(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 48(SP),AX
- SHLQ $1,AX
- MULQ 56(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 48(SP),AX
- SHLQ $1,AX
- MULQ 64(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 48(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 72(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 56(SP),AX
- MULQ 56(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 56(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 64(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 56(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 72(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 64(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 64(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 64(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 72(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 72(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 72(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- ANDQ DX,SI
- MOVQ CX,R8
- SHRQ $51,CX
- ADDQ R10,CX
- ANDQ DX,R8
- MOVQ CX,R9
- SHRQ $51,CX
- ADDQ R12,CX
- ANDQ DX,R9
- MOVQ CX,AX
- SHRQ $51,CX
- ADDQ R14,CX
- ANDQ DX,AX
- MOVQ CX,R10
- SHRQ $51,CX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,80(SP)
- MOVQ R8,88(SP)
- MOVQ R9,96(SP)
- MOVQ AX,104(SP)
- MOVQ R10,112(SP)
- MOVQ 0(SP),AX
- MULQ 0(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 0(SP),AX
- SHLQ $1,AX
- MULQ 8(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 0(SP),AX
- SHLQ $1,AX
- MULQ 16(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 0(SP),AX
- SHLQ $1,AX
- MULQ 24(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 0(SP),AX
- SHLQ $1,AX
- MULQ 32(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 8(SP),AX
- MULQ 8(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- SHLQ $1,AX
- MULQ 16(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 8(SP),AX
- SHLQ $1,AX
- MULQ 24(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 16(SP),AX
- MULQ 16(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 16(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 24(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 16(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 24(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 24(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 24(SP),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 32(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 32(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- ANDQ DX,SI
- MOVQ CX,R8
- SHRQ $51,CX
- ADDQ R10,CX
- ANDQ DX,R8
- MOVQ CX,R9
- SHRQ $51,CX
- ADDQ R12,CX
- ANDQ DX,R9
- MOVQ CX,AX
- SHRQ $51,CX
- ADDQ R14,CX
- ANDQ DX,AX
- MOVQ CX,R10
- SHRQ $51,CX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,120(SP)
- MOVQ R8,128(SP)
- MOVQ R9,136(SP)
- MOVQ AX,144(SP)
- MOVQ R10,152(SP)
- MOVQ SI,SI
- MOVQ R8,DX
- MOVQ R9,CX
- MOVQ AX,R8
- MOVQ R10,R9
- ADDQ ·_2P0(SB),SI
- ADDQ ·_2P1234(SB),DX
- ADDQ ·_2P1234(SB),CX
- ADDQ ·_2P1234(SB),R8
- ADDQ ·_2P1234(SB),R9
- SUBQ 80(SP),SI
- SUBQ 88(SP),DX
- SUBQ 96(SP),CX
- SUBQ 104(SP),R8
- SUBQ 112(SP),R9
- MOVQ SI,160(SP)
- MOVQ DX,168(SP)
- MOVQ CX,176(SP)
- MOVQ R8,184(SP)
- MOVQ R9,192(SP)
- MOVQ 120(DI),SI
- MOVQ 128(DI),DX
- MOVQ 136(DI),CX
- MOVQ 144(DI),R8
- MOVQ 152(DI),R9
- MOVQ SI,AX
- MOVQ DX,R10
- MOVQ CX,R11
- MOVQ R8,R12
- MOVQ R9,R13
- ADDQ ·_2P0(SB),AX
- ADDQ ·_2P1234(SB),R10
- ADDQ ·_2P1234(SB),R11
- ADDQ ·_2P1234(SB),R12
- ADDQ ·_2P1234(SB),R13
- ADDQ 160(DI),SI
- ADDQ 168(DI),DX
- ADDQ 176(DI),CX
- ADDQ 184(DI),R8
- ADDQ 192(DI),R9
- SUBQ 160(DI),AX
- SUBQ 168(DI),R10
- SUBQ 176(DI),R11
- SUBQ 184(DI),R12
- SUBQ 192(DI),R13
- MOVQ SI,200(SP)
- MOVQ DX,208(SP)
- MOVQ CX,216(SP)
- MOVQ R8,224(SP)
- MOVQ R9,232(SP)
- MOVQ AX,240(SP)
- MOVQ R10,248(SP)
- MOVQ R11,256(SP)
- MOVQ R12,264(SP)
- MOVQ R13,272(SP)
- MOVQ 224(SP),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,280(SP)
- MULQ 56(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 232(SP),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,288(SP)
- MULQ 48(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 200(SP),AX
- MULQ 40(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 200(SP),AX
- MULQ 48(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 200(SP),AX
- MULQ 56(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 200(SP),AX
- MULQ 64(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 200(SP),AX
- MULQ 72(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 208(SP),AX
- MULQ 40(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 208(SP),AX
- MULQ 48(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 208(SP),AX
- MULQ 56(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 208(SP),AX
- MULQ 64(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 208(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 72(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 216(SP),AX
- MULQ 40(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 216(SP),AX
- MULQ 48(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 216(SP),AX
- MULQ 56(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 216(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 64(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 216(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 72(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 224(SP),AX
- MULQ 40(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 224(SP),AX
- MULQ 48(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 280(SP),AX
- MULQ 64(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 280(SP),AX
- MULQ 72(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 232(SP),AX
- MULQ 40(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 288(SP),AX
- MULQ 56(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 288(SP),AX
- MULQ 64(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 288(SP),AX
- MULQ 72(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,40(SP)
- MOVQ R8,48(SP)
- MOVQ R9,56(SP)
- MOVQ AX,64(SP)
- MOVQ R10,72(SP)
- MOVQ 264(SP),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,200(SP)
- MULQ 16(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 272(SP),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,208(SP)
- MULQ 8(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 240(SP),AX
- MULQ 0(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 240(SP),AX
- MULQ 8(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 240(SP),AX
- MULQ 16(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 240(SP),AX
- MULQ 24(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 240(SP),AX
- MULQ 32(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 248(SP),AX
- MULQ 0(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 248(SP),AX
- MULQ 8(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 248(SP),AX
- MULQ 16(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 248(SP),AX
- MULQ 24(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 248(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 32(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 256(SP),AX
- MULQ 0(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 256(SP),AX
- MULQ 8(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 256(SP),AX
- MULQ 16(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 256(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 24(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 256(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 32(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 264(SP),AX
- MULQ 0(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 264(SP),AX
- MULQ 8(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 200(SP),AX
- MULQ 24(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 200(SP),AX
- MULQ 32(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 272(SP),AX
- MULQ 0(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 208(SP),AX
- MULQ 16(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 208(SP),AX
- MULQ 24(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 208(SP),AX
- MULQ 32(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,DX
- MOVQ R8,CX
- MOVQ R9,R11
- MOVQ AX,R12
- MOVQ R10,R13
- ADDQ ·_2P0(SB),DX
- ADDQ ·_2P1234(SB),CX
- ADDQ ·_2P1234(SB),R11
- ADDQ ·_2P1234(SB),R12
- ADDQ ·_2P1234(SB),R13
- ADDQ 40(SP),SI
- ADDQ 48(SP),R8
- ADDQ 56(SP),R9
- ADDQ 64(SP),AX
- ADDQ 72(SP),R10
- SUBQ 40(SP),DX
- SUBQ 48(SP),CX
- SUBQ 56(SP),R11
- SUBQ 64(SP),R12
- SUBQ 72(SP),R13
- MOVQ SI,120(DI)
- MOVQ R8,128(DI)
- MOVQ R9,136(DI)
- MOVQ AX,144(DI)
- MOVQ R10,152(DI)
- MOVQ DX,160(DI)
- MOVQ CX,168(DI)
- MOVQ R11,176(DI)
- MOVQ R12,184(DI)
- MOVQ R13,192(DI)
- MOVQ 120(DI),AX
- MULQ 120(DI)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 120(DI),AX
- SHLQ $1,AX
- MULQ 128(DI)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 120(DI),AX
- SHLQ $1,AX
- MULQ 136(DI)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 120(DI),AX
- SHLQ $1,AX
- MULQ 144(DI)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 120(DI),AX
- SHLQ $1,AX
- MULQ 152(DI)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 128(DI),AX
- MULQ 128(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 128(DI),AX
- SHLQ $1,AX
- MULQ 136(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 128(DI),AX
- SHLQ $1,AX
- MULQ 144(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 128(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 152(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 136(DI),AX
- MULQ 136(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 136(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 144(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 136(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 152(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 144(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 144(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 144(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 152(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 152(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 152(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- ANDQ DX,SI
- MOVQ CX,R8
- SHRQ $51,CX
- ADDQ R10,CX
- ANDQ DX,R8
- MOVQ CX,R9
- SHRQ $51,CX
- ADDQ R12,CX
- ANDQ DX,R9
- MOVQ CX,AX
- SHRQ $51,CX
- ADDQ R14,CX
- ANDQ DX,AX
- MOVQ CX,R10
- SHRQ $51,CX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,120(DI)
- MOVQ R8,128(DI)
- MOVQ R9,136(DI)
- MOVQ AX,144(DI)
- MOVQ R10,152(DI)
- MOVQ 160(DI),AX
- MULQ 160(DI)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 160(DI),AX
- SHLQ $1,AX
- MULQ 168(DI)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 160(DI),AX
- SHLQ $1,AX
- MULQ 176(DI)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 160(DI),AX
- SHLQ $1,AX
- MULQ 184(DI)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 160(DI),AX
- SHLQ $1,AX
- MULQ 192(DI)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 168(DI),AX
- MULQ 168(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 168(DI),AX
- SHLQ $1,AX
- MULQ 176(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 168(DI),AX
- SHLQ $1,AX
- MULQ 184(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 168(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 192(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 176(DI),AX
- MULQ 176(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 176(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 184(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 176(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 192(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 184(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 184(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 184(DI),DX
- IMUL3Q $38,DX,AX
- MULQ 192(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 192(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 192(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- ANDQ DX,SI
- MOVQ CX,R8
- SHRQ $51,CX
- ADDQ R10,CX
- ANDQ DX,R8
- MOVQ CX,R9
- SHRQ $51,CX
- ADDQ R12,CX
- ANDQ DX,R9
- MOVQ CX,AX
- SHRQ $51,CX
- ADDQ R14,CX
- ANDQ DX,AX
- MOVQ CX,R10
- SHRQ $51,CX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,160(DI)
- MOVQ R8,168(DI)
- MOVQ R9,176(DI)
- MOVQ AX,184(DI)
- MOVQ R10,192(DI)
- MOVQ 184(DI),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,0(SP)
- MULQ 16(DI)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 192(DI),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,8(SP)
- MULQ 8(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 160(DI),AX
- MULQ 0(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 160(DI),AX
- MULQ 8(DI)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 160(DI),AX
- MULQ 16(DI)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 160(DI),AX
- MULQ 24(DI)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 160(DI),AX
- MULQ 32(DI)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 168(DI),AX
- MULQ 0(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 168(DI),AX
- MULQ 8(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 168(DI),AX
- MULQ 16(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 168(DI),AX
- MULQ 24(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 168(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 176(DI),AX
- MULQ 0(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 176(DI),AX
- MULQ 8(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 176(DI),AX
- MULQ 16(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 176(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 24(DI)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 176(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 184(DI),AX
- MULQ 0(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 184(DI),AX
- MULQ 8(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 0(SP),AX
- MULQ 24(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SP),AX
- MULQ 32(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 192(DI),AX
- MULQ 0(DI)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SP),AX
- MULQ 16(DI)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 8(SP),AX
- MULQ 24(DI)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- MULQ 32(DI)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,160(DI)
- MOVQ R8,168(DI)
- MOVQ R9,176(DI)
- MOVQ AX,184(DI)
- MOVQ R10,192(DI)
- MOVQ 144(SP),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,0(SP)
- MULQ 96(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 152(SP),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,8(SP)
- MULQ 88(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 120(SP),AX
- MULQ 80(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 120(SP),AX
- MULQ 88(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 120(SP),AX
- MULQ 96(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 120(SP),AX
- MULQ 104(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 120(SP),AX
- MULQ 112(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 128(SP),AX
- MULQ 80(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 128(SP),AX
- MULQ 88(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 128(SP),AX
- MULQ 96(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 128(SP),AX
- MULQ 104(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 128(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 112(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 136(SP),AX
- MULQ 80(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 136(SP),AX
- MULQ 88(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 136(SP),AX
- MULQ 96(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 136(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 104(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 136(SP),DX
- IMUL3Q $19,DX,AX
- MULQ 112(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 144(SP),AX
- MULQ 80(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 144(SP),AX
- MULQ 88(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 0(SP),AX
- MULQ 104(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SP),AX
- MULQ 112(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 152(SP),AX
- MULQ 80(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SP),AX
- MULQ 96(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 8(SP),AX
- MULQ 104(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- MULQ 112(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,40(DI)
- MOVQ R8,48(DI)
- MOVQ R9,56(DI)
- MOVQ AX,64(DI)
- MOVQ R10,72(DI)
- MOVQ 160(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 168(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- ADDQ AX,CX
- MOVQ DX,R8
- MOVQ 176(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- ADDQ AX,R8
- MOVQ DX,R9
- MOVQ 184(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- ADDQ AX,R9
- MOVQ DX,R10
- MOVQ 192(SP),AX
- MULQ ·_121666_213(SB)
- SHRQ $13,AX
- ADDQ AX,R10
- IMUL3Q $19,DX,DX
- ADDQ DX,SI
- ADDQ 80(SP),SI
- ADDQ 88(SP),CX
- ADDQ 96(SP),R8
- ADDQ 104(SP),R9
- ADDQ 112(SP),R10
- MOVQ SI,80(DI)
- MOVQ CX,88(DI)
- MOVQ R8,96(DI)
- MOVQ R9,104(DI)
- MOVQ R10,112(DI)
- MOVQ 104(DI),SI
- IMUL3Q $19,SI,AX
- MOVQ AX,0(SP)
- MULQ 176(SP)
- MOVQ AX,SI
- MOVQ DX,CX
- MOVQ 112(DI),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,8(SP)
- MULQ 168(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 80(DI),AX
- MULQ 160(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 80(DI),AX
- MULQ 168(SP)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 80(DI),AX
- MULQ 176(SP)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 80(DI),AX
- MULQ 184(SP)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 80(DI),AX
- MULQ 192(SP)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 88(DI),AX
- MULQ 160(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 88(DI),AX
- MULQ 168(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 88(DI),AX
- MULQ 176(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 88(DI),AX
- MULQ 184(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 88(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 192(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 96(DI),AX
- MULQ 160(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 96(DI),AX
- MULQ 168(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 96(DI),AX
- MULQ 176(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 96(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 184(SP)
- ADDQ AX,SI
- ADCQ DX,CX
- MOVQ 96(DI),DX
- IMUL3Q $19,DX,AX
- MULQ 192(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 104(DI),AX
- MULQ 160(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 104(DI),AX
- MULQ 168(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 0(SP),AX
- MULQ 184(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SP),AX
- MULQ 192(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 112(DI),AX
- MULQ 160(SP)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SP),AX
- MULQ 176(SP)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 8(SP),AX
- MULQ 184(SP)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- MULQ 192(SP)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ $REDMASK51,DX
- SHLQ $13,CX:SI
- ANDQ DX,SI
- SHLQ $13,R9:R8
- ANDQ DX,R8
- ADDQ CX,R8
- SHLQ $13,R11:R10
- ANDQ DX,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ DX,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ DX,R14
- ADDQ R13,R14
- IMUL3Q $19,R15,CX
- ADDQ CX,SI
- MOVQ SI,CX
- SHRQ $51,CX
- ADDQ R8,CX
- MOVQ CX,R8
- SHRQ $51,CX
- ANDQ DX,SI
- ADDQ R10,CX
- MOVQ CX,R9
- SHRQ $51,CX
- ANDQ DX,R8
- ADDQ R12,CX
- MOVQ CX,AX
- SHRQ $51,CX
- ANDQ DX,R9
- ADDQ R14,CX
- MOVQ CX,R10
- SHRQ $51,CX
- ANDQ DX,AX
- IMUL3Q $19,CX,CX
- ADDQ CX,SI
- ANDQ DX,R10
- MOVQ SI,80(DI)
- MOVQ R8,88(DI)
- MOVQ R9,96(DI)
- MOVQ AX,104(DI)
- MOVQ R10,112(DI)
- RET
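
ladderstep performs one Montgomery-ladder step over the five field elements held
in inout; the long MOVQ/MULQ runs are schoolbook 5x5 limb products using the same
shift-13/mask-51 reduction as mul and square below. The ·_121666_213 constant
folds the curve constant (A+2)/4 = 121666 into the doubling formula, pre-shifted
by 13 bits so the MULQ/SHRQ $13 sequence lands directly on 51-bit limbs. A quick
Go check of that arithmetic (illustration only; the constant names are ours):

    const a24 = (486662 + 2) / 4  // 121666: (A+2)/4 for A = 486662
    const c121666x213 = a24 << 13 // 996687872, the value behind ·_121666_213
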
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go b/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go
deleted file mode 100644
index 5822bd53..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/mont25519_amd64.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64,!gccgo,!appengine
-
-package curve25519
-
-// These functions are implemented in the .s files. The names of the functions
-// in the rest of the file are also taken from the SUPERCOP sources to help
-// people following along.
-
-//go:noescape
-func cswap(inout *[5]uint64, v uint64)
-
-//go:noescape
-func ladderstep(inout *[5][5]uint64)
-
-//go:noescape
-func freeze(inout *[5]uint64)
-
-//go:noescape
-func mul(dest, a, b *[5]uint64)
-
-//go:noescape
-func square(out, in *[5]uint64)
-
-// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
-func mladder(xr, zr *[5]uint64, s *[32]byte) {
- var work [5][5]uint64
-
- work[0] = *xr
- setint(&work[1], 1)
- setint(&work[2], 0)
- work[3] = *xr
- setint(&work[4], 1)
-
- j := uint(6)
- var prevbit byte
-
- for i := 31; i >= 0; i-- {
- for j < 8 {
- bit := ((*s)[i] >> j) & 1
- swap := bit ^ prevbit
- prevbit = bit
- cswap(&work[1], uint64(swap))
- ladderstep(&work)
- j--
- }
- j = 7
- }
-
- *xr = work[1]
- *zr = work[2]
-}
-
-func scalarMult(out, in, base *[32]byte) {
- var e [32]byte
- copy(e[:], (*in)[:])
- e[0] &= 248
- e[31] &= 127
- e[31] |= 64
-
- var t, z [5]uint64
- unpack(&t, base)
- mladder(&t, &z, &e)
- invert(&z, &z)
- mul(&t, &t, &z)
- pack(out, &t)
-}
-
-func setint(r *[5]uint64, v uint64) {
- r[0] = v
- r[1] = 0
- r[2] = 0
- r[3] = 0
- r[4] = 0
-}
-
-// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
-// order.
-func unpack(r *[5]uint64, x *[32]byte) {
- r[0] = uint64(x[0]) |
- uint64(x[1])<<8 |
- uint64(x[2])<<16 |
- uint64(x[3])<<24 |
- uint64(x[4])<<32 |
- uint64(x[5])<<40 |
- uint64(x[6]&7)<<48
-
- r[1] = uint64(x[6])>>3 |
- uint64(x[7])<<5 |
- uint64(x[8])<<13 |
- uint64(x[9])<<21 |
- uint64(x[10])<<29 |
- uint64(x[11])<<37 |
- uint64(x[12]&63)<<45
-
- r[2] = uint64(x[12])>>6 |
- uint64(x[13])<<2 |
- uint64(x[14])<<10 |
- uint64(x[15])<<18 |
- uint64(x[16])<<26 |
- uint64(x[17])<<34 |
- uint64(x[18])<<42 |
- uint64(x[19]&1)<<50
-
- r[3] = uint64(x[19])>>1 |
- uint64(x[20])<<7 |
- uint64(x[21])<<15 |
- uint64(x[22])<<23 |
- uint64(x[23])<<31 |
- uint64(x[24])<<39 |
- uint64(x[25]&15)<<47
-
- r[4] = uint64(x[25])>>4 |
- uint64(x[26])<<4 |
- uint64(x[27])<<12 |
- uint64(x[28])<<20 |
- uint64(x[29])<<28 |
- uint64(x[30])<<36 |
- uint64(x[31]&127)<<44
-}
-
-// pack sets out = x where out is the usual, little-endian form of the 5,
-// 51-bit limbs in x.
-func pack(out *[32]byte, x *[5]uint64) {
- t := *x
- freeze(&t)
-
- out[0] = byte(t[0])
- out[1] = byte(t[0] >> 8)
- out[2] = byte(t[0] >> 16)
- out[3] = byte(t[0] >> 24)
- out[4] = byte(t[0] >> 32)
- out[5] = byte(t[0] >> 40)
- out[6] = byte(t[0] >> 48)
-
- out[6] ^= byte(t[1]<<3) & 0xf8
- out[7] = byte(t[1] >> 5)
- out[8] = byte(t[1] >> 13)
- out[9] = byte(t[1] >> 21)
- out[10] = byte(t[1] >> 29)
- out[11] = byte(t[1] >> 37)
- out[12] = byte(t[1] >> 45)
-
- out[12] ^= byte(t[2]<<6) & 0xc0
- out[13] = byte(t[2] >> 2)
- out[14] = byte(t[2] >> 10)
- out[15] = byte(t[2] >> 18)
- out[16] = byte(t[2] >> 26)
- out[17] = byte(t[2] >> 34)
- out[18] = byte(t[2] >> 42)
- out[19] = byte(t[2] >> 50)
-
- out[19] ^= byte(t[3]<<1) & 0xfe
- out[20] = byte(t[3] >> 7)
- out[21] = byte(t[3] >> 15)
- out[22] = byte(t[3] >> 23)
- out[23] = byte(t[3] >> 31)
- out[24] = byte(t[3] >> 39)
- out[25] = byte(t[3] >> 47)
-
- out[25] ^= byte(t[4]<<4) & 0xf0
- out[26] = byte(t[4] >> 4)
- out[27] = byte(t[4] >> 12)
- out[28] = byte(t[4] >> 20)
- out[29] = byte(t[4] >> 28)
- out[30] = byte(t[4] >> 36)
- out[31] = byte(t[4] >> 44)
-}
-
-// invert calculates r = x^-1 mod p using Fermat's little theorem.
-func invert(r *[5]uint64, x *[5]uint64) {
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
-
- square(&z2, x) /* 2 */
- square(&t, &z2) /* 4 */
- square(&t, &t) /* 8 */
- mul(&z9, &t, x) /* 9 */
- mul(&z11, &z9, &z2) /* 11 */
- square(&t, &z11) /* 22 */
- mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
-
- square(&t, &z2_5_0) /* 2^6 - 2^1 */
- for i := 1; i < 5; i++ { /* 2^10 - 2^5 */
- square(&t, &t)
- }
- mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
-
- square(&t, &z2_10_0) /* 2^11 - 2^1 */
- for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
- square(&t, &t)
- }
- mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
-
- square(&t, &z2_20_0) /* 2^21 - 2^1 */
- for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
- square(&t, &t)
- }
- mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
-
- square(&t, &t) /* 2^41 - 2^1 */
- for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
- square(&t, &t)
- }
- mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
-
- square(&t, &z2_50_0) /* 2^51 - 2^1 */
- for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
- square(&t, &t)
- }
- mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
-
- square(&t, &z2_100_0) /* 2^101 - 2^1 */
- for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
- square(&t, &t)
- }
- mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
-
- square(&t, &t) /* 2^201 - 2^1 */
- for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
- square(&t, &t)
- }
- mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
-
- square(&t, &t) /* 2^251 - 2^1 */
- square(&t, &t) /* 2^252 - 2^2 */
- square(&t, &t) /* 2^253 - 2^3 */
-
- square(&t, &t) /* 2^254 - 2^4 */
-
- square(&t, &t) /* 2^255 - 2^5 */
- mul(r, &t, &z11) /* 2^255 - 21 */
-}
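
invert raises x to p - 2 = 2^255 - 21 with a fixed addition chain of 254 squarings
and 11 multiplications, so the exponentiation is branch-free. A small sanity-check
sketch against the removed helpers (amd64 build and package-internal access
assumed):

    var xBytes = [32]byte{5} // any nonzero field element, little-endian
    var x, xInv, one [5]uint64
    unpack(&x, &xBytes)
    invert(&xInv, &x)
    mul(&one, &x, &xInv)
    var enc [32]byte
    pack(&enc, &one) // enc[0] == 1, rest zero: x * x^-1 = 1
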
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s
deleted file mode 100644
index 5ce80a2e..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/mul_amd64.s
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-#include "const_amd64.h"
-
-// func mul(dest, a, b *[5]uint64)
-TEXT ·mul(SB),0,$16-24
- MOVQ dest+0(FP), DI
- MOVQ a+8(FP), SI
- MOVQ b+16(FP), DX
-
- MOVQ DX,CX
- MOVQ 24(SI),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,0(SP)
- MULQ 16(CX)
- MOVQ AX,R8
- MOVQ DX,R9
- MOVQ 32(SI),DX
- IMUL3Q $19,DX,AX
- MOVQ AX,8(SP)
- MULQ 8(CX)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SI),AX
- MULQ 0(CX)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 0(SI),AX
- MULQ 8(CX)
- MOVQ AX,R10
- MOVQ DX,R11
- MOVQ 0(SI),AX
- MULQ 16(CX)
- MOVQ AX,R12
- MOVQ DX,R13
- MOVQ 0(SI),AX
- MULQ 24(CX)
- MOVQ AX,R14
- MOVQ DX,R15
- MOVQ 0(SI),AX
- MULQ 32(CX)
- MOVQ AX,BX
- MOVQ DX,BP
- MOVQ 8(SI),AX
- MULQ 0(CX)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SI),AX
- MULQ 8(CX)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 8(SI),AX
- MULQ 16(CX)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 8(SI),AX
- MULQ 24(CX)
- ADDQ AX,BX
- ADCQ DX,BP
- MOVQ 8(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(CX)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 16(SI),AX
- MULQ 0(CX)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 16(SI),AX
- MULQ 8(CX)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 16(SI),AX
- MULQ 16(CX)
- ADDQ AX,BX
- ADCQ DX,BP
- MOVQ 16(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 24(CX)
- ADDQ AX,R8
- ADCQ DX,R9
- MOVQ 16(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(CX)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 24(SI),AX
- MULQ 0(CX)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ 24(SI),AX
- MULQ 8(CX)
- ADDQ AX,BX
- ADCQ DX,BP
- MOVQ 0(SP),AX
- MULQ 24(CX)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 0(SP),AX
- MULQ 32(CX)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 32(SI),AX
- MULQ 0(CX)
- ADDQ AX,BX
- ADCQ DX,BP
- MOVQ 8(SP),AX
- MULQ 16(CX)
- ADDQ AX,R10
- ADCQ DX,R11
- MOVQ 8(SP),AX
- MULQ 24(CX)
- ADDQ AX,R12
- ADCQ DX,R13
- MOVQ 8(SP),AX
- MULQ 32(CX)
- ADDQ AX,R14
- ADCQ DX,R15
- MOVQ $REDMASK51,SI
- SHLQ $13,R9:R8
- ANDQ SI,R8
- SHLQ $13,R11:R10
- ANDQ SI,R10
- ADDQ R9,R10
- SHLQ $13,R13:R12
- ANDQ SI,R12
- ADDQ R11,R12
- SHLQ $13,R15:R14
- ANDQ SI,R14
- ADDQ R13,R14
- SHLQ $13,BP:BX
- ANDQ SI,BX
- ADDQ R15,BX
- IMUL3Q $19,BP,DX
- ADDQ DX,R8
- MOVQ R8,DX
- SHRQ $51,DX
- ADDQ R10,DX
- MOVQ DX,CX
- SHRQ $51,DX
- ANDQ SI,R8
- ADDQ R12,DX
- MOVQ DX,R9
- SHRQ $51,DX
- ANDQ SI,CX
- ADDQ R14,DX
- MOVQ DX,AX
- SHRQ $51,DX
- ANDQ SI,R9
- ADDQ BX,DX
- MOVQ DX,R10
- SHRQ $51,DX
- ANDQ SI,AX
- IMUL3Q $19,DX,DX
- ADDQ DX,R8
- ANDQ SI,R10
- MOVQ R8,0(DI)
- MOVQ CX,8(DI)
- MOVQ R9,16(DI)
- MOVQ AX,24(DI)
- MOVQ R10,32(DI)
- RET
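
The IMUL3Q $19 multipliers in mul are the modular reduction done during
multiplication: limb i carries weight 2^(51i), so any partial product whose
combined weight reaches 2^255 folds back in with a factor of 19, because
2^255 = 19 mod p. A math/big reference is handy when cross-checking these
routines (illustration only; the names are ours):

    import "math/big"

    // p25519 = 2^255 - 19, the field modulus
    var p25519 = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

    func refMul(a, b *big.Int) *big.Int {
        return new(big.Int).Mod(new(big.Int).Mul(a, b), p25519)
    }
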
diff --git a/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s b/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s
deleted file mode 100644
index 12f73734..00000000
--- a/vendor/github.com/keybase/go-crypto/curve25519/square_amd64.s
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-#include "const_amd64.h"
-
-// func square(out, in *[5]uint64)
-TEXT ·square(SB),7,$0-16
- MOVQ out+0(FP), DI
- MOVQ in+8(FP), SI
-
- MOVQ 0(SI),AX
- MULQ 0(SI)
- MOVQ AX,CX
- MOVQ DX,R8
- MOVQ 0(SI),AX
- SHLQ $1,AX
- MULQ 8(SI)
- MOVQ AX,R9
- MOVQ DX,R10
- MOVQ 0(SI),AX
- SHLQ $1,AX
- MULQ 16(SI)
- MOVQ AX,R11
- MOVQ DX,R12
- MOVQ 0(SI),AX
- SHLQ $1,AX
- MULQ 24(SI)
- MOVQ AX,R13
- MOVQ DX,R14
- MOVQ 0(SI),AX
- SHLQ $1,AX
- MULQ 32(SI)
- MOVQ AX,R15
- MOVQ DX,BX
- MOVQ 8(SI),AX
- MULQ 8(SI)
- ADDQ AX,R11
- ADCQ DX,R12
- MOVQ 8(SI),AX
- SHLQ $1,AX
- MULQ 16(SI)
- ADDQ AX,R13
- ADCQ DX,R14
- MOVQ 8(SI),AX
- SHLQ $1,AX
- MULQ 24(SI)
- ADDQ AX,R15
- ADCQ DX,BX
- MOVQ 8(SI),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SI)
- ADDQ AX,CX
- ADCQ DX,R8
- MOVQ 16(SI),AX
- MULQ 16(SI)
- ADDQ AX,R15
- ADCQ DX,BX
- MOVQ 16(SI),DX
- IMUL3Q $38,DX,AX
- MULQ 24(SI)
- ADDQ AX,CX
- ADCQ DX,R8
- MOVQ 16(SI),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SI)
- ADDQ AX,R9
- ADCQ DX,R10
- MOVQ 24(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 24(SI)
- ADDQ AX,R9
- ADCQ DX,R10
- MOVQ 24(SI),DX
- IMUL3Q $38,DX,AX
- MULQ 32(SI)
- ADDQ AX,R11
- ADCQ DX,R12
- MOVQ 32(SI),DX
- IMUL3Q $19,DX,AX
- MULQ 32(SI)
- ADDQ AX,R13
- ADCQ DX,R14
- MOVQ $REDMASK51,SI
- SHLQ $13,R8:CX
- ANDQ SI,CX
- SHLQ $13,R10:R9
- ANDQ SI,R9
- ADDQ R8,R9
- SHLQ $13,R12:R11
- ANDQ SI,R11
- ADDQ R10,R11
- SHLQ $13,R14:R13
- ANDQ SI,R13
- ADDQ R12,R13
- SHLQ $13,BX:R15
- ANDQ SI,R15
- ADDQ R14,R15
- IMUL3Q $19,BX,DX
- ADDQ DX,CX
- MOVQ CX,DX
- SHRQ $51,DX
- ADDQ R9,DX
- ANDQ SI,CX
- MOVQ DX,R8
- SHRQ $51,DX
- ADDQ R11,DX
- ANDQ SI,R8
- MOVQ DX,R9
- SHRQ $51,DX
- ADDQ R13,DX
- ANDQ SI,R9
- MOVQ DX,AX
- SHRQ $51,DX
- ADDQ R15,DX
- ANDQ SI,AX
- MOVQ DX,R10
- SHRQ $51,DX
- IMUL3Q $19,DX,DX
- ADDQ DX,CX
- ANDQ SI,R10
- MOVQ CX,0(DI)
- MOVQ R8,8(DI)
- MOVQ R9,16(DI)
- MOVQ AX,24(DI)
- MOVQ R10,32(DI)
- RET
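
square is mul specialized to equal operands: the SHLQ $1 doublings and the
38 = 2*19 multipliers account for each cross product a_i*a_j (i != j) occurring
twice, so only 15 limb products are needed instead of 25. The same math/big
cross-check applies (refMul as sketched after mul above):

    func refSquare(a *big.Int) *big.Int {
        // Squaring is just refMul(a, a); the assembly merely avoids
        // computing the ten duplicate cross products a second time.
        return refMul(a, a)
    }
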
diff --git a/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go b/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go
deleted file mode 100644
index 5ba434b8..00000000
--- a/vendor/github.com/keybase/go-crypto/ed25519/ed25519.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-package ed25519
-
-// This code is a port of the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-import (
- "bytes"
- "crypto"
- cryptorand "crypto/rand"
- "crypto/sha512"
- "errors"
- "io"
- "strconv"
-
- "github.com/keybase/go-crypto/ed25519/internal/edwards25519"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey []byte
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, priv[32:])
- return PublicKey(publicKey)
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:32])
- return seed
-}
-
-// Sign signs the given message with priv.
-// Ed25519 performs two passes over messages to be signed and therefore cannot
-// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
-// indicate the message hasn't been hashed. This can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
- if opts.HashFunc() != crypto.Hash(0) {
- return nil, errors.New("ed25519: cannot sign hashed message")
- }
-
- return Sign(priv, message), nil
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptorand.Reader
- }
-
- seed := make([]byte, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, privateKey[32:])
-
- return publicKey, privateKey, nil
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- if l := len(seed); l != SeedSize {
- panic("ed25519: bad seed length: " + strconv.Itoa(l))
- }
-
- digest := sha512.Sum512(seed)
- digest[0] &= 248
- digest[31] &= 127
- digest[31] |= 64
-
- var A edwards25519.ExtendedGroupElement
- var hBytes [32]byte
- copy(hBytes[:], digest[:])
- edwards25519.GeScalarMultBase(&A, &hBytes)
- var publicKeyBytes [32]byte
- A.ToBytes(&publicKeyBytes)
-
- privateKey := make([]byte, PrivateKeySize)
- copy(privateKey, seed)
- copy(privateKey[32:], publicKeyBytes[:])
-
- return privateKey
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- if l := len(privateKey); l != PrivateKeySize {
- panic("ed25519: bad private key length: " + strconv.Itoa(l))
- }
-
- h := sha512.New()
- h.Write(privateKey[:32])
-
- var digest1, messageDigest, hramDigest [64]byte
- var expandedSecretKey [32]byte
- h.Sum(digest1[:0])
- copy(expandedSecretKey[:], digest1[:])
- expandedSecretKey[0] &= 248
- expandedSecretKey[31] &= 63
- expandedSecretKey[31] |= 64
-
- h.Reset()
- h.Write(digest1[32:])
- h.Write(message)
- h.Sum(messageDigest[:0])
-
- var messageDigestReduced [32]byte
- edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
- var R edwards25519.ExtendedGroupElement
- edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
-
- var encodedR [32]byte
- R.ToBytes(&encodedR)
-
- h.Reset()
- h.Write(encodedR[:])
- h.Write(privateKey[32:])
- h.Write(message)
- h.Sum(hramDigest[:0])
- var hramDigestReduced [32]byte
- edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
-
- var s [32]byte
- edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
-
- signature := make([]byte, SignatureSize)
- copy(signature[:], encodedR[:])
- copy(signature[32:], s[:])
-
- return signature
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
- if l := len(publicKey); l != PublicKeySize {
- panic("ed25519: bad public key length: " + strconv.Itoa(l))
- }
-
- if len(sig) != SignatureSize || sig[63]&224 != 0 {
- return false
- }
-
- var A edwards25519.ExtendedGroupElement
- var publicKeyBytes [32]byte
- copy(publicKeyBytes[:], publicKey)
- if !A.FromBytes(&publicKeyBytes) {
- return false
- }
- edwards25519.FeNeg(&A.X, &A.X)
- edwards25519.FeNeg(&A.T, &A.T)
-
- h := sha512.New()
- h.Write(sig[:32])
- h.Write(publicKey[:])
- h.Write(message)
- var digest [64]byte
- h.Sum(digest[:0])
-
- var hReduced [32]byte
- edwards25519.ScReduce(&hReduced, &digest)
-
- var R edwards25519.ProjectiveGroupElement
- var s [32]byte
- copy(s[:], sig[32:])
-
- // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
- // the range [0, order) in order to prevent signature malleability.
- if !edwards25519.ScMinimal(&s) {
- return false
- }
-
- edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
-
- var checkR [32]byte
- R.ToBytes(&checkR)
- return bytes.Equal(sig[:32], checkR[:])
-}
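
This ed25519.go mirrors the standard library's crypto/ed25519 surface: 64-byte
private keys that embed the public key, deterministic signatures per RFC 8032, and
an s-range (malleability) check in Verify. A short usage sketch against this API:

    pub, priv, err := ed25519.GenerateKey(nil) // nil means crypto/rand.Reader
    if err != nil {
        panic(err)
    }
    msg := []byte("attack at dawn")
    sig := ed25519.Sign(priv, msg)      // 64-byte deterministic signature
    ok := ed25519.Verify(pub, msg, sig) // true
    _ = ok
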
diff --git a/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go b/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go
deleted file mode 100644
index e39f086c..00000000
--- a/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/const.go
+++ /dev/null
@@ -1,1422 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-// These values are from the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-// d is a constant in the Edwards curve equation.
-var d = FieldElement{
- -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
-}
-
-// d2 is 2*d.
-var d2 = FieldElement{
- -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
-}
-
-// SqrtM1 is the square-root of -1 in the field.
-var SqrtM1 = FieldElement{
- -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482,
-}
-
-// A is a constant in the Montgomery-form of curve25519.
-var A = FieldElement{
- 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-}
-
-// bi contains precomputed multiples of the base-point. See the Ed25519 paper
-// for a discussion about how these values are used.
-var bi = [8]PreComputedGroupElement{
- {
- FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
- FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
- FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
- },
- {
- FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
- FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
- FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
- },
- {
- FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
- FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
- FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
- },
- {
- FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
- FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
- FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
- },
- {
- FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877},
- FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951},
- FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784},
- },
- {
- FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436},
- FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918},
- FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877},
- },
- {
- FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800},
- FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305},
- FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300},
- },
- {
- FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876},
- FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619},
- FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683},
- },
-}
-
-// base contains precomputed multiples of the base-point. See the Ed25519 paper
-// for a discussion about how these values are used.
-var base = [32][8]PreComputedGroupElement{
- {
- {
- FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
- FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
- FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
- },
- {
- FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303},
- FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081},
- FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697},
- },
- {
- FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
- FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
- FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
- },
- {
- FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540},
- FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397},
- FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325},
- },
- {
- FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
- FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
- FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
- },
- {
- FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777},
- FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737},
- FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652},
- },
- {
- FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
- FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
- FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
- },
- {
- FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726},
- FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955},
- FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425},
- },
- },
- {
- {
- FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171},
- FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510},
- FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660},
- },
- {
- FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639},
- FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963},
- FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950},
- },
- {
- FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568},
- FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335},
- FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628},
- },
- {
- FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007},
- FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772},
- FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653},
- },
- {
- FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567},
- FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686},
- FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372},
- },
- {
- FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887},
- FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954},
- FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953},
- },
- {
- FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833},
- FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532},
- FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876},
- },
- {
- FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268},
- FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214},
- FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038},
- },
- },
- {
- {
- FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800},
- FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645},
- FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664},
- },
- {
- FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933},
- FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182},
- FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222},
- },
- {
- FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991},
- FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880},
- FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092},
- },
- {
- FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295},
- FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788},
- FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553},
- },
- {
- FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026},
- FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347},
- FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033},
- },
- {
- FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395},
- FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278},
- FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890},
- },
- {
- FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995},
- FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596},
- FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891},
- },
- {
- FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060},
- FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608},
- FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606},
- },
- },
- {
- {
- FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389},
- FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016},
- FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341},
- },
- {
- FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505},
- FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553},
- FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655},
- },
- {
- FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220},
- FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631},
- FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099},
- },
- {
- FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556},
- FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749},
- FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930},
- },
- {
- FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391},
- FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253},
- FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066},
- },
- {
- FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958},
- FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082},
- FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383},
- },
- {
- FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521},
- FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807},
- FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948},
- },
- {
- FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134},
- FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455},
- FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629},
- },
- },
- {
- {
- FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069},
- FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746},
- FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919},
- },
- {
- FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837},
- FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906},
- FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771},
- },
- {
- FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817},
- FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098},
- FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409},
- },
- {
- FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504},
- FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727},
- FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420},
- },
- {
- FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003},
- FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605},
- FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384},
- },
- {
- FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701},
- FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683},
- FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708},
- },
- {
- FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563},
- FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260},
- FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387},
- },
- {
- FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672},
- FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686},
- FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665},
- },
- },
- {
- {
- FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182},
- FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277},
- FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628},
- },
- {
- FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474},
- FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539},
- FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822},
- },
- {
- FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970},
- FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756},
- FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508},
- },
- {
- FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683},
- FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655},
- FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158},
- },
- {
- FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125},
- FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839},
- FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664},
- },
- {
- FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294},
- FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899},
- FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070},
- },
- {
- FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294},
- FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949},
- FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083},
- },
- {
- FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420},
- FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940},
- FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396},
- },
- },
- {
- {
- FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567},
- FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127},
- FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294},
- },
- {
- FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887},
- FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964},
- FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195},
- },
- {
- FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244},
- FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999},
- FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762},
- },
- {
- FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274},
- FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236},
- FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605},
- },
- {
- FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761},
- FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884},
- FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482},
- },
- {
- FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638},
- FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490},
- FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170},
- },
- {
- FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736},
- FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124},
- FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392},
- },
- {
- FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029},
- FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048},
- FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958},
- },
- },
- {
- {
- FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593},
- FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071},
- FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692},
- },
- {
- FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687},
- FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441},
- FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001},
- },
- {
- FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460},
- FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007},
- FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762},
- },
- {
- FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005},
- FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674},
- FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035},
- },
- {
- FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590},
- FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957},
- FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812},
- },
- {
- FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740},
- FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122},
- FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158},
- },
- {
- FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885},
- FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140},
- FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857},
- },
- {
- FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155},
- FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260},
- FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483},
- },
- },
- {
- {
- FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677},
- FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815},
- FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751},
- },
- {
- FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203},
- FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208},
- FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230},
- },
- {
- FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850},
- FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389},
- FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968},
- },
- {
- FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689},
- FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880},
- FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304},
- },
- {
- FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632},
- FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412},
- FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566},
- },
- {
- FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038},
- FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232},
- FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943},
- },
- {
- FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856},
- FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738},
- FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971},
- },
- {
- FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718},
- FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697},
- FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883},
- },
- },
- {
- {
- FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912},
- FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358},
- FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849},
- },
- {
- FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307},
- FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977},
- FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335},
- },
- {
- FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644},
- FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616},
- FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735},
- },
- {
- FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099},
- FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341},
- FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336},
- },
- {
- FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646},
- FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425},
- FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388},
- },
- {
- FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743},
- FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822},
- FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462},
- },
- {
- FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985},
- FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702},
- FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797},
- },
- {
- FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293},
- FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100},
- FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688},
- },
- },
- {
- {
- FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186},
- FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610},
- FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707},
- },
- {
- FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220},
- FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025},
- FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044},
- },
- {
- FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992},
- FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027},
- FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197},
- },
- {
- FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901},
- FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952},
- FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878},
- },
- {
- FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390},
- FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730},
- FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730},
- },
- {
- FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180},
- FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272},
- FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715},
- },
- {
- FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970},
- FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772},
- FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865},
- },
- {
- FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750},
- FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373},
- FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348},
- },
- },
- {
- {
- FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144},
- FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195},
- FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086},
- },
- {
- FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684},
- FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518},
- FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233},
- },
- {
- FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793},
- FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794},
- FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435},
- },
- {
- FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921},
- FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518},
- FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563},
- },
- {
- FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278},
- FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024},
- FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030},
- },
- {
- FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783},
- FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717},
- FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844},
- },
- {
- FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333},
- FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048},
- FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760},
- },
- {
- FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760},
- FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757},
- FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112},
- },
- },
- {
- {
- FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468},
- FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184},
- FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289},
- },
- {
- FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066},
- FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882},
- FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226},
- },
- {
- FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101},
- FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279},
- FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811},
- },
- {
- FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709},
- FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714},
- FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121},
- },
- {
- FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464},
- FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847},
- FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400},
- },
- {
- FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414},
- FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158},
- FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045},
- },
- {
- FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415},
- FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459},
- FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079},
- },
- {
- FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412},
- FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743},
- FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836},
- },
- },
- {
- {
- FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022},
- FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429},
- FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065},
- },
- {
- FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861},
- FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000},
- FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101},
- },
- {
- FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815},
- FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642},
- FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966},
- },
- {
- FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574},
- FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742},
- FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689},
- },
- {
- FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020},
- FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772},
- FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982},
- },
- {
- FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953},
- FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218},
- FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265},
- },
- {
- FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073},
- FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325},
- FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798},
- },
- {
- FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870},
- FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863},
- FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927},
- },
- },
- {
- {
- FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267},
- FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663},
- FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862},
- },
- {
- FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673},
- FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943},
- FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020},
- },
- {
- FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238},
- FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064},
- FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795},
- },
- {
- FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052},
- FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904},
- FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531},
- },
- {
- FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979},
- FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841},
- FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431},
- },
- {
- FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324},
- FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940},
- FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320},
- },
- {
- FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184},
- FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114},
- FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878},
- },
- {
- FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784},
- FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091},
- FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585},
- },
- },
- {
- {
- FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208},
- FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864},
- FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661},
- },
- {
- FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233},
- FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212},
- FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525},
- },
- {
- FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068},
- FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397},
- FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988},
- },
- {
- FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889},
- FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038},
- FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697},
- },
- {
- FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875},
- FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905},
- FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656},
- },
- {
- FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818},
- FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714},
- FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203},
- },
- {
- FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931},
- FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024},
- FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084},
- },
- {
- FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204},
- FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817},
- FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667},
- },
- },
- {
- {
- FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504},
- FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768},
- FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255},
- },
- {
- FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790},
- FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438},
- FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333},
- },
- {
- FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971},
- FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905},
- FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409},
- },
- {
- FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409},
- FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499},
- FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363},
- },
- {
- FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664},
- FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324},
- FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940},
- },
- {
- FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990},
- FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914},
- FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290},
- },
- {
- FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257},
- FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433},
- FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236},
- },
- {
- FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045},
- FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093},
- FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347},
- },
- },
- {
- {
- FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191},
- FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507},
- FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906},
- },
- {
- FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018},
- FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109},
- FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926},
- },
- {
- FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528},
- FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625},
- FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286},
- },
- {
- FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033},
- FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866},
- FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896},
- },
- {
- FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075},
- FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347},
- FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437},
- },
- {
- FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165},
- FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588},
- FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193},
- },
- {
- FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017},
- FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883},
- FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961},
- },
- {
- FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043},
- FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663},
- FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362},
- },
- },
- {
- {
- FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860},
- FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466},
- FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063},
- },
- {
- FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997},
- FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295},
- FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369},
- },
- {
- FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385},
- FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109},
- FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906},
- },
- {
- FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424},
- FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185},
- FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962},
- },
- {
- FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325},
- FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593},
- FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404},
- },
- {
- FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644},
- FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801},
- FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804},
- },
- {
- FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884},
- FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577},
- FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849},
- },
- {
- FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473},
- FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644},
- FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319},
- },
- },
- {
- {
- FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599},
- FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768},
- FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084},
- },
- {
- FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328},
- FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369},
- FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920},
- },
- {
- FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815},
- FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025},
- FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397},
- },
- {
- FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448},
- FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981},
- FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165},
- },
- {
- FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501},
- FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073},
- FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861},
- },
- {
- FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845},
- FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211},
- FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870},
- },
- {
- FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096},
- FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803},
- FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168},
- },
- {
- FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965},
- FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505},
- FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598},
- },
- },
- {
- {
- FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782},
- FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900},
- FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479},
- },
- {
- FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208},
- FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232},
- FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719},
- },
- {
- FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271},
- FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326},
- FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132},
- },
- {
- FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300},
- FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570},
- FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670},
- },
- {
- FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994},
- FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913},
- FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317},
- },
- {
- FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730},
- FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096},
- FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078},
- },
- {
- FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411},
- FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905},
- FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654},
- },
- {
- FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870},
- FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498},
- FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579},
- },
- },
- {
- {
- FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677},
- FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647},
- FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743},
- },
- {
- FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468},
- FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375},
- FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155},
- },
- {
- FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725},
- FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612},
- FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943},
- },
- {
- FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944},
- FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928},
- FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406},
- },
- {
- FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139},
- FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963},
- FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693},
- },
- {
- FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734},
- FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680},
- FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410},
- },
- {
- FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931},
- FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654},
- FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710},
- },
- {
- FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180},
- FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684},
- FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895},
- },
- },
- {
- {
- FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501},
- FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413},
- FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880},
- },
- {
- FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874},
- FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962},
- FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899},
- },
- {
- FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152},
- FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063},
- FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080},
- },
- {
- FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146},
- FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183},
- FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133},
- },
- {
- FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421},
- FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622},
- FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197},
- },
- {
- FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663},
- FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753},
- FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755},
- },
- {
- FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862},
- FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118},
- FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171},
- },
- {
- FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380},
- FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824},
- FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270},
- },
- },
- {
- {
- FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438},
- FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584},
- FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562},
- },
- {
- FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471},
- FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610},
- FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269},
- },
- {
- FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650},
- FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369},
- FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461},
- },
- {
- FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462},
- FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793},
- FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218},
- },
- {
- FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226},
- FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019},
- FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037},
- },
- {
- FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171},
- FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132},
- FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841},
- },
- {
- FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181},
- FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210},
- FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040},
- },
- {
- FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935},
- FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105},
- FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814},
- },
- },
- {
- {
- FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852},
- FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581},
- FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646},
- },
- {
- FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844},
- FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025},
- FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453},
- },
- {
- FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068},
- FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192},
- FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921},
- },
- {
- FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259},
- FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426},
- FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072},
- },
- {
- FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305},
- FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832},
- FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943},
- },
- {
- FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011},
- FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447},
- FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494},
- },
- {
- FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245},
- FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859},
- FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915},
- },
- {
- FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707},
- FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848},
- FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224},
- },
- },
- {
- {
- FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391},
- FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215},
- FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101},
- },
- {
- FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713},
- FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849},
- FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930},
- },
- {
- FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940},
- FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031},
- FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404},
- },
- {
- FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243},
- FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116},
- FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525},
- },
- {
- FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509},
- FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883},
- FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865},
- },
- {
- FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660},
- FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273},
- FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138},
- },
- {
- FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560},
- FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135},
- FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941},
- },
- {
- FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739},
- FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756},
- FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819},
- },
- },
- {
- {
- FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347},
- FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028},
- FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075},
- },
- {
- FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799},
- FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609},
- FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817},
- },
- {
- FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989},
- FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523},
- FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278},
- },
- {
- FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045},
- FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377},
- FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480},
- },
- {
- FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016},
- FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426},
- FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525},
- },
- {
- FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396},
- FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080},
- FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892},
- },
- {
- FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275},
- FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074},
- FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140},
- },
- {
- FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717},
- FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101},
- FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127},
- },
- },
- {
- {
- FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632},
- FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415},
- FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160},
- },
- {
- FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876},
- FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625},
- FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478},
- },
- {
- FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164},
- FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595},
- FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248},
- },
- {
- FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858},
- FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193},
- FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184},
- },
- {
- FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942},
- FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635},
- FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948},
- },
- {
- FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935},
- FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415},
- FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416},
- },
- {
- FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018},
- FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778},
- FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659},
- },
- {
- FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385},
- FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503},
- FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329},
- },
- },
- {
- {
- FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056},
- FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838},
- FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948},
- },
- {
- FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691},
- FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118},
- FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517},
- },
- {
- FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269},
- FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904},
- FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589},
- },
- {
- FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193},
- FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910},
- FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930},
- },
- {
- FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667},
- FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481},
- FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876},
- },
- {
- FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640},
- FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278},
- FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112},
- },
- {
- FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272},
- FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012},
- FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221},
- },
- {
- FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046},
- FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345},
- FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310},
- },
- },
- {
- {
- FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937},
- FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636},
- FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008},
- },
- {
- FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429},
- FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576},
- FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066},
- },
- {
- FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490},
- FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104},
- FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053},
- },
- {
- FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275},
- FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511},
- FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095},
- },
- {
- FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439},
- FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939},
- FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424},
- },
- {
- FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310},
- FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608},
- FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079},
- },
- {
- FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101},
- FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418},
- FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576},
- },
- {
- FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356},
- FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996},
- FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099},
- },
- },
- {
- {
- FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728},
- FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658},
- FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242},
- },
- {
- FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001},
- FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766},
- FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373},
- },
- {
- FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458},
- FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628},
- FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657},
- },
- {
- FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062},
- FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616},
- FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014},
- },
- {
- FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383},
- FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814},
- FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718},
- },
- {
- FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417},
- FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222},
- FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444},
- },
- {
- FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597},
- FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970},
- FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799},
- },
- {
- FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647},
- FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511},
- FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032},
- },
- },
- {
- {
- FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834},
- FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461},
- FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062},
- },
- {
- FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516},
- FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547},
- FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240},
- },
- {
- FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038},
- FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741},
- FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103},
- },
- {
- FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747},
- FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323},
- FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016},
- },
- {
- FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373},
- FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228},
- FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141},
- },
- {
- FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399},
- FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831},
- FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376},
- },
- {
- FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313},
- FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958},
- FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577},
- },
- {
- FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743},
- FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684},
- FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476},
- },
- },
-}
diff --git a/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go
deleted file mode 100644
index fd03c252..00000000
--- a/vendor/github.com/keybase/go-crypto/ed25519/internal/edwards25519/edwards25519.go
+++ /dev/null
@@ -1,1793 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import "encoding/binary"
-
-// This code is a port of the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-// FieldElement represents an element of the field GF(2^255 - 19). An element
-// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
-// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
-// context.
-type FieldElement [10]int32
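Reviewer note: the radix-2^25.5 packing described above alternates 26- and 25-bit limbs, so limb i carries weight 2^ceil(25.5*i) (0, 26, 51, 77, 102, 128, 153, 179, 204, 230). A minimal sketch of how a FieldElement evaluates back to an integer, using math/big and a hypothetical in-package helper feValue (not part of the deleted file):

    package edwards25519

    import "math/big"

    // feExp[i] is the bit position of limb i: ceil(25.5*i).
    var feExp = [10]uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}

    // feValue (hypothetical) evaluates t to the integer it represents, mod 2^255-19.
    func feValue(t *FieldElement) *big.Int {
        p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
        v := new(big.Int)
        for i, limb := range t {
            // limb * 2^feExp[i]; limbs may be negative, and Lsh preserves sign.
            v.Add(v, new(big.Int).Lsh(big.NewInt(int64(limb)), feExp[i]))
        }
        return v.Mod(v, p) // Euclidean Mod: result always lands in [0, p)
    }

Later sketches in this review reuse feValue and the math/big import.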
-
-var zero FieldElement
-
-func FeZero(fe *FieldElement) {
- copy(fe[:], zero[:])
-}
-
-func FeOne(fe *FieldElement) {
- FeZero(fe)
- fe[0] = 1
-}
-
-func FeAdd(dst, a, b *FieldElement) {
- dst[0] = a[0] + b[0]
- dst[1] = a[1] + b[1]
- dst[2] = a[2] + b[2]
- dst[3] = a[3] + b[3]
- dst[4] = a[4] + b[4]
- dst[5] = a[5] + b[5]
- dst[6] = a[6] + b[6]
- dst[7] = a[7] + b[7]
- dst[8] = a[8] + b[8]
- dst[9] = a[9] + b[9]
-}
-
-func FeSub(dst, a, b *FieldElement) {
- dst[0] = a[0] - b[0]
- dst[1] = a[1] - b[1]
- dst[2] = a[2] - b[2]
- dst[3] = a[3] - b[3]
- dst[4] = a[4] - b[4]
- dst[5] = a[5] - b[5]
- dst[6] = a[6] - b[6]
- dst[7] = a[7] - b[7]
- dst[8] = a[8] - b[8]
- dst[9] = a[9] - b[9]
-}
-
-func FeCopy(dst, src *FieldElement) {
- copy(dst[:], src[:])
-}
-
-// Replace (f,g) with (g,g) if b == 1;
-// replace (f,g) with (f,g) if b == 0.
-//
-// Preconditions: b in {0,1}.
-func FeCMove(f, g *FieldElement, b int32) {
- b = -b
- f[0] ^= b & (f[0] ^ g[0])
- f[1] ^= b & (f[1] ^ g[1])
- f[2] ^= b & (f[2] ^ g[2])
- f[3] ^= b & (f[3] ^ g[3])
- f[4] ^= b & (f[4] ^ g[4])
- f[5] ^= b & (f[5] ^ g[5])
- f[6] ^= b & (f[6] ^ g[6])
- f[7] ^= b & (f[7] ^ g[7])
- f[8] ^= b & (f[8] ^ g[8])
- f[9] ^= b & (f[9] ^ g[9])
-}
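FeCMove is the constant-time conditional move that the table lookups further down depend on. The mask trick it uses is worth spelling out once, via a hypothetical maskDemo helper (not in the deleted file):

    // For b in {0,1}, -b is 0x00000000 or 0xFFFFFFFF in two's complement, so
    // f ^ (mask & (f ^ g)) yields f when b == 0 and g when b == 1, with no
    // data-dependent branch.
    func maskDemo(f, g, b int32) int32 {
        mask := -b
        return f ^ (mask & (f ^ g))
    }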
-
-func load3(in []byte) int64 {
- var r int64
- r = int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- return r
-}
-
-func load4(in []byte) int64 {
- var r int64
- r = int64(in[0])
- r |= int64(in[1]) << 8
- r |= int64(in[2]) << 16
- r |= int64(in[3]) << 24
- return r
-}
-
-func FeFromBytes(dst *FieldElement, src *[32]byte) {
- h0 := load4(src[:])
- h1 := load3(src[4:]) << 6
- h2 := load3(src[7:]) << 5
- h3 := load3(src[10:]) << 3
- h4 := load3(src[13:]) << 2
- h5 := load4(src[16:])
- h6 := load3(src[20:]) << 7
- h7 := load3(src[23:]) << 5
- h8 := load3(src[26:]) << 4
- h9 := (load3(src[29:]) & 8388607) << 2
-
- FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
-
-// FeToBytes marshals h to s.
-// Preconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Write p=2^255-19; q=floor(h/p).
-// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
-//
-// Proof:
-// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
-// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
-//
-// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
-// Then 0<y<1.
-//
-// Write r=h-pq.
-// Have 0<=r<=p-1=2^255-20.
-// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
-//
-// Write x=r+19(2^-255)r+y.
-// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
-//
-// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
-// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
-func FeToBytes(s *[32]byte, h *FieldElement) {
- var carry [10]int64
-
- q := (19*h[9] + (1 << 24)) >> 25
- q = (h[0] + q) >> 26
- q = (h[1] + q) >> 25
- q = (h[2] + q) >> 26
- q = (h[3] + q) >> 25
- q = (h[4] + q) >> 26
- q = (h[5] + q) >> 25
- q = (h[6] + q) >> 26
- q = (h[7] + q) >> 25
- q = (h[8] + q) >> 26
- q = (h[9] + q) >> 25
-
- // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
- h[0] += 19 * q
- // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
-
- carry[0] = h[0] >> 26
- h[1] += carry[0]
- h[0] -= carry[0] << 26
- carry[1] = h[1] >> 25
- h[2] += carry[1]
- h[1] -= carry[1] << 25
- carry[2] = h[2] >> 26
- h[3] += carry[2]
- h[2] -= carry[2] << 26
- carry[3] = h[3] >> 25
- h[4] += carry[3]
- h[3] -= carry[3] << 25
- carry[4] = h[4] >> 26
- h[5] += carry[4]
- h[4] -= carry[4] << 26
- carry[5] = h[5] >> 25
- h[6] += carry[5]
- h[5] -= carry[5] << 25
- carry[6] = h[6] >> 26
- h[7] += carry[6]
- h[6] -= carry[6] << 26
- carry[7] = h[7] >> 25
- h[8] += carry[7]
- h[7] -= carry[7] << 25
- carry[8] = h[8] >> 26
- h[9] += carry[8]
- h[8] -= carry[8] << 26
- carry[9] = h[9] >> 25
- h[9] -= carry[9] << 25
- // h10 = carry9
-
- // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
- // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
- // evidently 2^255 h10-2^255 q = 0.
- // Goal: Output h[0]+...+2^230 h[9].
-
- s[0] = byte(h[0] >> 0)
- s[1] = byte(h[0] >> 8)
- s[2] = byte(h[0] >> 16)
- s[3] = byte((h[0] >> 24) | (h[1] << 2))
- s[4] = byte(h[1] >> 6)
- s[5] = byte(h[1] >> 14)
- s[6] = byte((h[1] >> 22) | (h[2] << 3))
- s[7] = byte(h[2] >> 5)
- s[8] = byte(h[2] >> 13)
- s[9] = byte((h[2] >> 21) | (h[3] << 5))
- s[10] = byte(h[3] >> 3)
- s[11] = byte(h[3] >> 11)
- s[12] = byte((h[3] >> 19) | (h[4] << 6))
- s[13] = byte(h[4] >> 2)
- s[14] = byte(h[4] >> 10)
- s[15] = byte(h[4] >> 18)
- s[16] = byte(h[5] >> 0)
- s[17] = byte(h[5] >> 8)
- s[18] = byte(h[5] >> 16)
- s[19] = byte((h[5] >> 24) | (h[6] << 1))
- s[20] = byte(h[6] >> 7)
- s[21] = byte(h[6] >> 15)
- s[22] = byte((h[6] >> 23) | (h[7] << 3))
- s[23] = byte(h[7] >> 5)
- s[24] = byte(h[7] >> 13)
- s[25] = byte((h[7] >> 21) | (h[8] << 4))
- s[26] = byte(h[8] >> 4)
- s[27] = byte(h[8] >> 12)
- s[28] = byte((h[8] >> 20) | (h[9] << 6))
- s[29] = byte(h[9] >> 2)
- s[30] = byte(h[9] >> 10)
- s[31] = byte(h[9] >> 18)
-}
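A short usage sketch for the byte codecs, via a hypothetical canonicalize helper (not in the deleted file): FeFromBytes masks off bit 255 and FeToBytes emits the fully reduced value, so the pair canonicalizes any 32-byte little-endian input.

    func canonicalize(in *[32]byte) (out [32]byte) {
        var fe FieldElement
        FeFromBytes(&fe, in)  // drops the top bit, unpacks into 10 limbs
        FeToBytes(&out, &fe)  // re-packs, fully reduced into [0, 2^255-19)
        return out
    }

For inputs already in [0, 2^255-19) the output equals the input; values at or above the prime come back reduced (after the top bit is dropped).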
-
-func FeIsNegative(f *FieldElement) byte {
- var s [32]byte
- FeToBytes(&s, f)
- return s[0] & 1
-}
-
-func FeIsNonZero(f *FieldElement) int32 {
- var s [32]byte
- FeToBytes(&s, f)
- var x uint8
- for _, b := range s {
- x |= b
- }
- x |= x >> 4
- x |= x >> 2
- x |= x >> 1
- return int32(x & 1)
-}
-
-// FeNeg sets h = -f
-//
-// Preconditions:
-// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func FeNeg(h, f *FieldElement) {
- h[0] = -f[0]
- h[1] = -f[1]
- h[2] = -f[2]
- h[3] = -f[3]
- h[4] = -f[4]
- h[5] = -f[5]
- h[6] = -f[6]
- h[7] = -f[7]
- h[8] = -f[8]
- h[9] = -f[9]
-}
-
-func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
- var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64
-
- /*
- |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
- i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
- |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
- i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
- */
-
- c0 = (h0 + (1 << 25)) >> 26
- h1 += c0
- h0 -= c0 << 26
- c4 = (h4 + (1 << 25)) >> 26
- h5 += c4
- h4 -= c4 << 26
- /* |h0| <= 2^25 */
- /* |h4| <= 2^25 */
- /* |h1| <= 1.51*2^58 */
- /* |h5| <= 1.51*2^58 */
-
- c1 = (h1 + (1 << 24)) >> 25
- h2 += c1
- h1 -= c1 << 25
- c5 = (h5 + (1 << 24)) >> 25
- h6 += c5
- h5 -= c5 << 25
- /* |h1| <= 2^24; from now on fits into int32 */
- /* |h5| <= 2^24; from now on fits into int32 */
- /* |h2| <= 1.21*2^59 */
- /* |h6| <= 1.21*2^59 */
-
- c2 = (h2 + (1 << 25)) >> 26
- h3 += c2
- h2 -= c2 << 26
- c6 = (h6 + (1 << 25)) >> 26
- h7 += c6
- h6 -= c6 << 26
- /* |h2| <= 2^25; from now on fits into int32 unchanged */
- /* |h6| <= 2^25; from now on fits into int32 unchanged */
- /* |h3| <= 1.51*2^58 */
- /* |h7| <= 1.51*2^58 */
-
- c3 = (h3 + (1 << 24)) >> 25
- h4 += c3
- h3 -= c3 << 25
- c7 = (h7 + (1 << 24)) >> 25
- h8 += c7
- h7 -= c7 << 25
- /* |h3| <= 2^24; from now on fits into int32 unchanged */
- /* |h7| <= 2^24; from now on fits into int32 unchanged */
- /* |h4| <= 1.52*2^33 */
- /* |h8| <= 1.52*2^33 */
-
- c4 = (h4 + (1 << 25)) >> 26
- h5 += c4
- h4 -= c4 << 26
- c8 = (h8 + (1 << 25)) >> 26
- h9 += c8
- h8 -= c8 << 26
- /* |h4| <= 2^25; from now on fits into int32 unchanged */
- /* |h8| <= 2^25; from now on fits into int32 unchanged */
- /* |h5| <= 1.01*2^24 */
- /* |h9| <= 1.51*2^58 */
-
- c9 = (h9 + (1 << 24)) >> 25
- h0 += c9 * 19
- h9 -= c9 << 25
- /* |h9| <= 2^24; from now on fits into int32 unchanged */
- /* |h0| <= 1.8*2^37 */
-
- c0 = (h0 + (1 << 25)) >> 26
- h1 += c0
- h0 -= c0 << 26
- /* |h0| <= 2^25; from now on fits into int32 unchanged */
- /* |h1| <= 1.01*2^24 */
-
- h[0] = int32(h0)
- h[1] = int32(h1)
- h[2] = int32(h2)
- h[3] = int32(h3)
- h[4] = int32(h4)
- h[5] = int32(h5)
- h[6] = int32(h6)
- h[7] = int32(h7)
- h[8] = int32(h8)
- h[9] = int32(h9)
-}
-
-// FeMul calculates h = f * g
-// Can overlap h with f or g.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Notes on implementation strategy:
-//
-// Using schoolbook multiplication.
-// Karatsuba would save a little in some cost models.
-//
-// Most multiplications by 2 and 19 are 32-bit precomputations;
-// cheaper than 64-bit postcomputations.
-//
-// There is one remaining multiplication by 19 in the carry chain;
-// one *19 precomputation can be merged into this,
-// but the resulting data flow is considerably less clean.
-//
-// There are 12 carries below.
-// 10 of them are 2-way parallelizable and vectorizable.
-// Can get away with 11 carries, but then data flow is much deeper.
-//
-// With tighter constraints on inputs, can squeeze carries into int32.
-func FeMul(h, f, g *FieldElement) {
- f0 := int64(f[0])
- f1 := int64(f[1])
- f2 := int64(f[2])
- f3 := int64(f[3])
- f4 := int64(f[4])
- f5 := int64(f[5])
- f6 := int64(f[6])
- f7 := int64(f[7])
- f8 := int64(f[8])
- f9 := int64(f[9])
-
- f1_2 := int64(2 * f[1])
- f3_2 := int64(2 * f[3])
- f5_2 := int64(2 * f[5])
- f7_2 := int64(2 * f[7])
- f9_2 := int64(2 * f[9])
-
- g0 := int64(g[0])
- g1 := int64(g[1])
- g2 := int64(g[2])
- g3 := int64(g[3])
- g4 := int64(g[4])
- g5 := int64(g[5])
- g6 := int64(g[6])
- g7 := int64(g[7])
- g8 := int64(g[8])
- g9 := int64(g[9])
-
- g1_19 := int64(19 * g[1]) /* 1.4*2^29 */
- g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */
- g3_19 := int64(19 * g[3])
- g4_19 := int64(19 * g[4])
- g5_19 := int64(19 * g[5])
- g6_19 := int64(19 * g[6])
- g7_19 := int64(19 * g[7])
- g8_19 := int64(19 * g[8])
- g9_19 := int64(19 * g[9])
-
- h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19
- h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19
- h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19
- h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19
- h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19
- h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19
- h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19
- h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19
- h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19
- h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0
-
- FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
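The *19 pre-multiplications above come from the reduction identity 2^255 ≡ 19 (mod p): any cross term whose limb weights sum to 2^255 or more folds back down with a factor of 19 (e.g. f1*g9 has weight 2^(26+230) = 2^256 ≡ 38, hence f1_2*g9_19), and the _2 variants absorb the odd limbs' smaller weights. A cross-check sketch against math/big, reusing the hypothetical feValue from earlier:

    // mulCheck (hypothetical) confirms FeMul against big-integer arithmetic.
    func mulCheck(f, g *FieldElement) bool {
        var h FieldElement
        FeMul(&h, f, g)
        p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
        want := new(big.Int).Mul(feValue(f), feValue(g))
        want.Mod(want, p)
        return feValue(&h).Cmp(want) == 0
    }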
-
-func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
- f0 := int64(f[0])
- f1 := int64(f[1])
- f2 := int64(f[2])
- f3 := int64(f[3])
- f4 := int64(f[4])
- f5 := int64(f[5])
- f6 := int64(f[6])
- f7 := int64(f[7])
- f8 := int64(f[8])
- f9 := int64(f[9])
- f0_2 := int64(2 * f[0])
- f1_2 := int64(2 * f[1])
- f2_2 := int64(2 * f[2])
- f3_2 := int64(2 * f[3])
- f4_2 := int64(2 * f[4])
- f5_2 := int64(2 * f[5])
- f6_2 := int64(2 * f[6])
- f7_2 := int64(2 * f[7])
- f5_38 := 38 * f5 // 1.31*2^30
- f6_19 := 19 * f6 // 1.31*2^30
- f7_38 := 38 * f7 // 1.31*2^30
- f8_19 := 19 * f8 // 1.31*2^30
- f9_38 := 38 * f9 // 1.31*2^30
-
- h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38
- h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19
- h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19
- h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38
- h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38
- h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19
- h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19
- h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38
- h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38
- h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5
-
- return
-}
-
-// FeSquare calculates h = f*f. Can overlap h with f.
-//
-// Preconditions:
-// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func FeSquare(h, f *FieldElement) {
- h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
- FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
-
-// FeSquare2 sets h = 2 * f * f
-//
-// Can overlap h with f.
-//
-// Preconditions:
-// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
-//
-// Postconditions:
-// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
-// See fe_mul.c for discussion of implementation strategy.
-func FeSquare2(h, f *FieldElement) {
- h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
-
- h0 += h0
- h1 += h1
- h2 += h2
- h3 += h3
- h4 += h4
- h5 += h5
- h6 += h6
- h7 += h7
- h8 += h8
- h9 += h9
-
- FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
-
-func FeInvert(out, z *FieldElement) {
- var t0, t1, t2, t3 FieldElement
- var i int
-
- FeSquare(&t0, z) // 2^1
- FeSquare(&t1, &t0) // 2^2
- for i = 1; i < 2; i++ { // 2^3
- FeSquare(&t1, &t1)
- }
- FeMul(&t1, z, &t1) // 2^3 + 2^0
- FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0
- FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1
- FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0
- FeSquare(&t2, &t1) // 5,4,3,2,1
- for i = 1; i < 5; i++ { // 9,8,7,6,5
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0
- FeSquare(&t2, &t1) // 10..1
- for i = 1; i < 10; i++ { // 19..10
- FeSquare(&t2, &t2)
- }
- FeMul(&t2, &t2, &t1) // 19..0
- FeSquare(&t3, &t2) // 20..1
- for i = 1; i < 20; i++ { // 39..20
- FeSquare(&t3, &t3)
- }
- FeMul(&t2, &t3, &t2) // 39..0
- FeSquare(&t2, &t2) // 40..1
- for i = 1; i < 10; i++ { // 49..10
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1) // 49..0
- FeSquare(&t2, &t1) // 50..1
- for i = 1; i < 50; i++ { // 99..50
- FeSquare(&t2, &t2)
- }
- FeMul(&t2, &t2, &t1) // 99..0
- FeSquare(&t3, &t2) // 100..1
- for i = 1; i < 100; i++ { // 199..100
- FeSquare(&t3, &t3)
- }
- FeMul(&t2, &t3, &t2) // 199..0
- FeSquare(&t2, &t2) // 200..1
- for i = 1; i < 50; i++ { // 249..50
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1) // 249..0
- FeSquare(&t1, &t1) // 250..1
- for i = 1; i < 5; i++ { // 254..5
- FeSquare(&t1, &t1)
- }
- FeMul(out, &t1, &t0) // 254..5,3,1,0
-}
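FeInvert is Fermat inversion: the chain raises z to p-2 = 2^255-21, whose binary expansion has every bit of 0..254 set except bits 2 and 4 (hence the closing comment "254..5,3,1,0"). A verification sketch, again reusing the hypothetical feValue:

    // invertCheck (hypothetical): z * FeInvert(z) must be 1 mod p for z != 0.
    func invertCheck(z *FieldElement) bool {
        var inv FieldElement
        FeInvert(&inv, z)
        p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
        got := new(big.Int).Mul(feValue(z), feValue(&inv))
        return got.Mod(got, p).Cmp(big.NewInt(1)) == 0
    }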
-
-func fePow22523(out, z *FieldElement) {
- var t0, t1, t2 FieldElement
- var i int
-
- FeSquare(&t0, z)
- for i = 1; i < 1; i++ {
- FeSquare(&t0, &t0)
- }
- FeSquare(&t1, &t0)
- for i = 1; i < 2; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t1, z, &t1)
- FeMul(&t0, &t0, &t1)
- FeSquare(&t0, &t0)
- for i = 1; i < 1; i++ {
- FeSquare(&t0, &t0)
- }
- FeMul(&t0, &t1, &t0)
- FeSquare(&t1, &t0)
- for i = 1; i < 5; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t0, &t1, &t0)
- FeSquare(&t1, &t0)
- for i = 1; i < 10; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t1, &t1, &t0)
- FeSquare(&t2, &t1)
- for i = 1; i < 20; i++ {
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1)
- FeSquare(&t1, &t1)
- for i = 1; i < 10; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t0, &t1, &t0)
- FeSquare(&t1, &t0)
- for i = 1; i < 50; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t1, &t1, &t0)
- FeSquare(&t2, &t1)
- for i = 1; i < 100; i++ {
- FeSquare(&t2, &t2)
- }
- FeMul(&t1, &t2, &t1)
- FeSquare(&t1, &t1)
- for i = 1; i < 50; i++ {
- FeSquare(&t1, &t1)
- }
- FeMul(&t0, &t1, &t0)
- FeSquare(&t0, &t0)
- for i = 1; i < 2; i++ {
- FeSquare(&t0, &t0)
- }
- FeMul(out, &t0, z)
-}
-
-// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 *
-// y^2 where d = -121665/121666.
-//
-// Several representations are used:
-// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z
-// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT
-// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T
-// PreComputedGroupElement: (y+x,y-x,2dxy)
-
-type ProjectiveGroupElement struct {
- X, Y, Z FieldElement
-}
-
-type ExtendedGroupElement struct {
- X, Y, Z, T FieldElement
-}
-
-type CompletedGroupElement struct {
- X, Y, Z, T FieldElement
-}
-
-type PreComputedGroupElement struct {
- yPlusX, yMinusX, xy2d FieldElement
-}
-
-type CachedGroupElement struct {
- yPlusX, yMinusX, Z, T2d FieldElement
-}
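These four representations trade field multiplications for cheap additions: cached and precomputed forms feed the adders defined below, and completed results convert back out. Because the twisted Edwards addition used here is unified (valid even when both inputs are the same point), the conversion flow can be illustrated by doubling through the cached path, as in this hypothetical helper (not in the deleted file):

    // doubleViaAdd (hypothetical): 2P computed as P + cached(P).
    func doubleViaAdd(P *ExtendedGroupElement) ExtendedGroupElement {
        var c CachedGroupElement
        P.ToCached(&c)
        var t CompletedGroupElement
        geAdd(&t, P, &c) // unified add, so P + P is fine
        var R ExtendedGroupElement
        t.ToExtended(&R)
        return R
    }

In practice the dedicated Double methods below are cheaper; this only illustrates how the representations chain together.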
-
-func (p *ProjectiveGroupElement) Zero() {
- FeZero(&p.X)
- FeOne(&p.Y)
- FeOne(&p.Z)
-}
-
-func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) {
- var t0 FieldElement
-
- FeSquare(&r.X, &p.X)
- FeSquare(&r.Z, &p.Y)
- FeSquare2(&r.T, &p.Z)
- FeAdd(&r.Y, &p.X, &p.Y)
- FeSquare(&t0, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.X)
- FeSub(&r.Z, &r.Z, &r.X)
- FeSub(&r.X, &t0, &r.Y)
- FeSub(&r.T, &r.T, &r.Z)
-}
-
-func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) {
- var recip, x, y FieldElement
-
- FeInvert(&recip, &p.Z)
- FeMul(&x, &p.X, &recip)
- FeMul(&y, &p.Y, &recip)
- FeToBytes(s, &y)
- s[31] ^= FeIsNegative(&x) << 7
-}
-
-func (p *ExtendedGroupElement) Zero() {
- FeZero(&p.X)
- FeOne(&p.Y)
- FeOne(&p.Z)
- FeZero(&p.T)
-}
-
-func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) {
- var q ProjectiveGroupElement
- p.ToProjective(&q)
- q.Double(r)
-}
-
-func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) {
- FeAdd(&r.yPlusX, &p.Y, &p.X)
- FeSub(&r.yMinusX, &p.Y, &p.X)
- FeCopy(&r.Z, &p.Z)
- FeMul(&r.T2d, &p.T, &d2)
-}
-
-func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) {
- FeCopy(&r.X, &p.X)
- FeCopy(&r.Y, &p.Y)
- FeCopy(&r.Z, &p.Z)
-}
-
-func (p *ExtendedGroupElement) ToBytes(s *[32]byte) {
- var recip, x, y FieldElement
-
- FeInvert(&recip, &p.Z)
- FeMul(&x, &p.X, &recip)
- FeMul(&y, &p.Y, &recip)
- FeToBytes(s, &y)
- s[31] ^= FeIsNegative(&x) << 7
-}
-
-func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool {
- var u, v, v3, vxx, check FieldElement
-
- FeFromBytes(&p.Y, s)
- FeOne(&p.Z)
- FeSquare(&u, &p.Y)
- FeMul(&v, &u, &d)
- FeSub(&u, &u, &p.Z) // y = y^2-1
- FeAdd(&v, &v, &p.Z) // v = dy^2+1
-
- FeSquare(&v3, &v)
- FeMul(&v3, &v3, &v) // v3 = v^3
- FeSquare(&p.X, &v3)
- FeMul(&p.X, &p.X, &v)
- FeMul(&p.X, &p.X, &u) // x = uv^7
-
- fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8)
- FeMul(&p.X, &p.X, &v3)
- FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8)
-
- var tmpX, tmp2 [32]byte
-
- FeSquare(&vxx, &p.X)
- FeMul(&vxx, &vxx, &v)
- FeSub(&check, &vxx, &u) // vx^2-u
- if FeIsNonZero(&check) == 1 {
- FeAdd(&check, &vxx, &u) // vx^2+u
- if FeIsNonZero(&check) == 1 {
- return false
- }
- FeMul(&p.X, &p.X, &SqrtM1)
-
- FeToBytes(&tmpX, &p.X)
- for i, v := range tmpX {
- tmp2[31-i] = v
- }
- }
-
- if FeIsNegative(&p.X) != (s[31] >> 7) {
- FeNeg(&p.X, &p.X)
- }
-
- FeMul(&p.T, &p.X, &p.Y)
- return true
-}
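FromBytes decompresses a point: it recovers x from y via x^2 = (y^2-1)/(dy^2+1), using the (q-5)/8 exponent trick plus a multiplication by SqrtM1 when the first root candidate fails, and rejects encodings that are not on the curve. A roundtrip sketch with a hypothetical helper (not in the deleted file):

    // decompressCheck (hypothetical): decode then re-encode a point.
    func decompressCheck(s *[32]byte) bool {
        var P ExtendedGroupElement
        if !P.FromBytes(s) {
            return false // not a curve point
        }
        var out [32]byte
        P.ToBytes(&out)
        return out == *s // holds for canonical encodings (y fully reduced)
    }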
-
-func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) {
- FeMul(&r.X, &p.X, &p.T)
- FeMul(&r.Y, &p.Y, &p.Z)
- FeMul(&r.Z, &p.Z, &p.T)
-}
-
-func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) {
- FeMul(&r.X, &p.X, &p.T)
- FeMul(&r.Y, &p.Y, &p.Z)
- FeMul(&r.Z, &p.Z, &p.T)
- FeMul(&r.T, &p.X, &p.Y)
-}
-
-func (p *PreComputedGroupElement) Zero() {
- FeOne(&p.yPlusX)
- FeOne(&p.yMinusX)
- FeZero(&p.xy2d)
-}
-
-func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
- var t0 FieldElement
-
- FeAdd(&r.X, &p.Y, &p.X)
- FeSub(&r.Y, &p.Y, &p.X)
- FeMul(&r.Z, &r.X, &q.yPlusX)
- FeMul(&r.Y, &r.Y, &q.yMinusX)
- FeMul(&r.T, &q.T2d, &p.T)
- FeMul(&r.X, &p.Z, &q.Z)
- FeAdd(&t0, &r.X, &r.X)
- FeSub(&r.X, &r.Z, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.Y)
- FeAdd(&r.Z, &t0, &r.T)
- FeSub(&r.T, &t0, &r.T)
-}
-
-func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
- var t0 FieldElement
-
- FeAdd(&r.X, &p.Y, &p.X)
- FeSub(&r.Y, &p.Y, &p.X)
- FeMul(&r.Z, &r.X, &q.yMinusX)
- FeMul(&r.Y, &r.Y, &q.yPlusX)
- FeMul(&r.T, &q.T2d, &p.T)
- FeMul(&r.X, &p.Z, &q.Z)
- FeAdd(&t0, &r.X, &r.X)
- FeSub(&r.X, &r.Z, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.Y)
- FeSub(&r.Z, &t0, &r.T)
- FeAdd(&r.T, &t0, &r.T)
-}
-
-func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
- var t0 FieldElement
-
- FeAdd(&r.X, &p.Y, &p.X)
- FeSub(&r.Y, &p.Y, &p.X)
- FeMul(&r.Z, &r.X, &q.yPlusX)
- FeMul(&r.Y, &r.Y, &q.yMinusX)
- FeMul(&r.T, &q.xy2d, &p.T)
- FeAdd(&t0, &p.Z, &p.Z)
- FeSub(&r.X, &r.Z, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.Y)
- FeAdd(&r.Z, &t0, &r.T)
- FeSub(&r.T, &t0, &r.T)
-}
-
-func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
- var t0 FieldElement
-
- FeAdd(&r.X, &p.Y, &p.X)
- FeSub(&r.Y, &p.Y, &p.X)
- FeMul(&r.Z, &r.X, &q.yMinusX)
- FeMul(&r.Y, &r.Y, &q.yPlusX)
- FeMul(&r.T, &q.xy2d, &p.T)
- FeAdd(&t0, &p.Z, &p.Z)
- FeSub(&r.X, &r.Z, &r.Y)
- FeAdd(&r.Y, &r.Z, &r.Y)
- FeSub(&r.Z, &t0, &r.T)
- FeAdd(&r.T, &t0, &r.T)
-}
-
-func slide(r *[256]int8, a *[32]byte) {
- for i := range r {
- r[i] = int8(1 & (a[i>>3] >> uint(i&7)))
- }
-
- for i := range r {
- if r[i] != 0 {
- for b := 1; b <= 6 && i+b < 256; b++ {
- if r[i+b] != 0 {
- if r[i]+(r[i+b]<<uint(b)) <= 15 {
- r[i] += r[i+b] << uint(b)
- r[i+b] = 0
- } else if r[i]-(r[i+b]<<uint(b)) >= -15 {
- r[i] -= r[i+b] << uint(b)
- for k := i + b; k < 256; k++ {
- if r[k] == 0 {
- r[k] = 1
- break
- }
- r[k] = 0
- }
- } else {
- break
- }
- }
- }
- }
- }
-}
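slide recodes the scalar into signed digits r[i] in {0, ±1, ±3, ..., ±15}, spaced so each nonzero digit indexes the odd-multiple tables used below. The digits must re-sum to the original scalar, which this hypothetical check (not in the deleted file) makes explicit:

    // slideCheck (hypothetical): Horner-evaluate the digits back to the scalar.
    func slideCheck(a *[32]byte) bool {
        var r [256]int8
        slide(&r, a)
        sum := new(big.Int)
        for i := 255; i >= 0; i-- {
            sum.Lsh(sum, 1) // sum = 2*sum + r[i]
            sum.Add(sum, big.NewInt(int64(r[i])))
        }
        be := make([]byte, 32) // big.Int.SetBytes wants big-endian
        for i, v := range a {
            be[31-i] = v
        }
        return sum.Cmp(new(big.Int).SetBytes(be)) == 0
    }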
-
-// GeDoubleScalarMultVartime sets r = a*A + b*B
-// where a = a[0]+256*a[1]+...+256^31 a[31].
-// and b = b[0]+256*b[1]+...+256^31 b[31].
-// B is the Ed25519 base point (x,4/5) with x positive.
-func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) {
- var aSlide, bSlide [256]int8
- var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A
- var t CompletedGroupElement
- var u, A2 ExtendedGroupElement
- var i int
-
- slide(&aSlide, a)
- slide(&bSlide, b)
-
- A.ToCached(&Ai[0])
- A.Double(&t)
- t.ToExtended(&A2)
-
- for i := 0; i < 7; i++ {
- geAdd(&t, &A2, &Ai[i])
- t.ToExtended(&u)
- u.ToCached(&Ai[i+1])
- }
-
- r.Zero()
-
- for i = 255; i >= 0; i-- {
- if aSlide[i] != 0 || bSlide[i] != 0 {
- break
- }
- }
-
- for ; i >= 0; i-- {
- r.Double(&t)
-
- if aSlide[i] > 0 {
- t.ToExtended(&u)
- geAdd(&t, &u, &Ai[aSlide[i]/2])
- } else if aSlide[i] < 0 {
- t.ToExtended(&u)
- geSub(&t, &u, &Ai[(-aSlide[i])/2])
- }
-
- if bSlide[i] > 0 {
- t.ToExtended(&u)
- geMixedAdd(&t, &u, &bi[bSlide[i]/2])
- } else if bSlide[i] < 0 {
- t.ToExtended(&u)
- geMixedSub(&t, &u, &bi[(-bSlide[i])/2])
- }
-
- t.ToProjective(r)
- }
-}
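The classic caller of GeDoubleScalarMultVartime is ed25519 signature verification, which computes R' = h*(-A) + s*B and compares encodings; variable time is acceptable there because every input is public. A hedged sketch (negA and wantR are assumed names, not from this file):

    // verifySketch (hypothetical): the final step of an ed25519-style verify.
    func verifySketch(h, s *[32]byte, negA *ExtendedGroupElement, wantR [32]byte) bool {
        var R ProjectiveGroupElement
        GeDoubleScalarMultVartime(&R, h, negA, s)
        var got [32]byte
        R.ToBytes(&got)
        return got == wantR
    }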
-
-// equal returns 1 if b == c and 0 otherwise, assuming that b and c are
-// non-negative.
-func equal(b, c int32) int32 {
- x := uint32(b ^ c)
- x--
- return int32(x >> 31)
-}
-
-// negative returns 1 if b < 0 and 0 otherwise.
-func negative(b int32) int32 {
- return (b >> 31) & 1
-}
-
-func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) {
- FeCMove(&t.yPlusX, &u.yPlusX, b)
- FeCMove(&t.yMinusX, &u.yMinusX, b)
- FeCMove(&t.xy2d, &u.xy2d, b)
-}
-
-func selectPoint(t *PreComputedGroupElement, pos int32, b int32) {
- var minusT PreComputedGroupElement
- bNegative := negative(b)
- bAbs := b - (((-bNegative) & b) << 1)
-
- t.Zero()
- for i := int32(0); i < 8; i++ {
- PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1))
- }
- FeCopy(&minusT.yPlusX, &t.yMinusX)
- FeCopy(&minusT.yMinusX, &t.yPlusX)
- FeNeg(&minusT.xy2d, &t.xy2d)
- PreComputedGroupElementCMove(t, &minusT, bNegative)
-}
-
-// GeScalarMultBase computes h = a*B, where
-// a = a[0]+256*a[1]+...+256^31 a[31]
-// B is the Ed25519 base point (x,4/5) with x positive.
-//
-// Preconditions:
-// a[31] <= 127
-func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) {
- var e [64]int8
-
- for i, v := range a {
- e[2*i] = int8(v & 15)
- e[2*i+1] = int8((v >> 4) & 15)
- }
-
- // each e[i] is between 0 and 15 and e[63] is between 0 and 7.
-
- carry := int8(0)
- for i := 0; i < 63; i++ {
- e[i] += carry
- carry = (e[i] + 8) >> 4
- e[i] -= carry << 4
- }
- e[63] += carry
- // each e[i] is between -8 and 8.
-
- h.Zero()
- var t PreComputedGroupElement
- var r CompletedGroupElement
- for i := int32(1); i < 64; i += 2 {
- selectPoint(&t, i/2, int32(e[i]))
- geMixedAdd(&r, h, &t)
- r.ToExtended(h)
- }
-
- var s ProjectiveGroupElement
-
- h.Double(&r)
- r.ToProjective(&s)
- s.Double(&r)
- r.ToProjective(&s)
- s.Double(&r)
- r.ToProjective(&s)
- s.Double(&r)
- r.ToExtended(h)
-
- for i := int32(0); i < 64; i += 2 {
- selectPoint(&t, i/2, int32(e[i]))
- geMixedAdd(&r, h, &t)
- r.ToExtended(h)
- }
-}
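The first loop splits the scalar into 64 radix-16 digits and the carry pass re-centers them into [-8, 8] so each digit can index the signed lookup table. A standalone cross-check of just that recoding (recode duplicates the lines above; the example value is arbitrary):

```go
package main

import (
	"fmt"
	"math/big"
)

// recode reproduces GeScalarMultBase's digit split and carry pass.
func recode(a [32]byte) [64]int8 {
	var e [64]int8
	for i, v := range a {
		e[2*i] = int8(v & 15)
		e[2*i+1] = int8((v >> 4) & 15)
	}
	carry := int8(0)
	for i := 0; i < 63; i++ {
		e[i] += carry
		carry = (e[i] + 8) >> 4
		e[i] -= carry << 4
	}
	e[63] += carry
	return e
}

func main() {
	var a [32]byte
	a[0] = 0xfe // 254, which forces a signed digit: -2 + 0*16 + 1*256
	e := recode(a)
	got := new(big.Int)
	for i := 63; i >= 0; i-- {
		got.Mul(got, big.NewInt(16))
		got.Add(got, big.NewInt(int64(e[i])))
	}
	fmt.Println(got) // 254
}
```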
-
-// The scalars are elements of GF(l) where l = 2^252 + 27742317777372353535851937790883648493.
-
-// Input:
-// a[0]+256*a[1]+...+256^31*a[31] = a
-// b[0]+256*b[1]+...+256^31*b[31] = b
-// c[0]+256*c[1]+...+256^31*c[31] = c
-//
-// Output:
-// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
-// where l = 2^252 + 27742317777372353535851937790883648493.
-func ScMulAdd(s, a, b, c *[32]byte) {
- a0 := 2097151 & load3(a[:])
- a1 := 2097151 & (load4(a[2:]) >> 5)
- a2 := 2097151 & (load3(a[5:]) >> 2)
- a3 := 2097151 & (load4(a[7:]) >> 7)
- a4 := 2097151 & (load4(a[10:]) >> 4)
- a5 := 2097151 & (load3(a[13:]) >> 1)
- a6 := 2097151 & (load4(a[15:]) >> 6)
- a7 := 2097151 & (load3(a[18:]) >> 3)
- a8 := 2097151 & load3(a[21:])
- a9 := 2097151 & (load4(a[23:]) >> 5)
- a10 := 2097151 & (load3(a[26:]) >> 2)
- a11 := (load4(a[28:]) >> 7)
- b0 := 2097151 & load3(b[:])
- b1 := 2097151 & (load4(b[2:]) >> 5)
- b2 := 2097151 & (load3(b[5:]) >> 2)
- b3 := 2097151 & (load4(b[7:]) >> 7)
- b4 := 2097151 & (load4(b[10:]) >> 4)
- b5 := 2097151 & (load3(b[13:]) >> 1)
- b6 := 2097151 & (load4(b[15:]) >> 6)
- b7 := 2097151 & (load3(b[18:]) >> 3)
- b8 := 2097151 & load3(b[21:])
- b9 := 2097151 & (load4(b[23:]) >> 5)
- b10 := 2097151 & (load3(b[26:]) >> 2)
- b11 := (load4(b[28:]) >> 7)
- c0 := 2097151 & load3(c[:])
- c1 := 2097151 & (load4(c[2:]) >> 5)
- c2 := 2097151 & (load3(c[5:]) >> 2)
- c3 := 2097151 & (load4(c[7:]) >> 7)
- c4 := 2097151 & (load4(c[10:]) >> 4)
- c5 := 2097151 & (load3(c[13:]) >> 1)
- c6 := 2097151 & (load4(c[15:]) >> 6)
- c7 := 2097151 & (load3(c[18:]) >> 3)
- c8 := 2097151 & load3(c[21:])
- c9 := 2097151 & (load4(c[23:]) >> 5)
- c10 := 2097151 & (load3(c[26:]) >> 2)
- c11 := (load4(c[28:]) >> 7)
- var carry [23]int64
-
- s0 := c0 + a0*b0
- s1 := c1 + a0*b1 + a1*b0
- s2 := c2 + a0*b2 + a1*b1 + a2*b0
- s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0
- s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0
- s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0
- s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0
- s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0
- s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0
- s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0
- s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0
- s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0
- s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1
- s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2
- s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3
- s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4
- s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5
- s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6
- s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7
- s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8
- s20 := a9*b11 + a10*b10 + a11*b9
- s21 := a10*b11 + a11*b10
- s22 := a11 * b11
- s23 := int64(0)
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
- carry[18] = (s18 + (1 << 20)) >> 21
- s19 += carry[18]
- s18 -= carry[18] << 21
- carry[20] = (s20 + (1 << 20)) >> 21
- s21 += carry[20]
- s20 -= carry[20] << 21
- carry[22] = (s22 + (1 << 20)) >> 21
- s23 += carry[22]
- s22 -= carry[22] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
- carry[17] = (s17 + (1 << 20)) >> 21
- s18 += carry[17]
- s17 -= carry[17] << 21
- carry[19] = (s19 + (1 << 20)) >> 21
- s20 += carry[19]
- s19 -= carry[19] << 21
- carry[21] = (s21 + (1 << 20)) >> 21
- s22 += carry[21]
- s21 -= carry[21] << 21
-
- s11 += s23 * 666643
- s12 += s23 * 470296
- s13 += s23 * 654183
- s14 -= s23 * 997805
- s15 += s23 * 136657
- s16 -= s23 * 683901
- s23 = 0
-
- s10 += s22 * 666643
- s11 += s22 * 470296
- s12 += s22 * 654183
- s13 -= s22 * 997805
- s14 += s22 * 136657
- s15 -= s22 * 683901
- s22 = 0
-
- s9 += s21 * 666643
- s10 += s21 * 470296
- s11 += s21 * 654183
- s12 -= s21 * 997805
- s13 += s21 * 136657
- s14 -= s21 * 683901
- s21 = 0
-
- s8 += s20 * 666643
- s9 += s20 * 470296
- s10 += s20 * 654183
- s11 -= s20 * 997805
- s12 += s20 * 136657
- s13 -= s20 * 683901
- s20 = 0
-
- s7 += s19 * 666643
- s8 += s19 * 470296
- s9 += s19 * 654183
- s10 -= s19 * 997805
- s11 += s19 * 136657
- s12 -= s19 * 683901
- s19 = 0
-
- s6 += s18 * 666643
- s7 += s18 * 470296
- s8 += s18 * 654183
- s9 -= s18 * 997805
- s10 += s18 * 136657
- s11 -= s18 * 683901
- s18 = 0
-
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
-
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
-
- s5 += s17 * 666643
- s6 += s17 * 470296
- s7 += s17 * 654183
- s8 -= s17 * 997805
- s9 += s17 * 136657
- s10 -= s17 * 683901
- s17 = 0
-
- s4 += s16 * 666643
- s5 += s16 * 470296
- s6 += s16 * 654183
- s7 -= s16 * 997805
- s8 += s16 * 136657
- s9 -= s16 * 683901
- s16 = 0
-
- s3 += s15 * 666643
- s4 += s15 * 470296
- s5 += s15 * 654183
- s6 -= s15 * 997805
- s7 += s15 * 136657
- s8 -= s15 * 683901
- s15 = 0
-
- s2 += s14 * 666643
- s3 += s14 * 470296
- s4 += s14 * 654183
- s5 -= s14 * 997805
- s6 += s14 * 136657
- s7 -= s14 * 683901
- s14 = 0
-
- s1 += s13 * 666643
- s2 += s13 * 470296
- s3 += s13 * 654183
- s4 -= s13 * 997805
- s5 += s13 * 136657
- s6 -= s13 * 683901
- s13 = 0
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[11] = s11 >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- s[0] = byte(s0 >> 0)
- s[1] = byte(s0 >> 8)
- s[2] = byte((s0 >> 16) | (s1 << 5))
- s[3] = byte(s1 >> 3)
- s[4] = byte(s1 >> 11)
- s[5] = byte((s1 >> 19) | (s2 << 2))
- s[6] = byte(s2 >> 6)
- s[7] = byte((s2 >> 14) | (s3 << 7))
- s[8] = byte(s3 >> 1)
- s[9] = byte(s3 >> 9)
- s[10] = byte((s3 >> 17) | (s4 << 4))
- s[11] = byte(s4 >> 4)
- s[12] = byte(s4 >> 12)
- s[13] = byte((s4 >> 20) | (s5 << 1))
- s[14] = byte(s5 >> 7)
- s[15] = byte((s5 >> 15) | (s6 << 6))
- s[16] = byte(s6 >> 2)
- s[17] = byte(s6 >> 10)
- s[18] = byte((s6 >> 18) | (s7 << 3))
- s[19] = byte(s7 >> 5)
- s[20] = byte(s7 >> 13)
- s[21] = byte(s8 >> 0)
- s[22] = byte(s8 >> 8)
- s[23] = byte((s8 >> 16) | (s9 << 5))
- s[24] = byte(s9 >> 3)
- s[25] = byte(s9 >> 11)
- s[26] = byte((s9 >> 19) | (s10 << 2))
- s[27] = byte(s10 >> 6)
- s[28] = byte((s10 >> 14) | (s11 << 7))
- s[29] = byte(s11 >> 1)
- s[30] = byte(s11 >> 9)
- s[31] = byte(s11 >> 17)
-}
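Restated with math/big, the function computes (a·b + c) mod l; the vendored code does this with 21-bit limbs and branch-free carries. A reference sketch for understanding only, not constant-time (scMulAddRef is a hypothetical name; l is written out in decimal):

```go
package main

import (
	"fmt"
	"math/big"
)

// l = 2^252 + 27742317777372353535851937790883648493, in decimal.
var l, _ = new(big.Int).SetString(
	"7237005577332262213973186563042994240857116359379907606001950938285454250989", 10)

// scMulAddRef is the plain-arithmetic meaning of ScMulAdd.
func scMulAddRef(a, b, c *big.Int) *big.Int {
	s := new(big.Int).Mul(a, b)
	s.Add(s, c)
	return s.Mod(s, l)
}

func main() {
	fmt.Println(scMulAddRef(big.NewInt(3), big.NewInt(5), big.NewInt(7))) // 22
}
```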
-
-// Input:
-// s[0]+256*s[1]+...+256^63*s[63] = s
-//
-// Output:
-// s[0]+256*s[1]+...+256^31*s[31] = s mod l
-// where l = 2^252 + 27742317777372353535851937790883648493.
-func ScReduce(out *[32]byte, s *[64]byte) {
- s0 := 2097151 & load3(s[:])
- s1 := 2097151 & (load4(s[2:]) >> 5)
- s2 := 2097151 & (load3(s[5:]) >> 2)
- s3 := 2097151 & (load4(s[7:]) >> 7)
- s4 := 2097151 & (load4(s[10:]) >> 4)
- s5 := 2097151 & (load3(s[13:]) >> 1)
- s6 := 2097151 & (load4(s[15:]) >> 6)
- s7 := 2097151 & (load3(s[18:]) >> 3)
- s8 := 2097151 & load3(s[21:])
- s9 := 2097151 & (load4(s[23:]) >> 5)
- s10 := 2097151 & (load3(s[26:]) >> 2)
- s11 := 2097151 & (load4(s[28:]) >> 7)
- s12 := 2097151 & (load4(s[31:]) >> 4)
- s13 := 2097151 & (load3(s[34:]) >> 1)
- s14 := 2097151 & (load4(s[36:]) >> 6)
- s15 := 2097151 & (load3(s[39:]) >> 3)
- s16 := 2097151 & load3(s[42:])
- s17 := 2097151 & (load4(s[44:]) >> 5)
- s18 := 2097151 & (load3(s[47:]) >> 2)
- s19 := 2097151 & (load4(s[49:]) >> 7)
- s20 := 2097151 & (load4(s[52:]) >> 4)
- s21 := 2097151 & (load3(s[55:]) >> 1)
- s22 := 2097151 & (load4(s[57:]) >> 6)
- s23 := (load4(s[60:]) >> 3)
-
- s11 += s23 * 666643
- s12 += s23 * 470296
- s13 += s23 * 654183
- s14 -= s23 * 997805
- s15 += s23 * 136657
- s16 -= s23 * 683901
- s23 = 0
-
- s10 += s22 * 666643
- s11 += s22 * 470296
- s12 += s22 * 654183
- s13 -= s22 * 997805
- s14 += s22 * 136657
- s15 -= s22 * 683901
- s22 = 0
-
- s9 += s21 * 666643
- s10 += s21 * 470296
- s11 += s21 * 654183
- s12 -= s21 * 997805
- s13 += s21 * 136657
- s14 -= s21 * 683901
- s21 = 0
-
- s8 += s20 * 666643
- s9 += s20 * 470296
- s10 += s20 * 654183
- s11 -= s20 * 997805
- s12 += s20 * 136657
- s13 -= s20 * 683901
- s20 = 0
-
- s7 += s19 * 666643
- s8 += s19 * 470296
- s9 += s19 * 654183
- s10 -= s19 * 997805
- s11 += s19 * 136657
- s12 -= s19 * 683901
- s19 = 0
-
- s6 += s18 * 666643
- s7 += s18 * 470296
- s8 += s18 * 654183
- s9 -= s18 * 997805
- s10 += s18 * 136657
- s11 -= s18 * 683901
- s18 = 0
-
- var carry [17]int64
-
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[12] = (s12 + (1 << 20)) >> 21
- s13 += carry[12]
- s12 -= carry[12] << 21
- carry[14] = (s14 + (1 << 20)) >> 21
- s15 += carry[14]
- s14 -= carry[14] << 21
- carry[16] = (s16 + (1 << 20)) >> 21
- s17 += carry[16]
- s16 -= carry[16] << 21
-
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
- carry[13] = (s13 + (1 << 20)) >> 21
- s14 += carry[13]
- s13 -= carry[13] << 21
- carry[15] = (s15 + (1 << 20)) >> 21
- s16 += carry[15]
- s15 -= carry[15] << 21
-
- s5 += s17 * 666643
- s6 += s17 * 470296
- s7 += s17 * 654183
- s8 -= s17 * 997805
- s9 += s17 * 136657
- s10 -= s17 * 683901
- s17 = 0
-
- s4 += s16 * 666643
- s5 += s16 * 470296
- s6 += s16 * 654183
- s7 -= s16 * 997805
- s8 += s16 * 136657
- s9 -= s16 * 683901
- s16 = 0
-
- s3 += s15 * 666643
- s4 += s15 * 470296
- s5 += s15 * 654183
- s6 -= s15 * 997805
- s7 += s15 * 136657
- s8 -= s15 * 683901
- s15 = 0
-
- s2 += s14 * 666643
- s3 += s14 * 470296
- s4 += s14 * 654183
- s5 -= s14 * 997805
- s6 += s14 * 136657
- s7 -= s14 * 683901
- s14 = 0
-
- s1 += s13 * 666643
- s2 += s13 * 470296
- s3 += s13 * 654183
- s4 -= s13 * 997805
- s5 += s13 * 136657
- s6 -= s13 * 683901
- s13 = 0
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = (s0 + (1 << 20)) >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[2] = (s2 + (1 << 20)) >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[4] = (s4 + (1 << 20)) >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[6] = (s6 + (1 << 20)) >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[8] = (s8 + (1 << 20)) >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[10] = (s10 + (1 << 20)) >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- carry[1] = (s1 + (1 << 20)) >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[3] = (s3 + (1 << 20)) >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[5] = (s5 + (1 << 20)) >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[7] = (s7 + (1 << 20)) >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[9] = (s9 + (1 << 20)) >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[11] = (s11 + (1 << 20)) >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
- carry[11] = s11 >> 21
- s12 += carry[11]
- s11 -= carry[11] << 21
-
- s0 += s12 * 666643
- s1 += s12 * 470296
- s2 += s12 * 654183
- s3 -= s12 * 997805
- s4 += s12 * 136657
- s5 -= s12 * 683901
- s12 = 0
-
- carry[0] = s0 >> 21
- s1 += carry[0]
- s0 -= carry[0] << 21
- carry[1] = s1 >> 21
- s2 += carry[1]
- s1 -= carry[1] << 21
- carry[2] = s2 >> 21
- s3 += carry[2]
- s2 -= carry[2] << 21
- carry[3] = s3 >> 21
- s4 += carry[3]
- s3 -= carry[3] << 21
- carry[4] = s4 >> 21
- s5 += carry[4]
- s4 -= carry[4] << 21
- carry[5] = s5 >> 21
- s6 += carry[5]
- s5 -= carry[5] << 21
- carry[6] = s6 >> 21
- s7 += carry[6]
- s6 -= carry[6] << 21
- carry[7] = s7 >> 21
- s8 += carry[7]
- s7 -= carry[7] << 21
- carry[8] = s8 >> 21
- s9 += carry[8]
- s8 -= carry[8] << 21
- carry[9] = s9 >> 21
- s10 += carry[9]
- s9 -= carry[9] << 21
- carry[10] = s10 >> 21
- s11 += carry[10]
- s10 -= carry[10] << 21
-
- out[0] = byte(s0 >> 0)
- out[1] = byte(s0 >> 8)
- out[2] = byte((s0 >> 16) | (s1 << 5))
- out[3] = byte(s1 >> 3)
- out[4] = byte(s1 >> 11)
- out[5] = byte((s1 >> 19) | (s2 << 2))
- out[6] = byte(s2 >> 6)
- out[7] = byte((s2 >> 14) | (s3 << 7))
- out[8] = byte(s3 >> 1)
- out[9] = byte(s3 >> 9)
- out[10] = byte((s3 >> 17) | (s4 << 4))
- out[11] = byte(s4 >> 4)
- out[12] = byte(s4 >> 12)
- out[13] = byte((s4 >> 20) | (s5 << 1))
- out[14] = byte(s5 >> 7)
- out[15] = byte((s5 >> 15) | (s6 << 6))
- out[16] = byte(s6 >> 2)
- out[17] = byte(s6 >> 10)
- out[18] = byte((s6 >> 18) | (s7 << 3))
- out[19] = byte(s7 >> 5)
- out[20] = byte(s7 >> 13)
- out[21] = byte(s8 >> 0)
- out[22] = byte(s8 >> 8)
- out[23] = byte((s8 >> 16) | (s9 << 5))
- out[24] = byte(s9 >> 3)
- out[25] = byte(s9 >> 11)
- out[26] = byte((s9 >> 19) | (s10 << 2))
- out[27] = byte(s10 >> 6)
- out[28] = byte((s10 >> 14) | (s11 << 7))
- out[29] = byte(s11 >> 1)
- out[30] = byte(s11 >> 9)
- out[31] = byte(s11 >> 17)
-}
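ScReduce is the same reduction applied to a 64-byte little-endian input. Its reference semantics, continuing the math/big sketch above (a fragment; l as defined there, not constant-time):

```go
// Reference for ScReduce: interpret the 64 little-endian bytes as an
// integer and reduce it mod l.
func scReduceRef(s [64]byte) *big.Int {
	be := make([]byte, len(s))
	for i, b := range s {
		be[len(s)-1-i] = b // little-endian bytes -> big-endian for SetBytes
	}
	v := new(big.Int).SetBytes(be)
	return v.Mod(v, l)
}
```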
-
-// order is the group order l = 2^252 + 27742317777372353535851937790883648493,
-// packed into little-endian 64-bit limbs.
-var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}
-
-// ScMinimal returns true if the given scalar is less than the order of the
-// curve.
-func ScMinimal(scalar *[32]byte) bool {
- for i := 3; ; i-- {
- v := binary.LittleEndian.Uint64(scalar[i*8:])
- if v > order[i] {
- return false
- } else if v < order[i] {
- break
- } else if i == 0 {
- return false
- }
- }
-
- return true
-}
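A quick standalone check of the limb-by-limb comparison; this is the malleability check verifiers apply to the s half of a signature (the function body is duplicated so the example runs on its own):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}

// ScMinimal reports whether scalar (little-endian) is less than l.
func ScMinimal(scalar *[32]byte) bool {
	for i := 3; ; i-- {
		v := binary.LittleEndian.Uint64(scalar[i*8:])
		if v > order[i] {
			return false
		} else if v < order[i] {
			break
		} else if i == 0 {
			return false
		}
	}
	return true
}

func main() {
	var zero, max [32]byte
	for i := range max {
		max[i] = 0xff
	}
	fmt.Println(ScMinimal(&zero), ScMinimal(&max)) // true false
}
```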
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go b/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go
deleted file mode 100644
index b65b58bc..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
-// very similar to PEM except that it has an additional CRC checksum.
-package armor // import "github.com/keybase/go-crypto/openpgp/armor"
-
-import (
- "bufio"
- "bytes"
- "encoding/base64"
- "io"
- "strings"
- "unicode"
-
- "github.com/keybase/go-crypto/openpgp/errors"
-)
-
-// A Block represents an OpenPGP armored structure.
-//
-// The encoded form is:
-// -----BEGIN Type-----
-// Headers
-//
-// base64-encoded Bytes
-// '=' base64 encoded checksum
-// -----END Type-----
-// where Headers is a possibly empty sequence of Key: Value lines.
-//
-// Since the armored data can be very large, this package presents a streaming
-// interface.
-type Block struct {
- Type string // The type, taken from the preamble (e.g. "PGP SIGNATURE").
- Header map[string]string // Optional headers.
- Body io.Reader // A Reader from which the contents can be read
- lReader lineReader
- oReader openpgpReader
-}
-
-var ArmorCorrupt error = errors.StructuralError("armor invalid")
-
-const crc24Init = 0xb704ce
-const crc24Poly = 0x1864cfb
-const crc24Mask = 0xffffff
-
-// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
-func crc24(crc uint32, d []byte) uint32 {
- for _, b := range d {
- crc ^= uint32(b) << 16
- for i := 0; i < 8; i++ {
- crc <<= 1
- if crc&0x1000000 != 0 {
- crc ^= crc24Poly
- }
- }
- }
- return crc
-}
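The decoder below expects this checksum base64-encoded on a line of its own, prefixed with '='. A standalone sketch of producing such a line (the payload is arbitrary; the four base64 digits depend on it):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

const crc24Init = 0xb704ce
const crc24Poly = 0x1864cfb

func crc24(crc uint32, d []byte) uint32 {
	for _, b := range d {
		crc ^= uint32(b) << 16
		for i := 0; i < 8; i++ {
			crc <<= 1
			if crc&0x1000000 != 0 {
				crc ^= crc24Poly
			}
		}
	}
	return crc
}

func main() {
	sum := crc24(crc24Init, []byte("hello")) & 0xffffff
	raw := []byte{byte(sum >> 16), byte(sum >> 8), byte(sum)}
	// Printed as it would appear at the foot of an armored block.
	fmt.Println("=" + base64.StdEncoding.EncodeToString(raw))
}
```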
-
-var armorStart = []byte("-----BEGIN ")
-var armorEnd = []byte("-----END ")
-var armorEndOfLine = []byte("-----")
-
-// lineReader wraps a line based reader. It watches for the end of an armor
-// block and records the expected CRC value.
-type lineReader struct {
- in *bufio.Reader
- buf []byte
- eof bool
- crc *uint32
-}
-
-// ourIsSpace reports whether a rune is either a space according to the
-// unicode package, or a ZeroWidthSpace (which the unicode package does
-// not classify as a space). Used to trim lines during header reading.
-func ourIsSpace(r rune) bool {
- return r == '\u200b' || unicode.IsSpace(r)
-}
-
-func (l *lineReader) Read(p []byte) (n int, err error) {
- if l.eof {
- return 0, io.EOF
- }
-
- if len(l.buf) > 0 {
- n = copy(p, l.buf)
- l.buf = l.buf[n:]
- return
- }
-
- line, _, err := l.in.ReadLine()
- if err != nil {
- return
- }
-
- // Entry-level cleanup, just trim spaces.
- line = bytes.TrimFunc(line, ourIsSpace)
-
- if len(line) == 5 && line[0] == '=' {
- // This is the checksum line
- var expectedBytes [3]byte
- var m int
- m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
- if m != 3 || err != nil {
- return
- }
- crc := uint32(expectedBytes[0])<<16 |
- uint32(expectedBytes[1])<<8 |
- uint32(expectedBytes[2])
- l.crc = &crc
-
- for {
- line, _, err = l.in.ReadLine()
- if err == io.EOF {
- break
- }
- if err != nil {
- return
- }
- if len(strings.TrimSpace(string(line))) > 0 {
- break
- }
- }
- if !bytes.HasPrefix(line, armorEnd) {
- return 0, ArmorCorrupt
- }
-
- l.eof = true
- return 0, io.EOF
- }
-
- if bytes.HasPrefix(line, armorEnd) {
- // Unexpected ending, there was no checksum.
- l.eof = true
- l.crc = nil
- return 0, io.EOF
- }
-
- // Strip whitespace from the line before passing it on (to the
- // base64 decoder). This is done after the CRC and armorEnd tests:
- // if whitespace appeared inside the CRC line, the CRC would
- // otherwise be treated as part of the payload and would likely
- // fail base64 decoding.
- line = bytes.Map(func(r rune) rune {
- if ourIsSpace(r) {
- return -1
- }
- return r
- }, line)
-
- n = copy(p, line)
- bytesToSave := len(line) - n
- if bytesToSave > 0 {
- if cap(l.buf) < bytesToSave {
- l.buf = make([]byte, 0, bytesToSave)
- }
- l.buf = l.buf[0:bytesToSave]
- copy(l.buf, line[n:])
- }
-
- return
-}
-
-// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
-// a running CRC of the resulting data and checks the CRC against the value
-// found by the lineReader at EOF.
-type openpgpReader struct {
- lReader *lineReader
- b64Reader io.Reader
- currentCRC uint32
-}
-
-func (r *openpgpReader) Read(p []byte) (n int, err error) {
- n, err = r.b64Reader.Read(p)
- r.currentCRC = crc24(r.currentCRC, p[:n])
-
- if err == io.EOF {
- if r.lReader.crc != nil && *r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
- return 0, ArmorCorrupt
- }
- }
-
- return
-}
-
-// Decode reads a PGP armored block from the given Reader. It will ignore
-// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
-// given Reader is not usable after calling this function: an arbitrary amount
-// of data may have been read past the end of the block.
-func Decode(in io.Reader) (p *Block, err error) {
- r := bufio.NewReaderSize(in, 100)
- var line []byte
- ignoreNext := false
-
-TryNextBlock:
- p = nil
-
- // Skip leading garbage
- for {
- ignoreThis := ignoreNext
- line, ignoreNext, err = r.ReadLine()
- if err != nil {
- return
- }
- if ignoreNext || ignoreThis {
- continue
- }
- line = bytes.TrimSpace(line)
- if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
- break
- }
- }
-
- p = new(Block)
- p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
- p.Header = make(map[string]string)
- nextIsContinuation := false
- var lastKey string
-
- // Read headers
- for {
- isContinuation := nextIsContinuation
- line, nextIsContinuation, err = r.ReadLine()
- if err != nil {
- p = nil
- return
- }
- if isContinuation {
- p.Header[lastKey] += string(line)
- continue
- }
- line = bytes.TrimFunc(line, ourIsSpace)
- if len(line) == 0 {
- break
- }
-
- i := bytes.Index(line, []byte(": "))
- if i == -1 {
- goto TryNextBlock
- }
- lastKey = string(line[:i])
- p.Header[lastKey] = string(line[i+2:])
- }
-
- p.lReader.in = r
- p.oReader.currentCRC = crc24Init
- p.oReader.lReader = &p.lReader
- p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
- p.Body = &p.oReader
-
- return
-}
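A hedged sketch of consuming this API (armoredText is a placeholder; with empty input, Decode returns io.EOF as documented):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/keybase/go-crypto/openpgp/armor"
)

func main() {
	var armoredText string // placeholder for real armored input
	block, err := armor.Decode(strings.NewReader(armoredText))
	if err != nil {
		log.Fatal(err) // io.EOF when no block is found
	}
	fmt.Println(block.Type, block.Header)

	body, err := io.ReadAll(block.Body) // CRC is verified once EOF is reached
	if err != nil {
		log.Fatal(err) // armor.ArmorCorrupt on checksum mismatch
	}
	fmt.Printf("%d body bytes\n", len(body))
}
```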
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go b/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go
deleted file mode 100644
index 075a1978..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package armor
-
-import (
- "encoding/base64"
- "io"
-)
-
-var armorHeaderSep = []byte(": ")
-var blockEnd = []byte("\n=")
-var newline = []byte("\n")
-var armorEndOfLineOut = []byte("-----\n")
-
-// writeSlices writes its arguments to the given Writer.
-func writeSlices(out io.Writer, slices ...[]byte) (err error) {
- for _, s := range slices {
- _, err = out.Write(s)
- if err != nil {
- return err
- }
- }
- return
-}
-
-// lineBreaker breaks data across several lines, all of the same byte length
-// (except possibly the last). Lines are broken with a single '\n'.
-type lineBreaker struct {
- lineLength int
- line []byte
- used int
- out io.Writer
- haveWritten bool
-}
-
-func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
- return &lineBreaker{
- lineLength: lineLength,
- line: make([]byte, lineLength),
- used: 0,
- out: out,
- }
-}
-
-func (l *lineBreaker) Write(b []byte) (n int, err error) {
- n = len(b)
-
- if n == 0 {
- return
- }
-
- if l.used == 0 && l.haveWritten {
- _, err = l.out.Write([]byte{'\n'})
- if err != nil {
- return
- }
- }
-
- if l.used+len(b) < l.lineLength {
- l.used += copy(l.line[l.used:], b)
- return
- }
-
- l.haveWritten = true
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- excess := l.lineLength - l.used
- l.used = 0
-
- _, err = l.out.Write(b[0:excess])
- if err != nil {
- return
- }
-
- _, err = l.Write(b[excess:])
- return
-}
-
-func (l *lineBreaker) Close() (err error) {
- if l.used > 0 {
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- }
-
- return
-}
-
-// encoding keeps track of a running CRC24 over the data which has been written
-// to it and outputs an OpenPGP checksum when closed, followed by an armor
-// trailer.
-//
-// It's built into a stack of io.Writers:
-// encoding -> base64 encoder -> lineBreaker -> out
-type encoding struct {
- out io.Writer
- breaker *lineBreaker
- b64 io.WriteCloser
- crc uint32
- blockType []byte
-}
-
-func (e *encoding) Write(data []byte) (n int, err error) {
- e.crc = crc24(e.crc, data)
- return e.b64.Write(data)
-}
-
-func (e *encoding) Close() (err error) {
- err = e.b64.Close()
- if err != nil {
- return
- }
- e.breaker.Close()
-
- var checksumBytes [3]byte
- checksumBytes[0] = byte(e.crc >> 16)
- checksumBytes[1] = byte(e.crc >> 8)
- checksumBytes[2] = byte(e.crc)
-
- var b64ChecksumBytes [4]byte
- base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
-
- return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine, []byte{'\n'})
-}
-
-// Encode returns a WriteCloser which will encode the data written to it in
-// OpenPGP armor.
-func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
- bType := []byte(blockType)
- err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
- if err != nil {
- return
- }
-
- for k, v := range headers {
- err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
- if err != nil {
- return
- }
- }
-
- _, err = out.Write(newline)
- if err != nil {
- return
- }
-
- e := &encoding{
- out: out,
- breaker: newLineBreaker(out, 64),
- crc: crc24Init,
- blockType: bType,
- }
- e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
- return e, nil
-}
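And the writing side, as a short usage sketch (block type and header are illustrative):

```go
package main

import (
	"log"
	"os"

	"github.com/keybase/go-crypto/openpgp/armor"
)

func main() {
	w, err := armor.Encode(os.Stdout, "PGP MESSAGE", map[string]string{"Comment": "example"})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello, armor")); err != nil {
		log.Fatal(err)
	}
	// Close flushes the base64, then writes the "=" CRC24 line and the
	// -----END PGP MESSAGE----- trailer.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```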
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go b/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go
deleted file mode 100644
index e601e389..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import "hash"
-
-// NewCanonicalTextHash reformats text written to it into the canonical
-// form and then applies the hash h. See RFC 4880, section 5.2.1.
-func NewCanonicalTextHash(h hash.Hash) hash.Hash {
- return &canonicalTextHash{h, 0}
-}
-
-type canonicalTextHash struct {
- h hash.Hash
- s int
-}
-
-var newline = []byte{'\r', '\n'}
-
-func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
- start := 0
-
- for i, c := range buf {
- switch cth.s {
- case 0:
- if c == '\r' {
- cth.s = 1
- } else if c == '\n' {
- cth.h.Write(buf[start:i])
- cth.h.Write(newline)
- start = i + 1
- }
- case 1:
- cth.s = 0
- }
- }
-
- cth.h.Write(buf[start:])
- return len(buf), nil
-}
-
-func (cth *canonicalTextHash) Sum(in []byte) []byte {
- return cth.h.Sum(in)
-}
-
-func (cth *canonicalTextHash) Reset() {
- cth.h.Reset()
- cth.s = 0
-}
-
-func (cth *canonicalTextHash) Size() int {
- return cth.h.Size()
-}
-
-func (cth *canonicalTextHash) BlockSize() int {
- return cth.h.BlockSize()
-}
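A small demonstration of the canonicalization: texts differing only in line endings hash identically, because a lone '\n' is rewritten to '\r\n' while an existing '\r\n' passes through unchanged:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"github.com/keybase/go-crypto/openpgp"
)

func main() {
	h1 := openpgp.NewCanonicalTextHash(sha256.New())
	h1.Write([]byte("line1\nline2\n"))

	h2 := openpgp.NewCanonicalTextHash(sha256.New())
	h2.Write([]byte("line1\r\nline2\r\n"))

	fmt.Println(bytes.Equal(h1.Sum(nil), h2.Sum(nil))) // true
}
```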
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go
deleted file mode 100644
index 1a87b275..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/ecdh/ecdh.go
+++ /dev/null
@@ -1,316 +0,0 @@
-package ecdh
-
-import (
- "bytes"
- "crypto"
- "crypto/aes"
- "crypto/elliptic"
- "encoding/binary"
- "errors"
- "github.com/keybase/go-crypto/curve25519"
- "io"
- "math/big"
-)
-
-type PublicKey struct {
- elliptic.Curve
- X, Y *big.Int
-}
-
-type PrivateKey struct {
- PublicKey
- X *big.Int
-}
-
-// KDF implements Key Derivation Function as described in
-// https://tools.ietf.org/html/rfc6637#section-7
-func (e *PublicKey) KDF(S []byte, kdfParams []byte, hash crypto.Hash) []byte {
- sLen := (e.Curve.Params().P.BitLen() + 7) / 8
- buf := new(bytes.Buffer)
- buf.Write([]byte{0, 0, 0, 1})
- if sLen > len(S) {
- // zero-pad S. If we were given an invalid S (bigger than the
- // curve's P), we will produce an invalid key. Garbage in,
- // garbage out.
- buf.Write(make([]byte, sLen-len(S)))
- }
- buf.Write(S)
- buf.Write(kdfParams)
-
- hashw := hash.New()
-
- hashw.Write(buf.Bytes())
- key := hashw.Sum(nil)
-
- return key
-}
-
-// AESKeyUnwrap implements RFC 3394 Key Unwrapping. See
-// http://tools.ietf.org/html/rfc3394#section-2.2.1
-// Note: The second described algorithm ("index-based") is implemented
-// here.
-func AESKeyUnwrap(key, cipherText []byte) ([]byte, error) {
- if len(cipherText)%8 != 0 {
- return nil, errors.New("cipherText must by a multiple of 64 bits")
- }
-
- cipher, err := aes.NewCipher(key)
- if err != nil {
- return nil, err
- }
-
- nblocks := len(cipherText)/8 - 1
-
- // 1) Initialize variables.
- // - Set A = C[0]
- var A [aes.BlockSize]byte
- copy(A[:8], cipherText[:8])
-
- // For i = 1 to n
- // Set R[i] = C[i]
- R := make([]byte, len(cipherText)-8)
- copy(R, cipherText[8:])
-
- // 2) Compute intermediate values.
- for j := 5; j >= 0; j-- {
- for i := nblocks - 1; i >= 0; i-- {
- // B = AES-1(K, (A ^ t) | R[i]) where t = n*j+i+1 (i is 0-based here)
- // A = MSB(64, B)
- t := uint64(nblocks*j + i + 1)
- At := binary.BigEndian.Uint64(A[:8]) ^ t
- binary.BigEndian.PutUint64(A[:8], At)
-
- copy(A[8:], R[i*8:i*8+8])
- cipher.Decrypt(A[:], A[:])
-
- // R[i] = LSB(B, 64)
- copy(R[i*8:i*8+8], A[8:])
- }
- }
-
- // 3) Output results.
- // If A is an appropriate initial value (see 2.2.3),
- for i := 0; i < 8; i++ {
- if A[i] != 0xA6 {
- return nil, errors.New("Failed to unwrap key (A is not IV)")
- }
- }
-
- return R, nil
-}
-
-// AESKeyWrap implements RFC 3394 Key Wrapping. See
-// https://tools.ietf.org/html/rfc3394#section-2.2.2
-// Note: The second described algorithm ("index-based") is implemented
-// here.
-func AESKeyWrap(key, plainText []byte) ([]byte, error) {
- if len(plainText)%8 != 0 {
- return nil, errors.New("plainText must be a multiple of 64 bits")
- }
-
- cipher, err := aes.NewCipher(key) // NewCipher checks key size
- if err != nil {
- return nil, err
- }
-
- nblocks := len(plainText) / 8
-
- // 1) Initialize variables.
- var A [aes.BlockSize]byte
- // Section 2.2.3.1 -- Initial Value
- // http://tools.ietf.org/html/rfc3394#section-2.2.3.1
- for i := 0; i < 8; i++ {
- A[i] = 0xA6
- }
-
- // For i = 1 to n
- // Set R[i] = P[i]
- R := make([]byte, len(plainText))
- copy(R, plainText)
-
- // 2) Calculate intermediate values.
- for j := 0; j <= 5; j++ {
- for i := 0; i < nblocks; i++ {
- // B = AES(K, A | R[i])
- copy(A[8:], R[i*8:i*8+8])
- cipher.Encrypt(A[:], A[:])
-
- // (Assume B = A)
- // A = MSB(64, B) ^ t where t = (n*j)+i+1 (i is 0-based here)
- t := uint64(j*nblocks + i + 1)
- At := binary.BigEndian.Uint64(A[:8]) ^ t
- binary.BigEndian.PutUint64(A[:8], At)
-
- // R[i] = LSB(64, B)
- copy(R[i*8:i*8+8], A[8:])
- }
- }
-
- // 3) Output results.
- // Set C[0] = A
- // For i = 1 to n
- // C[i] = R[i]
- return append(A[:8], R...), nil
-}
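A standalone check against the RFC 3394 section 4.1 test vector (128-bit KEK wrapping 128 bits of key data), assuming this vendored package path:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/keybase/go-crypto/openpgp/ecdh"
)

func main() {
	// RFC 3394 §4.1: wrap 128 bits of key data with a 128-bit KEK.
	kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F")
	data, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF")

	wrapped, err := ecdh.AESKeyWrap(kek, data)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%X\n", wrapped)
	// Expected: 1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5

	unwrapped, err := ecdh.AESKeyUnwrap(kek, wrapped)
	fmt.Println(err == nil, hex.EncodeToString(unwrapped) == "00112233445566778899aabbccddeeff")
}
```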
-
-// PadBuffer pads the byte buffer buf to a length that is a multiple of
-// blockLen. Each appended byte holds the number of padded bytes. E.g. if
-// the buffer is 3 bytes short of being 40 bytes total, the appended
-// bytes will be [03, 03, 03].
-func PadBuffer(buf []byte, blockLen int) []byte {
- padding := blockLen - (len(buf) % blockLen)
- if padding == 0 {
- return buf
- }
-
- padBuf := make([]byte, padding)
- for i := 0; i < padding; i++ {
- padBuf[i] = byte(padding)
- }
-
- return append(buf, padBuf...)
-}
-
-// UnpadBuffer verifies that buffer contains proper padding and
-// returns buffer without the padding, or nil if the padding was
-// invalid.
-func UnpadBuffer(buf []byte, dataLen int) []byte {
- padding := len(buf) - dataLen
- outBuf := buf[:dataLen]
-
- for i := dataLen; i < len(buf); i++ {
- if buf[i] != byte(padding) {
- // Invalid padding - bail out
- return nil
- }
- }
-
- return outBuf
-}
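A small usage sketch. Note that, as written, `padding` can never be 0 (an already-aligned buffer still gains a full block of padding, PKCS#5-style), so the early return in PadBuffer is effectively dead code:

```go
package main

import (
	"fmt"

	"github.com/keybase/go-crypto/openpgp/ecdh"
)

func main() {
	buf := ecdh.PadBuffer([]byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE}, 8)
	fmt.Printf("% X\n", buf) // AA BB CC DD EE 03 03 03

	orig := ecdh.UnpadBuffer(buf, 5) // verifies the 03 03 03 tail
	fmt.Printf("% X\n", orig)        // AA BB CC DD EE
}
```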
-
-func (e *PublicKey) Encrypt(random io.Reader, kdfParams []byte, plain []byte, hash crypto.Hash, kdfKeySize int) (Vx *big.Int, Vy *big.Int, C []byte, err error) {
- // Vx, Vy - encryption key
-
- // Note for Curve 25519 - curve25519 library already does key
- // clamping in scalarMult, so we can use generic random scalar
- // generation from elliptic.
- priv, Vx, Vy, err := elliptic.GenerateKey(e.Curve, random)
- if err != nil {
- return nil, nil, nil, err
- }
-
- // Sx, Sy - shared secret
- Sx, _ := e.Curve.ScalarMult(e.X, e.Y, priv)
-
- // Encrypt the payload with the KDF-ed S as the encryption key. Pass
- // the ciphertext along with V to the recipient. The recipient can
- // derive S from V and their private key, apply KDF(S) on their
- // own to recover the encryption key, and decrypt the ciphertext,
- // revealing the key used for symmetric encryption later.
-
- plain = PadBuffer(plain, 8)
- key := e.KDF(Sx.Bytes(), kdfParams, hash)
-
- // Take only as many bytes from key as the key length (the hash
- // result might be bigger)
- encrypted, err := AESKeyWrap(key[:kdfKeySize], plain)
-
- return Vx, Vy, encrypted, err
-}
-
-func (e *PrivateKey) DecryptShared(X, Y *big.Int) []byte {
- Sx, _ := e.Curve.ScalarMult(X, Y, e.X.Bytes())
- return Sx.Bytes()
-}
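Putting the pieces together, a hedged fragment of the full exchange (pub, priv, kdfParams, and sessionKey are placeholders; kdfKeySize 16 selects AES-128):

```go
// Sender side: V = (Vx, Vy) is the ephemeral public point; wrapped is
// the padded, AES-key-wrapped payload. All names are placeholders.
Vx, Vy, wrapped, err := pub.Encrypt(rand.Reader, kdfParams, sessionKey, crypto.SHA256, 16)

// Recipient side: recompute S from V and the private scalar, re-derive
// the KEK with the same KDF parameters, unwrap, and strip the padding.
S := priv.DecryptShared(Vx, Vy)
kek := priv.KDF(S, kdfParams, crypto.SHA256)
padded, uerr := ecdh.AESKeyUnwrap(kek[:16], wrapped)
plain := ecdh.UnpadBuffer(padded, len(sessionKey))
```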
-
-func countBits(buffer []byte) int {
- var headerLen int
- switch buffer[0] {
- case 0x4:
- headerLen = 3
- case 0x40:
- headerLen = 7
- default:
- // Unexpected header - but we can still count the bits.
- val := buffer[0]
- headerLen = 0
- for val > 0 {
- val = val / 2
- headerLen++
- }
- }
-
- return headerLen + (len(buffer)-1)*8
-}
-
-// elliptic.Marshal and elliptic.Unmarshal only marshal uncompressed
-// 0x4 MPI types. These functions check if the curve is cv25519,
-// and if so, use the 0x40 compressed type to (un)marshal. Otherwise,
-// elliptic.(Un)marshal is called.
-
-// Marshal encodes point into either 0x4 uncompressed point form, or
-// 0x40 compressed point for Curve 25519.
-func Marshal(curve elliptic.Curve, x, y *big.Int) (buf []byte, bitSize int) {
- // NOTE: Read more about MPI encoding in the RFC:
- // https://tools.ietf.org/html/rfc4880#section-3.2
-
- // We are required to encode the size in bits, counting from the
- // most-significant non-zero bit. So assuming that the buffer never
- // starts with 0x00, we only need to count bits in the first byte
- // - and in the current implementation it will always be 0x4 or 0x40.
-
- cv, ok := curve25519.ToCurve25519(curve)
- if ok {
- buf = cv.MarshalType40(x, y)
- } else {
- buf = elliptic.Marshal(curve, x, y)
- }
-
- return buf, countBits(buf)
-}
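For example, a 0x40-prefixed Curve25519 point of 33 bytes counts as 7 + 32·8 = 263 bits, and a 0x04 uncompressed point of 65 bytes as 3 + 64·8 = 515 bits. A standalone check of that header math (countBits duplicated from above):

```go
package main

import "fmt"

func countBits(buffer []byte) int {
	var headerLen int
	switch buffer[0] {
	case 0x4:
		headerLen = 3
	case 0x40:
		headerLen = 7
	default:
		// Unexpected header - but we can still count the bits.
		val := buffer[0]
		for val > 0 {
			val /= 2
			headerLen++
		}
	}
	return headerLen + (len(buffer)-1)*8
}

func main() {
	p4 := append([]byte{0x04}, make([]byte, 64)...)  // 65-byte uncompressed point
	p40 := append([]byte{0x40}, make([]byte, 32)...) // 33-byte compressed point
	fmt.Println(countBits(p4), countBits(p40))       // 515 263
}
```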
-
-// Unmarshal converts a point, serialized by Marshal, into an x, y pair.
-// For 0x40 compressed points (for Curve 25519), y will always be 0.
-// It is an error if the point is not on the curve; on error, x = nil.
-func Unmarshal(curve elliptic.Curve, data []byte) (x, y *big.Int) {
- cv, ok := curve25519.ToCurve25519(curve)
- if ok {
- return cv.UnmarshalType40(data)
- }
-
- return elliptic.Unmarshal(curve, data)
-}
-
-func GenerateKey(curve elliptic.Curve, random io.Reader) (priv *PrivateKey, err error) {
- var privBytes []byte
- var Vx, Vy *big.Int
-
- if _, ok := curve25519.ToCurve25519(curve); ok {
- privBytes = make([]byte, 32)
- _, err = io.ReadFull(random, privBytes)
- if err != nil {
- return nil, err
- }
-
- // NOTE: PGP expects scalars in the reverse byte order from the Go
- // curve25519 library. That's why this bit clamping is mirrored
- // compared to curve25519.go
- privBytes[31] &= 248
- privBytes[0] &= 127
- privBytes[0] |= 64
-
- Vx, Vy = curve.ScalarBaseMult(privBytes)
- } else {
- privBytes, Vx, Vy, err = elliptic.GenerateKey(curve, random)
- if err != nil {
- return nil, err
- }
- }
-
- priv = &PrivateKey{}
- priv.X = new(big.Int).SetBytes(privBytes)
- priv.PublicKey.Curve = curve
- priv.PublicKey.X = Vx
- priv.PublicKey.Y = Vy
- return priv, nil
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go
deleted file mode 100644
index 15dafc55..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
-// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
-// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
-// n. 4, 1985, pp. 469-472.
-//
-// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
-// unsuitable for other protocols. RSA should be used in preference in any
-// case.
-package elgamal // import "github.com/keybase/go-crypto/openpgp/elgamal"
-
-import (
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "io"
- "math/big"
-)
-
-// PublicKey represents an ElGamal public key.
-type PublicKey struct {
- G, P, Y *big.Int
-}
-
-// PrivateKey represents an ElGamal private key.
-type PrivateKey struct {
- PublicKey
- X *big.Int
-}
-
-// Encrypt encrypts the given message to the given public key. The result is a
-// pair of integers. Errors can result from reading random, or because msg is
-// too large to be encrypted to the public key.
-func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
- pLen := (pub.P.BitLen() + 7) / 8
- if len(msg) > pLen-11 {
- err = errors.New("elgamal: message too long")
- return
- }
-
- // EM = 0x02 || PS || 0x00 || M
- em := make([]byte, pLen-1)
- em[0] = 2
- ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
- err = nonZeroRandomBytes(ps, random)
- if err != nil {
- return
- }
- em[len(em)-len(msg)-1] = 0
- copy(mm, msg)
-
- m := new(big.Int).SetBytes(em)
-
- k, err := rand.Int(random, pub.P)
- if err != nil {
- return
- }
-
- c1 = new(big.Int).Exp(pub.G, k, pub.P)
- s := new(big.Int).Exp(pub.Y, k, pub.P)
- c2 = s.Mul(s, m)
- c2.Mod(c2, pub.P)
-
- return
-}
-
-// Decrypt takes two integers, resulting from an ElGamal encryption, and
-// returns the plaintext of the message. An error can result only if the
-// ciphertext is invalid. Users should keep in mind that this is a padding
-// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
-// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
-// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
-// Bleichenbacher, Advances in Cryptology (Crypto '98).
-func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
- s := new(big.Int).Exp(c1, priv.X, priv.P)
- s.ModInverse(s, priv.P)
- s.Mul(s, c2)
- s.Mod(s, priv.P)
- em := s.Bytes()
-
- firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
-
- // The remainder of the plaintext must be a string of non-zero random
- // octets, followed by a 0, followed by the message.
- // lookingForIndex: 1 iff we are still looking for the zero.
- // index: the offset of the first zero byte.
- var lookingForIndex, index int
- lookingForIndex = 1
-
- for i := 1; i < len(em); i++ {
- equals0 := subtle.ConstantTimeByteEq(em[i], 0)
- index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
- lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
- }
-
- if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
- return nil, errors.New("elgamal: decryption error")
- }
- return em[index+1:], nil
-}
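A round-trip sketch; the package offers no ElGamal key generation, so priv is assumed to have been obtained elsewhere (e.g. parsed from an OpenPGP key packet):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/keybase/go-crypto/openpgp/elgamal"
)

// roundTrip encrypts msg to priv's public half and decrypts it again.
func roundTrip(priv *elgamal.PrivateKey, msg []byte) ([]byte, error) {
	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, msg)
	if err != nil {
		return nil, err
	}
	return elgamal.Decrypt(priv, c1, c2)
}

func main() {
	var priv *elgamal.PrivateKey // assumed: loaded from an OpenPGP key
	if priv == nil {
		log.Println("no key available; this is only a sketch")
		return
	}
	out, err := roundTrip(priv, []byte("attack at dawn"))
	fmt.Println(string(out), err)
}
```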
-
-// nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
- _, err = io.ReadFull(rand, s)
- if err != nil {
- return
- }
-
- for i := 0; i < len(s); i++ {
- for s[i] == 0 {
- _, err = io.ReadFull(rand, s[i:i+1])
- if err != nil {
- return
- }
- }
- }
-
- return
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go b/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go
deleted file mode 100644
index 855fa89c..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package errors contains common error types for the OpenPGP packages.
-package errors // import "github.com/keybase/go-crypto/openpgp/errors"
-
-import (
- "strconv"
-)
-
-// A StructuralError is returned when OpenPGP data is found to be syntactically
-// invalid.
-type StructuralError string
-
-func (s StructuralError) Error() string {
- return "openpgp: invalid data: " + string(s)
-}
-
-// UnsupportedError indicates that, although the OpenPGP data is valid, it
-// makes use of currently unimplemented features.
-type UnsupportedError string
-
-func (s UnsupportedError) Error() string {
- return "openpgp: unsupported feature: " + string(s)
-}
-
-// InvalidArgumentError indicates that the caller is in error and passed an
-// incorrect value.
-type InvalidArgumentError string
-
-func (i InvalidArgumentError) Error() string {
- return "openpgp: invalid argument: " + string(i)
-}
-
-// SignatureError indicates that a syntactically valid signature failed to
-// validate.
-type SignatureError string
-
-func (b SignatureError) Error() string {
- return "openpgp: invalid signature: " + string(b)
-}
-
-type keyIncorrectError int
-
-func (ki keyIncorrectError) Error() string {
- return "openpgp: incorrect key"
-}
-
-var ErrKeyIncorrect error = keyIncorrectError(0)
-
-type unknownIssuerError int
-
-func (unknownIssuerError) Error() string {
- return "openpgp: signature made by unknown entity"
-}
-
-var ErrUnknownIssuer error = unknownIssuerError(0)
-
-type keyRevokedError int
-
-func (keyRevokedError) Error() string {
- return "openpgp: signature made by revoked key"
-}
-
-var ErrKeyRevoked error = keyRevokedError(0)
-
-type UnknownPacketTypeError uint8
-
-func (upte UnknownPacketTypeError) Error() string {
- return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
-}
-
-// DeprecatedKeyError indicates that the key was read and verified
-// properly, but uses a deprecated algorithm and can't be used.
-type DeprecatedKeyError string
-
-func (d DeprecatedKeyError) Error() string {
- return "openpgp: key is deprecated: " + string(d)
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/keys.go b/vendor/github.com/keybase/go-crypto/openpgp/keys.go
deleted file mode 100644
index b30315c4..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/keys.go
+++ /dev/null
@@ -1,934 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "crypto/hmac"
- "encoding/binary"
- "io"
- "time"
-
- "github.com/keybase/go-crypto/openpgp/armor"
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/packet"
- "github.com/keybase/go-crypto/rsa"
-)
-
-// PublicKeyType is the armor type for a PGP public key.
-var PublicKeyType = "PGP PUBLIC KEY BLOCK"
-
-// PrivateKeyType is the armor type for a PGP private key.
-var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
-
-// An Entity represents the components of an OpenPGP key: a primary public key
-// (which must be a signing key), one or more identities claimed by that key,
-// and zero or more subkeys, which may be encryption keys.
-type Entity struct {
- PrimaryKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Identities map[string]*Identity // indexed by Identity.Name
- Revocations []*packet.Signature
- // Revocations that are signed by designated revokers. Reading keys
- // will not verify these revocations, because it won't have access to
- // the issuers' public keys; API consumers should verify them instead
- // (or not, and just assume that the key is probably revoked).
- UnverifiedRevocations []*packet.Signature
- Subkeys []Subkey
- BadSubkeys []BadSubkey
-}
-
-// An Identity represents an identity claimed by an Entity and zero or more
-// assertions by other entities about that claim.
-type Identity struct {
- Name string // by convention, has the form "Full Name (comment) <email@example.com>"
- UserId *packet.UserId
- SelfSignature *packet.Signature
- Signatures []*packet.Signature
- Revocation *packet.Signature
-}
-
-// A Subkey is an additional public key in an Entity. Subkeys can be used for
-// encryption.
-type Subkey struct {
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Sig *packet.Signature
- Revocation *packet.Signature
-}
-
-// BadSubkey is one that failed reconstruction, but we'll keep it around for
-// informational purposes.
-type BadSubkey struct {
- Subkey
- Err error
-}
-
-// A Key identifies a specific public key in an Entity. This is either the
-// Entity's primary key or a subkey.
-type Key struct {
- Entity *Entity
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- SelfSignature *packet.Signature
- KeyFlags packet.KeyFlagBits
-}
-
-// A KeyRing provides access to public and private keys.
-type KeyRing interface {
-
- // KeysById returns the set of keys that have the given key id.
- // fp can be optionally supplied, which is the full key fingerprint.
- // If it's provided, then it must match. This comes up in the case
- // of GPG subpacket 33.
- KeysById(id uint64, fp []byte) []Key
-
- // KeysByIdAndUsage returns the set of keys with the given id
- // that also meet the key usage given by requiredUsage.
- // The requiredUsage is expressed as the bitwise-OR of
- // packet.KeyFlag* values.
- // fp can be optionally supplied, which is the full key fingerprint.
- // If it's provided, then it must match. This comes up in the case
- // of GPG subpacket 33.
- KeysByIdUsage(id uint64, fp []byte, requiredUsage byte) []Key
-
- // DecryptionKeys returns all private keys that are valid for
- // decryption.
- DecryptionKeys() []Key
-}
-
-// primaryIdentity returns the Identity marked as primary or the first identity
-// if none are so marked.
-func (e *Entity) primaryIdentity() *Identity {
- var firstIdentity *Identity
- for _, ident := range e.Identities {
- if firstIdentity == nil {
- firstIdentity = ident
- }
- if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
- return ident
- }
- }
- return firstIdentity
-}
-
-// encryptionKey returns the best candidate Key for encrypting a message to the
-// given Entity.
-func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
- candidateSubkey := -1
-
- // Iterate the keys to find the newest, non-revoked key that can
- // encrypt.
- var maxTime time.Time
- for i, subkey := range e.Subkeys {
-
- // NOTE(maxtaco)
- // If there is a Flags subpacket, then we have to follow it, and only
- // use keys that are marked for Encryption of Communication. If there
- // isn't a Flags subpacket, and this is an Encrypt-Only key (right now only ElGamal
- // suffices), then we implicitly use it. The check for primary below is a little
- // more open-ended, but for now, let's be strict and potentially open up
- // if we see bugs in the wild.
- //
- // One more note: old DSA/ElGamal keys tend not to have the Flags subpacket,
- // so this sort of thing is pretty important for encrypting to older keys.
- //
- if ((subkey.Sig.FlagsValid && subkey.Sig.FlagEncryptCommunications) ||
- (!subkey.Sig.FlagsValid && subkey.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal)) &&
- subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
- !subkey.Sig.KeyExpired(now) &&
- subkey.Revocation == nil &&
- (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
- candidateSubkey = i
- maxTime = subkey.Sig.CreationTime
- }
- }
-
- if candidateSubkey != -1 {
- subkey := e.Subkeys[candidateSubkey]
- return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Sig.GetKeyFlags()}, true
- }
-
- // If we don't have any candidate subkeys for encryption and
- // the primary key doesn't have any usage metadata then we
- // assume that the primary key is ok. Or, if the primary key is
- // marked as ok to encrypt to, then we can obviously use it.
- //
- // NOTE(maxtaco) - see note above, how this policy is a little too open-ended
- // for my liking, but leave it for now.
- i := e.primaryIdentity()
- if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications) &&
- e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
- !i.SelfSignature.KeyExpired(now) {
- return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, i.SelfSignature.GetKeyFlags()}, true
- }
-
- // This Entity appears to be signing only.
- return Key{}, false
-}
-
-// signingKey returns the best candidate Key for signing a message with this
-// Entity.
-func (e *Entity) signingKey(now time.Time) (Key, bool) {
- candidateSubkey := -1
-
- // Iterate the keys to find the newest, non-revoked key that can
- // sign.
- var maxTime time.Time
- for i, subkey := range e.Subkeys {
- if (!subkey.Sig.FlagsValid || subkey.Sig.FlagSign) &&
- subkey.PrivateKey.PrivateKey != nil &&
- subkey.PublicKey.PubKeyAlgo.CanSign() &&
- !subkey.Sig.KeyExpired(now) &&
- subkey.Revocation == nil &&
- (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
- candidateSubkey = i
- maxTime = subkey.Sig.CreationTime
- break
- }
- }
-
- if candidateSubkey != -1 {
- subkey := e.Subkeys[candidateSubkey]
- return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Sig.GetKeyFlags()}, true
- }
-
- // If we have no candidate subkey then we assume that it's ok to sign
- // with the primary key.
- i := e.primaryIdentity()
- if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign) &&
- e.PrimaryKey.PubKeyAlgo.CanSign() &&
- !i.SelfSignature.KeyExpired(now) &&
- e.PrivateKey.PrivateKey != nil {
- return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, i.SelfSignature.GetKeyFlags()}, true
- }
-
- return Key{}, false
-}
-
-// An EntityList contains one or more Entities.
-type EntityList []*Entity
-
-func keyMatchesIdAndFingerprint(key *packet.PublicKey, id uint64, fp []byte) bool {
- if key.KeyId != id {
- return false
- }
- if fp == nil {
- return true
- }
- return hmac.Equal(fp, key.Fingerprint[:])
-}
-
-// KeysById returns the set of keys that have the given key id.
-// fp can be optionally supplied, which is the full key fingerprint.
-// If it's provided, then it must match. This comes up in the case
-// of GPG subpacket 33.
-func (el EntityList) KeysById(id uint64, fp []byte) (keys []Key) {
- for _, e := range el {
- if keyMatchesIdAndFingerprint(e.PrimaryKey, id, fp) {
- var selfSig *packet.Signature
- for _, ident := range e.Identities {
- if selfSig == nil {
- selfSig = ident.SelfSignature
- } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
- selfSig = ident.SelfSignature
- break
- }
- }
-
- var keyFlags packet.KeyFlagBits
- for _, ident := range e.Identities {
- keyFlags.Merge(ident.SelfSignature.GetKeyFlags())
- }
-
- keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, keyFlags})
- }
-
- for _, subKey := range e.Subkeys {
- if keyMatchesIdAndFingerprint(subKey.PublicKey, id, fp) {
-
- // If there's both a revocation and a sig, then take the
- // revocation. Otherwise, we can proceed with the sig.
- sig := subKey.Revocation
- if sig == nil {
- sig = subKey.Sig
- }
-
- keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, sig, sig.GetKeyFlags()})
- }
- }
- }
- return
-}
-
-// KeysByIdAndUsage returns the set of keys with the given id that also meet
-// the key usage given by requiredUsage. The requiredUsage is expressed as
-// the bitwise-OR of packet.KeyFlag* values.
-// fp can be optionally supplied, which is the full key fingerprint.
-// If it's provided, then it must match. This comes up in the case
-// of GPG subpacket 33.
-func (el EntityList) KeysByIdUsage(id uint64, fp []byte, requiredUsage byte) (keys []Key) {
- for _, key := range el.KeysById(id, fp) {
- if len(key.Entity.Revocations) > 0 {
- continue
- }
-
- if key.SelfSignature.RevocationReason != nil {
- continue
- }
-
- if requiredUsage != 0 {
- var usage byte
-
- switch {
- case key.KeyFlags.Valid:
- usage = key.KeyFlags.BitField
-
- case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal:
- // We also need to handle the case where, although the sig's
- // flags aren't valid, the key is implicitly usable for
- // encryption by virtue of being ElGamal. See also the comment
- // in encryptionKey() above.
- usage |= packet.KeyFlagEncryptCommunications
- usage |= packet.KeyFlagEncryptStorage
-
- case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoDSA ||
- key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoECDSA ||
- key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoEdDSA:
- usage |= packet.KeyFlagSign
-
- // For a primary RSA key without any key flags, be as permissive
- // as possible.
- case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoRSA &&
- keyMatchesIdAndFingerprint(key.Entity.PrimaryKey, id, fp):
- usage = (packet.KeyFlagCertify | packet.KeyFlagSign |
- packet.KeyFlagEncryptCommunications | packet.KeyFlagEncryptStorage)
- }
-
- if usage&requiredUsage != requiredUsage {
- continue
- }
- }
-
- keys = append(keys, key)
- }
- return
-}
-
-// DecryptionKeys returns all private keys that are valid for decryption.
-func (el EntityList) DecryptionKeys() (keys []Key) {
- for _, e := range el {
- for _, subKey := range e.Subkeys {
- if subKey.PrivateKey != nil && subKey.PrivateKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
- keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Sig.GetKeyFlags()})
- }
- }
- }
- return
-}
-
-// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file.
-func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
- block, err := armor.Decode(r)
- if err == io.EOF {
- return nil, errors.InvalidArgumentError("no armored data found")
- }
- if err != nil {
- return nil, err
- }
- if block.Type != PublicKeyType && block.Type != PrivateKeyType {
- return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
- }
-
- return ReadKeyRing(block.Body)
-}
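
A hedged usage sketch for ReadArmoredKeyRing (the file name is hypothetical):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/keybase/go-crypto/openpgp"
)

func main() {
	f, err := os.Open("pubring.asc") // hypothetical path to an armored keyring
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	el, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range el {
		for name := range e.Identities {
			fmt.Printf("%X %s\n", e.PrimaryKey.Fingerprint[:], name)
		}
	}
}
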
-
-// ReadKeyRing reads one or more public/private keys. Unsupported keys are
- // ignored as long as at least one valid key is found.
-func ReadKeyRing(r io.Reader) (el EntityList, err error) {
- packets := packet.NewReader(r)
- var lastUnsupportedError error
-
- for {
- var e *Entity
- e, err = ReadEntity(packets)
- if err != nil {
- // TODO: warn about skipped unsupported/unreadable keys
- if _, ok := err.(errors.UnsupportedError); ok {
- lastUnsupportedError = err
- err = readToNextPublicKey(packets)
- } else if _, ok := err.(errors.StructuralError); ok {
- // Skip unreadable, badly-formatted keys
- lastUnsupportedError = err
- err = readToNextPublicKey(packets)
- }
- if err == io.EOF {
- err = nil
- break
- }
- if err != nil {
- el = nil
- break
- }
- } else {
- el = append(el, e)
- }
- }
-
- if len(el) == 0 && err == nil {
- err = lastUnsupportedError
- }
- return
-}
-
- // readToNextPublicKey reads packets until the start of the next entity and leaves
-// the first packet of the new entity in the Reader.
-func readToNextPublicKey(packets *packet.Reader) (err error) {
- var p packet.Packet
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return
- } else if err != nil {
- if _, ok := err.(errors.UnsupportedError); ok {
- err = nil
- continue
- }
- return
- }
-
- if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
- packets.Unread(p)
- return
- }
- }
-
- panic("unreachable")
-}
-
-// ReadEntity reads an entity (public key, identities, subkeys etc) from the
-// given Reader.
-func ReadEntity(packets *packet.Reader) (*Entity, error) {
- e := new(Entity)
- e.Identities = make(map[string]*Identity)
-
- p, err := packets.Next()
- if err != nil {
- return nil, err
- }
-
- var ok bool
- if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
- if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
- packets.Unread(p)
- return nil, errors.StructuralError("first packet was not a public/private key")
- } else {
- e.PrimaryKey = &e.PrivateKey.PublicKey
- }
- }
-
- if !e.PrimaryKey.PubKeyAlgo.CanSign() {
- return nil, errors.StructuralError("primary key cannot be used for signatures")
- }
-
- var current *Identity
- var revocations []*packet.Signature
-
- designatedRevokers := make(map[uint64]bool)
-EachPacket:
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return nil, err
- }
- switch pkt := p.(type) {
- case *packet.UserId:
-
- // Make a new Identity object that we might wind up throwing away.
- // We'll only add it if we get a valid self-signature over this
- // userID.
- current = new(Identity)
- current.Name = pkt.Id
- current.UserId = pkt
- case *packet.Signature:
- if pkt.SigType == packet.SigTypeKeyRevocation {
- // These revocations won't revoke UIDs (see
- // SigTypeIdentityRevocation). Handle these first,
- // because the key might have a revocation coming from
- // another key (designated revoker).
- revocations = append(revocations, pkt)
- continue
- }
-
- // These are signatures by other people on this key. Let's just ignore them
- // from the beginning, since they shouldn't affect our key decoding one way
- // or the other.
- if pkt.IssuerKeyId != nil && *pkt.IssuerKeyId != e.PrimaryKey.KeyId {
- continue
- }
-
- // If this is a signature made by the keyholder, and the signature has stubbed out
- // critical subpackets, then *now* we need to bail out.
- if e := pkt.StubbedOutCriticalError; e != nil {
- return nil, e
- }
-
- // Next handle the case of a self-signature. According to RFC 4880,
- // Section 5.2.3.3, if there are several self-signatures,
- // we should take the newer one. If they were both created
- // at the same time, but one of them has keyflags specified and the
- // other doesn't, keep the one with the keyflags. We have actually
- // seen this in the wild (see the 'Yield' test in read_test.go).
- // If there is a tie, and both have the same value for FlagsValid,
- // then "last writer wins."
- //
- // HOWEVER! We have seen yet more keys in the wild (see the 'Spiros'
- // test in read_test.go), in which the later self-signature is a bunch
- // of junk, and doesn't even specify key flags. Does it really make
- // sense to overwrite reasonable key flags with the empty set? I'm not
- // sure what that would be trying to achieve, and plus GPG seems to be
- // ok with this situation, and ignores the later (empty) keyflag set.
- // So further tighten our overwrite rules, and only allow the later
- // signature to overwrite the earlier signature if so doing won't
- // trash the key flags.
- if current != nil &&
- (current.SelfSignature == nil ||
- (!pkt.CreationTime.Before(current.SelfSignature.CreationTime) &&
- (pkt.FlagsValid || !current.SelfSignature.FlagsValid))) &&
- (pkt.SigType == packet.SigTypePositiveCert || pkt.SigType == packet.SigTypeGenericCert) &&
- pkt.IssuerKeyId != nil &&
- *pkt.IssuerKeyId == e.PrimaryKey.KeyId {
-
- if err = e.PrimaryKey.VerifyUserIdSignature(current.Name, e.PrimaryKey, pkt); err == nil {
-
- current.SelfSignature = pkt
-
- // NOTE(maxtaco) 2016.01.11
- // Only register an identity once we've gotten a valid self-signature.
- // It's possible therefore for us to throw away `current` in the case
- // no valid self-signatures were found. That's OK as long as there are
- // other identities that make sense.
- //
- // NOTE! We might later see a revocation for this very same UID, and it
- // won't be undone. We've preserved this feature from the original
- // Google OpenPGP we forked from.
- e.Identities[current.Name] = current
- } else {
- // We should warn that there was a failure here, rather than raise an
- // error, since this shouldn't be a fail-stop condition.
- }
- } else if current != nil && pkt.SigType == packet.SigTypeIdentityRevocation {
- if err = e.PrimaryKey.VerifyUserIdSignature(current.Name, e.PrimaryKey, pkt); err == nil {
- // Note: we are not removing the identity from
- // e.Identities. Caller can always filter by Revocation
- // field to ignore revoked identities.
- current.Revocation = pkt
- }
- } else if pkt.SigType == packet.SigTypeDirectSignature {
- if err = e.PrimaryKey.VerifyRevocationSignature(e.PrimaryKey, pkt); err == nil {
- if desig := pkt.DesignatedRevoker; desig != nil {
- // If it's a designated revoker signature, take the last 8 octets
- // of fingerprint as Key ID and save it to designatedRevokers
- // map. We consult this map later to see if a foreign
- // revocation should be added to UnverifiedRevocations.
- keyID := binary.BigEndian.Uint64(desig.Fingerprint[len(desig.Fingerprint)-8:])
- designatedRevokers[keyID] = true
- }
- }
- } else if current == nil {
- // NOTE(maxtaco)
- //
- // See https://github.com/keybase/client/issues/2666
- //
- // There might have been a user attribute picture before this signature,
- // in which case this is still a valid PGP key. In the future we might
- // not ignore user attributes (like picture). But either way, it doesn't
- // make sense to bail out here. Keep looking for other valid signatures.
- //
- // Used to be:
- // return nil, errors.StructuralError("signature packet found before user id packet")
- } else {
- current.Signatures = append(current.Signatures, pkt)
- }
- case *packet.PrivateKey:
- if !pkt.IsSubkey {
- packets.Unread(p)
- break EachPacket
- }
- err = addSubkey(e, packets, &pkt.PublicKey, pkt)
- if err != nil {
- return nil, err
- }
- case *packet.PublicKey:
- if !pkt.IsSubkey {
- packets.Unread(p)
- break EachPacket
- }
- err = addSubkey(e, packets, pkt, nil)
- if err != nil {
- return nil, err
- }
- default:
- // we ignore unknown packets
- }
- }
-
- if len(e.Identities) == 0 {
- return nil, errors.StructuralError("entity without any identities")
- }
-
- for _, revocation := range revocations {
- if revocation.IssuerKeyId == nil || *revocation.IssuerKeyId == e.PrimaryKey.KeyId {
- // Key revokes itself, something that we can verify.
- err = e.PrimaryKey.VerifyRevocationSignature(e.PrimaryKey, revocation)
- if err == nil {
- e.Revocations = append(e.Revocations, revocation)
- } else {
- return nil, errors.StructuralError("revocation signature signed by alternate key")
- }
- } else if revocation.IssuerKeyId != nil {
- if _, ok := designatedRevokers[*revocation.IssuerKeyId]; ok {
- // Revocation is done by certified designated revoker,
- // but we can't verify the revocation.
- e.UnverifiedRevocations = append(e.UnverifiedRevocations, revocation)
- }
- }
- }
-
- return e, nil
-}
-
-func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
- var subKey Subkey
- subKey.PublicKey = pub
- subKey.PrivateKey = priv
- var lastErr error
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
- sig, ok := p.(*packet.Signature)
- if !ok {
- // Hit a non-signature packet, so assume we're up to the next key
- packets.Unread(p)
- break
- }
- if st := sig.SigType; st != packet.SigTypeSubkeyBinding && st != packet.SigTypeSubkeyRevocation {
-
- // Note(maxtaco):
- // We used to error out here, but instead, let's fast-forward past
- // packets that are in the wrong place (like misplaced 0x13 signatures)
- // until we get to one that works. For a test case,
- // see TestWithBadSubkeySignaturePackets.
-
- continue
- }
- err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig)
- if err != nil {
- // Invalid signature, so again, no need to abandon all hope; just continue and
- // make a note of the error we hit.
- lastErr = errors.StructuralError("subkey signature invalid: " + err.Error())
- continue
- }
- switch sig.SigType {
- case packet.SigTypeSubkeyBinding:
- // Does the "new" sig set expiration to a later date than
- // the "previous" sig?
- if subKey.Sig == nil || subKey.Sig.ExpiresBeforeOther(sig) {
- subKey.Sig = sig
- }
- case packet.SigTypeSubkeyRevocation:
- // First writer wins
- if subKey.Revocation == nil {
- subKey.Revocation = sig
- }
- }
- }
-
- if subKey.Sig != nil {
- if err := subKey.PublicKey.ErrorIfDeprecated(); err != nil {
- // Key passed signature check but is deprecated.
- subKey.Sig = nil
- lastErr = err
- }
- }
-
- if subKey.Sig != nil {
- e.Subkeys = append(e.Subkeys, subKey)
- } else {
- if lastErr == nil {
- lastErr = errors.StructuralError("Subkey wasn't signed; expected a 'binding' signature")
- }
- e.BadSubkeys = append(e.BadSubkeys, BadSubkey{Subkey: subKey, Err: lastErr})
- }
- return nil
-}
-
-const defaultRSAKeyBits = 2048
-
-// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
-// single identity composed of the given full name, comment and email, any of
-// which may be empty but must not contain any of "()<>\x00".
-// If config is nil, sensible defaults will be used.
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
- currentTime := config.Now()
-
- bits := defaultRSAKeyBits
- if config != nil && config.RSABits != 0 {
- bits = config.RSABits
- }
-
- uid := packet.NewUserId(name, comment, email)
- if uid == nil {
- return nil, errors.InvalidArgumentError("user id field contained invalid characters")
- }
- signingPriv, err := rsa.GenerateKey(config.Random(), bits)
- if err != nil {
- return nil, err
- }
- encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
- if err != nil {
- return nil, err
- }
-
- e := &Entity{
- PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
- PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
- Identities: make(map[string]*Identity),
- }
- isPrimaryId := true
- e.Identities[uid.Id] = &Identity{
- Name: uid.Id,
- UserId: uid,
- SelfSignature: &packet.Signature{
- CreationTime: currentTime,
- SigType: packet.SigTypePositiveCert,
- PubKeyAlgo: packet.PubKeyAlgoRSA,
- Hash: config.Hash(),
- IsPrimaryId: &isPrimaryId,
- FlagsValid: true,
- FlagSign: true,
- FlagCertify: true,
- IssuerKeyId: &e.PrimaryKey.KeyId,
- },
- }
-
- // If the user passes in a DefaultHash via packet.Config, set the
- // PreferredHash for the SelfSignature.
- if config != nil && config.DefaultHash != 0 {
- e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
- }
-
- // Likewise for DefaultCipher.
- if config != nil && config.DefaultCipher != 0 {
- e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
- }
-
- e.Subkeys = make([]Subkey, 1)
- e.Subkeys[0] = Subkey{
- PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
- PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
- Sig: &packet.Signature{
- CreationTime: currentTime,
- SigType: packet.SigTypeSubkeyBinding,
- PubKeyAlgo: packet.PubKeyAlgoRSA,
- Hash: config.Hash(),
- FlagsValid: true,
- FlagEncryptStorage: true,
- FlagEncryptCommunications: true,
- IssuerKeyId: &e.PrimaryKey.KeyId,
- },
- }
- e.Subkeys[0].PublicKey.IsSubkey = true
- e.Subkeys[0].PrivateKey.IsSubkey = true
-
- return e, nil
-}
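
A sketch of generating and exporting a fresh entity under these defaults (assumes the armor subpackage from the same fork; note that SerializePrivate produces the self-signatures as a side effect, so it runs before the public Serialize):

package main

import (
	"bytes"
	"log"
	"os"

	"github.com/keybase/go-crypto/openpgp"
	"github.com/keybase/go-crypto/openpgp/armor"
)

func main() {
	e, err := openpgp.NewEntity("Alice Example", "demo", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	// SerializePrivate signs the identities and subkeys as a side effect.
	var priv bytes.Buffer
	if err := e.SerializePrivate(&priv, nil); err != nil {
		log.Fatal(err)
	}

	// Now the public half can be exported in armored form.
	w, err := armor.Encode(os.Stdout, openpgp.PublicKeyType, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := e.Serialize(w); err != nil {
		log.Fatal(err)
	}
	w.Close()
}
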
-
-// SerializePrivate serializes an Entity, including private key material, to
-// the given Writer. For now, it must only be used on an Entity returned from
-// NewEntity.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
- err = e.PrivateKey.Serialize(w)
- if err != nil {
- return
- }
- for _, ident := range e.Identities {
- err = ident.UserId.Serialize(w)
- if err != nil {
- return
- }
- if e.PrivateKey.PrivateKey != nil {
- err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
- if err != nil {
- return
- }
- }
- err = ident.SelfSignature.Serialize(w)
- if err != nil {
- return
- }
- }
- for _, subkey := range e.Subkeys {
- err = subkey.PrivateKey.Serialize(w)
- if err != nil {
- return
- }
- if e.PrivateKey.PrivateKey != nil && !config.ReuseSignatures() {
- // If not reusing existing signatures, sign subkey using private key
- // (subkey binding), but also sign primary key using subkey (primary
- // key binding) if subkey is used for signing.
- if subkey.Sig.FlagSign {
- err = subkey.Sig.CrossSignKey(e.PrimaryKey, subkey.PrivateKey, config)
- if err != nil {
- return err
- }
- }
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return
- }
- }
-
- if subkey.Revocation != nil {
- err = subkey.Revocation.Serialize(w)
- if err != nil {
- return
- }
- }
-
- err = subkey.Sig.Serialize(w)
- if err != nil {
- return
- }
- }
- return nil
-}
-
-// Serialize writes the public part of the given Entity to w. (No private
-// key material will be output).
-func (e *Entity) Serialize(w io.Writer) error {
- err := e.PrimaryKey.Serialize(w)
- if err != nil {
- return err
- }
- for _, ident := range e.Identities {
- err = ident.UserId.Serialize(w)
- if err != nil {
- return err
- }
- err = ident.SelfSignature.Serialize(w)
- if err != nil {
- return err
- }
- for _, sig := range ident.Signatures {
- err = sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- }
- for _, subkey := range e.Subkeys {
- err = subkey.PublicKey.Serialize(w)
- if err != nil {
- return err
- }
-
- if subkey.Revocation != nil {
- err = subkey.Revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- err = subkey.Sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// SignIdentity adds a signature to e, from signer, attesting that identity is
-// associated with e. The provided identity must already be an element of
-// e.Identities and the private key of signer must have been decrypted if
-// necessary.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
- if signer.PrivateKey == nil {
- return errors.InvalidArgumentError("signing Entity must have a private key")
- }
- if signer.PrivateKey.Encrypted {
- return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
- }
- ident, ok := e.Identities[identity]
- if !ok {
- return errors.InvalidArgumentError("given identity string not found in Entity")
- }
-
- sig := &packet.Signature{
- SigType: packet.SigTypeGenericCert,
- PubKeyAlgo: signer.PrivateKey.PubKeyAlgo,
- Hash: config.Hash(),
- CreationTime: config.Now(),
- IssuerKeyId: &signer.PrivateKey.KeyId,
- }
- if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
- return err
- }
- ident.Signatures = append(ident.Signatures, sig)
- return nil
-}
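
A hedged sketch of third-party certification via SignIdentity; the entity variables are placeholders, and the signer's private key must already be decrypted per the checks above:

package example

import "github.com/keybase/go-crypto/openpgp"

// certifyAll has signer attest every identity on target. A nil config
// uses the defaults described above (SHA-256, current time).
func certifyAll(target, signer *openpgp.Entity) error {
	for name := range target.Identities {
		if err := target.SignIdentity(name, signer, nil); err != nil {
			return err
		}
	}
	return nil
}
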
-
-// CopySubkeyRevocations copies subkey revocations from the src Entity over
-// to the receiver entity. We need this because `gpg --export-secret-key` does
-// not appear to output subkey revocations. In this case we need to manually
-// merge with the output of `gpg --export`.
-func (e *Entity) CopySubkeyRevocations(src *Entity) {
- m := make(map[[20]byte]*packet.Signature)
- for _, subkey := range src.Subkeys {
- if subkey.Revocation != nil {
- m[subkey.PublicKey.Fingerprint] = subkey.Revocation
- }
- }
- for i, subkey := range e.Subkeys {
- if r := m[subkey.PublicKey.Fingerprint]; r != nil {
- e.Subkeys[i].Revocation = r
- }
- }
-}
-
- // FindVerifiedDesignatedRevoke tries to confirm any designated
- // revocation of the entity. For this function to work, the revocation
- // issuer's key must be present in the keyring. The first successfully
- // verified designated revocation is returned along with the key that
- // verified it.
-func FindVerifiedDesignatedRevoke(keyring KeyRing, entity *Entity) (*packet.Signature, *Key) {
- for _, sig := range entity.UnverifiedRevocations {
- if sig.IssuerKeyId == nil {
- continue
- }
-
- issuerKeyId := *sig.IssuerKeyId
- issuerFingerprint := sig.IssuerFingerprint
- keys := keyring.KeysByIdUsage(issuerKeyId, issuerFingerprint, packet.KeyFlagSign)
- if len(keys) == 0 {
- continue
- }
- for _, key := range keys {
- err := key.PublicKey.VerifyRevocationSignature(entity.PrimaryKey, sig)
- if err == nil {
- return sig, &key
- }
- }
- }
-
- return nil, nil
-}
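
A small sketch of how a caller might consume FindVerifiedDesignatedRevoke (helper name is hypothetical):

package example

import (
	"fmt"

	"github.com/keybase/go-crypto/openpgp"
)

// reportDesignatedRevocation checks whether any designated revocation on
// entity can be verified against keys present in keyring.
func reportDesignatedRevocation(keyring openpgp.KeyRing, entity *openpgp.Entity) {
	sig, key := openpgp.FindVerifiedDesignatedRevoke(keyring, entity)
	if sig == nil {
		fmt.Println("no verifiable designated revocation")
		return
	}
	fmt.Printf("revoked; verified by key %X\n", key.PublicKey.Fingerprint[:])
}
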
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go
deleted file mode 100644
index f023fe53..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "compress/bzip2"
- "compress/flate"
- "compress/zlib"
- "io"
- "strconv"
-
- "github.com/keybase/go-crypto/openpgp/errors"
-)
-
-// Compressed represents a compressed OpenPGP packet. The decompressed contents
-// will contain more OpenPGP packets. See RFC 4880, section 5.6.
-type Compressed struct {
- Body io.Reader
-}
-
-const (
- NoCompression = flate.NoCompression
- BestSpeed = flate.BestSpeed
- BestCompression = flate.BestCompression
- DefaultCompression = flate.DefaultCompression
-)
-
-// CompressionConfig contains compressor configuration settings.
-type CompressionConfig struct {
- // Level is the compression level to use. It must be set to
- // between -1 and 9, with -1 causing the compressor to use the
- // default compression level, 0 causing the compressor to use
- // no compression and 1 to 9 representing increasing (better,
- // slower) compression levels. If Level is less than -1 or
- // more than 9, a non-nil error will be returned during
- // encryption. See the constants above for convenient common
- // settings for Level.
- Level int
-}
-
-func (c *Compressed) parse(r io.Reader) error {
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
-
- switch buf[0] {
- case 1:
- c.Body = flate.NewReader(r)
- case 2:
- c.Body, err = zlib.NewReader(r)
- case 3:
- c.Body = bzip2.NewReader(r)
- default:
- err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
- }
-
- return err
-}
-
- // compressedWriteCloser represents the serialized compression stream
-// header and the compressor. Its Close() method ensures that both the
-// compressor and serialized stream header are closed. Its Write()
-// method writes to the compressor.
-type compressedWriteCloser struct {
- sh io.Closer // Stream Header
- c io.WriteCloser // Compressor
-}
-
-func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
- return cwc.c.Write(p)
-}
-
-func (cwc compressedWriteCloser) Close() (err error) {
- err = cwc.c.Close()
- if err != nil {
- return err
- }
-
- return cwc.sh.Close()
-}
-
-// SerializeCompressed serializes a compressed data packet to w and
-// returns a WriteCloser to which the literal data packets themselves
-// can be written and which MUST be closed on completion. If cc is
-// nil, sensible defaults will be used to configure the compression
-// algorithm.
-func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
- compressed, err := serializeStreamHeader(w, packetTypeCompressed)
- if err != nil {
- return
- }
-
- _, err = compressed.Write([]byte{uint8(algo)})
- if err != nil {
- return
- }
-
- level := DefaultCompression
- if cc != nil {
- level = cc.Level
- }
-
- var compressor io.WriteCloser
- switch algo {
- case CompressionZIP:
- compressor, err = flate.NewWriter(compressed, level)
- case CompressionZLIB:
- compressor, err = zlib.NewWriterLevel(compressed, level)
- default:
- s := strconv.Itoa(int(algo))
- err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
- }
- if err != nil {
- return
- }
-
- literaldata = compressedWriteCloser{compressed, compressor}
-
- return
-}
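
A round-trip sketch for SerializeCompressed and the parser above; the nopCloser wrapper is local glue, not part of this package:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/keybase/go-crypto/openpgp/packet"
)

type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	wc, err := packet.SerializeCompressed(nopCloser{&buf}, packet.CompressionZLIB, nil)
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(wc, "hello, compressed packets")
	if err := wc.Close(); err != nil {
		log.Fatal(err)
	}

	// Read the packet back and decompress its body.
	p, err := packet.Read(&buf)
	if err != nil {
		log.Fatal(err)
	}
	c := p.(*packet.Compressed)
	body, err := ioutil.ReadAll(c.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", body)
}
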
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go
deleted file mode 100644
index f4125e18..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "crypto/rand"
- "io"
- "time"
-)
-
-// Config collects a number of parameters along with sensible defaults.
-// A nil *Config is valid and results in all default values.
-type Config struct {
- // Rand provides the source of entropy.
- // If nil, the crypto/rand Reader is used.
- Rand io.Reader
- // DefaultHash is the default hash function to be used.
- // If zero, SHA-256 is used.
- DefaultHash crypto.Hash
- // DefaultCipher is the cipher to be used.
- // If zero, AES-128 is used.
- DefaultCipher CipherFunction
- // Time returns the current time as the number of seconds since the
- // epoch. If Time is nil, time.Now is used.
- Time func() time.Time
- // DefaultCompressionAlgo is the compression algorithm to be
- // applied to the plaintext before encryption. If zero, no
- // compression is done.
- DefaultCompressionAlgo CompressionAlgo
- // CompressionConfig configures the compression settings.
- CompressionConfig *CompressionConfig
- // S2KCount is only used for symmetric encryption. It
- // determines the strength of the passphrase stretching when
- // the said passphrase is hashed to produce a key. S2KCount
- // should be between 1024 and 65011712, inclusive. If Config
- // is nil or S2KCount is 0, the value 65536 is used. Not all
- // values in the above range can be represented. S2KCount will
- // be rounded up to the next representable value if it cannot
- // be encoded exactly. When set, it is strongly encouraged to
- // use a value that is at least 65536. See RFC 4880 Section
- // 3.7.1.3.
- S2KCount int
- // RSABits is the number of bits in new RSA keys made with NewEntity.
- // If zero, then 2048 bit keys are created.
- RSABits int
- // ReuseSignatures tells us to reuse existing Signatures
- // on serialized output.
- ReuseSignaturesOnSerialize bool
-}
-
-func (c *Config) Random() io.Reader {
- if c == nil || c.Rand == nil {
- return rand.Reader
- }
- return c.Rand
-}
-
-func (c *Config) Hash() crypto.Hash {
- if c == nil || uint(c.DefaultHash) == 0 {
- return crypto.SHA256
- }
- return c.DefaultHash
-}
-
-func (c *Config) Cipher() CipherFunction {
- if c == nil || uint8(c.DefaultCipher) == 0 {
- return CipherAES128
- }
- return c.DefaultCipher
-}
-
-func (c *Config) Now() time.Time {
- if c == nil || c.Time == nil {
- return time.Now()
- }
- return c.Time()
-}
-
-func (c *Config) Compression() CompressionAlgo {
- if c == nil {
- return CompressionNone
- }
- return c.DefaultCompressionAlgo
-}
-
-func (c *Config) PasswordHashIterations() int {
- if c == nil || c.S2KCount == 0 {
- return 0
- }
- return c.S2KCount
-}
-
-func (c *Config) ReuseSignatures() bool {
- return c != nil && c.ReuseSignaturesOnSerialize
-}
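
A quick demonstration that a nil *Config is usable, per the accessors above:

package main

import (
	"crypto"
	"fmt"

	"github.com/keybase/go-crypto/openpgp/packet"
)

func main() {
	var c *packet.Config // nil is valid: every accessor falls back to a default

	fmt.Println(c.Hash() == crypto.SHA256)                  // true
	fmt.Println(c.Cipher() == packet.CipherAES128)          // true
	fmt.Println(c.Compression() == packet.CompressionNone)  // true
	fmt.Println(c.Now().IsZero())                           // false: time.Now()
}
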
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go
deleted file mode 100644
index 41de661d..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package packet
-
-import (
- "bytes"
- "io"
- "math/big"
-
- "github.com/keybase/go-crypto/openpgp/ecdh"
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/s2k"
-)
-
- // ECDHKdfParams generates the KDF parameters sequence for the given
-// PublicKey. See https://tools.ietf.org/html/rfc6637#section-8
-func ECDHKdfParams(pub *PublicKey) []byte {
- buf := new(bytes.Buffer)
- oid := pub.ec.oid
- buf.WriteByte(byte(len(oid)))
- buf.Write(oid)
- buf.WriteByte(18) // ECDH TYPE
- pub.ecdh.serialize(buf)
- buf.WriteString("Anonymous Sender ")
- buf.Write(pub.Fingerprint[:])
- return buf.Bytes()
-}
-
-func decryptKeyECDH(priv *PrivateKey, X, Y *big.Int, C []byte) (out []byte, err error) {
- ecdhpriv, ok := priv.PrivateKey.(*ecdh.PrivateKey)
- if !ok {
- return nil, errors.InvalidArgumentError("bad internal ECDH key")
- }
-
- Sx := ecdhpriv.DecryptShared(X, Y)
-
- kdfParams := ECDHKdfParams(&priv.PublicKey)
- hash, ok := s2k.HashIdToHash(byte(priv.ecdh.KdfHash))
- if !ok {
- return nil, errors.InvalidArgumentError("invalid hash id in private key")
- }
-
- key := ecdhpriv.KDF(Sx, kdfParams, hash)
- keySize := CipherFunction(priv.ecdh.KdfAlgo).KeySize()
-
- decrypted, err := ecdh.AESKeyUnwrap(key[:keySize], C)
- if err != nil {
- return nil, err
- }
-
- // We have to "read ahead" to discover the real length of the
- // encryption key and properly unpad the buffer.
- cipherFunc := CipherFunction(decrypted[0])
- // +3 bytes = 1-byte cipher id and 2-byte checksum.
- out = ecdh.UnpadBuffer(decrypted, cipherFunc.KeySize()+3)
- if out == nil {
- return nil, errors.InvalidArgumentError("invalid padding while ECDH")
- }
- return out, nil
-}
-
-func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *PublicKey, keyBlock []byte) error {
- ecdhpub := pub.PublicKey.(*ecdh.PublicKey)
- kdfParams := ECDHKdfParams(pub)
-
- hash, ok := s2k.HashIdToHash(byte(pub.ecdh.KdfHash))
- if !ok {
- return errors.InvalidArgumentError("invalid hash id in private key")
- }
-
- kdfKeySize := CipherFunction(pub.ecdh.KdfAlgo).KeySize()
- Vx, Vy, C, err := ecdhpub.Encrypt(rand, kdfParams, keyBlock, hash, kdfKeySize)
- if err != nil {
- return err
- }
-
- mpis, mpiBitLen := ecdh.Marshal(ecdhpub.Curve, Vx, Vy)
-
- packetLen := len(header) /* header length in bytes */
- packetLen += 2 /* mpi length in bits */ + len(mpis)
- packetLen += 1 /* ciphertext size in bytes */ + len(C)
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
-
- _, err = w.Write([]byte{byte(mpiBitLen >> 8), byte(mpiBitLen)})
- if err != nil {
- return err
- }
-
- _, err = w.Write(mpis[:])
- if err != nil {
- return err
- }
-
- w.Write([]byte{byte(len(C))})
- w.Write(C[:])
- return nil
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go
deleted file mode 100644
index c0b6c954..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "encoding/binary"
- "io"
- "math/big"
- "strconv"
-
- "github.com/keybase/go-crypto/openpgp/ecdh"
- "github.com/keybase/go-crypto/openpgp/elgamal"
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/rsa"
-)
-
-const encryptedKeyVersion = 3
-
-// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
-// section 5.1.
-type EncryptedKey struct {
- KeyId uint64
- Algo PublicKeyAlgorithm
- CipherFunc CipherFunction // only valid after a successful Decrypt
- Key []byte // only valid after a successful Decrypt
-
- encryptedMPI1, encryptedMPI2 parsedMPI
- ecdh_C []byte
-}
-
-func (e *EncryptedKey) parse(r io.Reader) (err error) {
- var buf [10]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- if buf[0] != encryptedKeyVersion {
- return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
- }
- e.KeyId = binary.BigEndian.Uint64(buf[1:9])
- e.Algo = PublicKeyAlgorithm(buf[9])
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
- case PubKeyAlgoElGamal:
- e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
- e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
- case PubKeyAlgoECDH:
- e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
- if err != nil {
- return err
- }
- _, err = readFull(r, buf[:1]) // read C len (1 byte)
- if err != nil {
- return err
- }
- e.ecdh_C = make([]byte, int(buf[0]))
- _, err = readFull(r, e.ecdh_C)
- }
-
- if err != nil {
- return err
- }
-
- _, err = consumeAll(r)
- return err
-}
-
-func checksumKeyMaterial(key []byte) uint16 {
- var checksum uint16
- for _, v := range key {
- checksum += uint16(v)
- }
- return checksum
-}
-
-// Decrypt decrypts an encrypted session key with the given private key. The
-// private key must have been decrypted first.
-// If config is nil, sensible defaults will be used.
-func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
- var err error
- var b []byte
-
- // TODO(agl): use session key decryption routines here to avoid
- // padding oracle attacks.
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- k := priv.PrivateKey.(*rsa.PrivateKey)
- b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
- case PubKeyAlgoElGamal:
- c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
- c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
- b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
- case PubKeyAlgoECDH:
- // Note: Unmarshal checks if point is on the curve.
- c1, c2 := ecdh.Unmarshal(priv.PrivateKey.(*ecdh.PrivateKey).Curve, e.encryptedMPI1.bytes)
- if c1 == nil {
- return errors.InvalidArgumentError("failed to parse EC point for encryption key")
- }
- b, err = decryptKeyECDH(priv, c1, c2, e.ecdh_C)
- default:
- err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
- }
-
- if err != nil {
- return err
- }
-
- e.CipherFunc = CipherFunction(b[0])
- e.Key = b[1 : len(b)-2]
- expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
- checksum := checksumKeyMaterial(e.Key)
- if checksum != expectedChecksum {
- return errors.StructuralError("EncryptedKey checksum incorrect")
- }
-
- return nil
-}
-
-// Serialize writes the encrypted key packet, e, to w.
-func (e *EncryptedKey) Serialize(w io.Writer) error {
- var mpiLen int
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- mpiLen = 2 + len(e.encryptedMPI1.bytes)
- case PubKeyAlgoElGamal:
- mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
- default:
- return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
- }
-
- serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
-
- w.Write([]byte{encryptedKeyVersion})
- binary.Write(w, binary.BigEndian, e.KeyId)
- w.Write([]byte{byte(e.Algo)})
-
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- writeMPIs(w, e.encryptedMPI1)
- case PubKeyAlgoElGamal:
- writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
- default:
- panic("internal error")
- }
-
- return nil
-}
-
-// SerializeEncryptedKey serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
- var buf [10]byte
- buf[0] = encryptedKeyVersion
- binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
- buf[9] = byte(pub.PubKeyAlgo)
-
- keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
- keyBlock[0] = byte(cipherFunc)
- copy(keyBlock[1:], key)
- checksum := checksumKeyMaterial(key)
- keyBlock[1+len(key)] = byte(checksum >> 8)
- keyBlock[1+len(key)+1] = byte(checksum)
-
- switch pub.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
- case PubKeyAlgoElGamal:
- return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
- case PubKeyAlgoECDH:
- return serializeEncryptedKeyECDH(w, config.Random(), buf, pub, keyBlock)
- case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
- return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
- }
-
- return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
-}
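
A sketch of encrypting a fresh session key to a recipient's subkey; the subkey selection here is deliberately naive, and the 16-byte key length matches AES-128:

package example

import (
	"crypto/rand"
	"io"

	"github.com/keybase/go-crypto/openpgp"
	"github.com/keybase/go-crypto/openpgp/packet"
)

// encryptSessionKey writes an encrypted-key packet for `to` and returns the
// plaintext session key for use with a symmetrically encrypted data packet.
func encryptSessionKey(w io.Writer, to *openpgp.Entity) ([]byte, error) {
	key := make([]byte, 16) // AES-128 session key
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	// Naive choice: first subkey; real callers should check key flags.
	sub := to.Subkeys[0]
	if err := packet.SerializeEncryptedKey(w, sub.PublicKey, packet.CipherAES128, key, nil); err != nil {
		return nil, err
	}
	return key, nil
}
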
-
-func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
- cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
- }
-
- packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
-}
-
-func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
- c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
- }
-
- packetLen := 10 /* header length */
- packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
- packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- err = writeBig(w, c1)
- if err != nil {
- return err
- }
- return writeBig(w, c2)
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go
deleted file mode 100644
index 1a9ec6e5..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "encoding/binary"
- "io"
-)
-
-// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
-type LiteralData struct {
- IsBinary bool
- FileName string
- Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
- Body io.Reader
-}
-
-// ForEyesOnly returns whether the contents of the LiteralData have been marked
-// as especially sensitive.
-func (l *LiteralData) ForEyesOnly() bool {
- return l.FileName == "_CONSOLE"
-}
-
-func (l *LiteralData) parse(r io.Reader) (err error) {
- var buf [256]byte
-
- _, err = readFull(r, buf[:2])
- if err != nil {
- return
- }
-
- l.IsBinary = buf[0] == 'b'
- fileNameLen := int(buf[1])
-
- _, err = readFull(r, buf[:fileNameLen])
- if err != nil {
- return
- }
-
- l.FileName = string(buf[:fileNameLen])
-
- _, err = readFull(r, buf[:4])
- if err != nil {
- return
- }
-
- l.Time = binary.BigEndian.Uint32(buf[:4])
- l.Body = r
- return
-}
-
-// SerializeLiteral serializes a literal data packet to w and returns a
-// WriteCloser to which the data itself can be written and which MUST be closed
-// on completion. The fileName is truncated to 255 bytes.
-func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
- var buf [4]byte
- buf[0] = 't'
- if isBinary {
- buf[0] = 'b'
- }
- if len(fileName) > 255 {
- fileName = fileName[:255]
- }
- buf[1] = byte(len(fileName))
-
- inner, err := serializeStreamHeader(w, packetTypeLiteralData)
- if err != nil {
- return
- }
-
- _, err = inner.Write(buf[:2])
- if err != nil {
- return
- }
- _, err = inner.Write([]byte(fileName))
- if err != nil {
- return
- }
- binary.BigEndian.PutUint32(buf[:], time)
- _, err = inner.Write(buf[:])
- if err != nil {
- return
- }
-
- plaintext = inner
- return
-}
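
A literal-packet round trip under the API above (nopCloser is local glue; a zero timestamp means "undefined" per the struct comment):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/keybase/go-crypto/openpgp/packet"
)

type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	wc, err := packet.SerializeLiteral(nopCloser{&buf}, true, "hello.txt", 0)
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(wc, "hello, literal packets")
	wc.Close()

	p, err := packet.Read(&buf)
	if err != nil {
		log.Fatal(err)
	}
	lit := p.(*packet.LiteralData)
	body, _ := ioutil.ReadAll(lit.Body)
	fmt.Printf("%s from %q\n", body, lit.FileName)
}
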
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go
deleted file mode 100644
index ce2a33a5..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
-
-package packet
-
-import (
- "crypto/cipher"
-)
-
-type ocfbEncrypter struct {
- b cipher.Block
- fre []byte
- outUsed int
-}
-
-// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
-// performed.
-type OCFBResyncOption bool
-
-const (
- OCFBResync OCFBResyncOption = true
- OCFBNoResync OCFBResyncOption = false
-)
-
-// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
-// cipher feedback mode using the given cipher.Block, and an initial amount of
-// ciphertext. randData must be random bytes and be the same length as the
-// cipher.Block's block size. Resync determines if the "resynchronization step"
-// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
-// this point.
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
- blockSize := block.BlockSize()
- if len(randData) != blockSize {
- return nil, nil
- }
-
- x := &ocfbEncrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefix := make([]byte, blockSize+2)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefix[i] = randData[i] ^ x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
- prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- return x, prefix
-}
-
-func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- x.fre[x.outUsed] ^= src[i]
- dst[i] = x.fre[x.outUsed]
- x.outUsed++
- }
-}
-
-type ocfbDecrypter struct {
- b cipher.Block
- fre []byte
- outUsed int
-}
-
-// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
-// cipher feedback mode using the given cipher.Block. Prefix must be the first
-// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
-// block size. If an incorrect key is detected then nil is returned. On
-// successful exit, blockSize+2 bytes of decrypted data are written into
-// prefix. Resync determines if the "resynchronization step" from RFC 4880,
-// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
-func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
- blockSize := block.BlockSize()
- if len(prefix) != blockSize+2 {
- return nil
- }
-
- x := &ocfbDecrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefixCopy := make([]byte, len(prefix))
- copy(prefixCopy, prefix)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefixCopy[i] ^= x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefixCopy[blockSize] ^= x.fre[0]
- prefixCopy[blockSize+1] ^= x.fre[1]
-
- if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
- prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
- return nil
- }
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- copy(prefix, prefixCopy)
- return x
-}
-
-func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- c := src[i]
- dst[i] = x.fre[x.outUsed] ^ src[i]
- x.fre[x.outUsed] = c
- x.outUsed++
- }
-}
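
An encrypt/decrypt round trip with the OCFB stream above (the all-zero key is for illustration only):

package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/keybase/go-crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16) // demo key; use a real session key in practice
	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}

	randData := make([]byte, block.BlockSize())
	if _, err := rand.Read(randData); err != nil {
		log.Fatal(err)
	}

	enc, prefix := packet.NewOCFBEncrypter(block, randData, packet.OCFBResync)
	plaintext := []byte("attack at dawn")
	ciphertext := make([]byte, len(plaintext))
	enc.XORKeyStream(ciphertext, plaintext)

	// The decrypter consumes the prefix; nil means the quick check
	// failed (e.g. wrong key).
	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	if dec == nil {
		log.Fatal("prefix check failed")
	}
	recovered := make([]byte, len(ciphertext))
	dec.XORKeyStream(recovered, ciphertext)
	fmt.Printf("%s\n", recovered)
}
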
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go
deleted file mode 100644
index af404bb1..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "encoding/binary"
- "io"
- "strconv"
-
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/s2k"
-)
-
-// OnePassSignature represents a one-pass signature packet. See RFC 4880,
-// section 5.4.
-type OnePassSignature struct {
- SigType SignatureType
- Hash crypto.Hash
- PubKeyAlgo PublicKeyAlgorithm
- KeyId uint64
- IsLast bool
-}
-
-const onePassSignatureVersion = 3
-
-func (ops *OnePassSignature) parse(r io.Reader) (err error) {
- var buf [13]byte
-
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- if buf[0] != onePassSignatureVersion {
- err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
- }
-
- var ok bool
- ops.Hash, ok = s2k.HashIdToHash(buf[2])
- if !ok {
- return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
- }
-
- ops.SigType = SignatureType(buf[1])
- ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
- ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
- ops.IsLast = buf[12] != 0
- return
-}
-
-// Serialize marshals the given OnePassSignature to w.
-func (ops *OnePassSignature) Serialize(w io.Writer) error {
- var buf [13]byte
- buf[0] = onePassSignatureVersion
- buf[1] = uint8(ops.SigType)
- var ok bool
- buf[2], ok = s2k.HashToHashId(ops.Hash)
- if !ok {
- return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
- }
- buf[3] = uint8(ops.PubKeyAlgo)
- binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
- if ops.IsLast {
- buf[12] = 1
- }
-
- if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
- return err
- }
- _, err := w.Write(buf[:])
- return err
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go
deleted file mode 100644
index cdeea012..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-
- "github.com/keybase/go-crypto/openpgp/errors"
-)
-
-// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
-// useful for splitting and storing the original packet contents separately,
-// handling unsupported packet types or accessing parts of the packet not yet
-// implemented by this package.
-type OpaquePacket struct {
- // Packet type
- Tag uint8
- // Reason why the packet was parsed opaquely
- Reason error
- // Binary contents of the packet data
- Contents []byte
-}
-
-func (op *OpaquePacket) parse(r io.Reader) (err error) {
- op.Contents, err = ioutil.ReadAll(r)
- return
-}
-
-// Serialize marshals the packet to a writer in its original form, including
-// the packet header.
-func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
- err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
- if err == nil {
- _, err = w.Write(op.Contents)
- }
- return
-}
-
-// Parse attempts to parse the opaque contents into a structure supported by
-// this package. If the packet is not known then the result will be another
-// OpaquePacket.
-func (op *OpaquePacket) Parse() (p Packet, err error) {
- hdr := bytes.NewBuffer(nil)
- err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
- if err != nil {
- op.Reason = err
- return op, err
- }
- p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
- if err != nil {
- op.Reason = err
- p = op
- }
- return
-}
-
-// OpaqueReader reads OpaquePackets from an io.Reader.
-type OpaqueReader struct {
- r io.Reader
-}
-
-func NewOpaqueReader(r io.Reader) *OpaqueReader {
- return &OpaqueReader{r: r}
-}
-
-// Read the next OpaquePacket.
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
- tag, _, contents, err := readHeader(or.r)
- if err != nil {
- return
- }
- op = &OpaquePacket{Tag: uint8(tag), Reason: err}
- err = op.parse(contents)
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
-// as found in signature and user attribute packets.
-type OpaqueSubpacket struct {
- SubType uint8
- Contents []byte
-}
-
-// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
-// their byte representation.
-func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
- var (
- subHeaderLen int
- subPacket *OpaqueSubpacket
- )
- for len(contents) > 0 {
- subHeaderLen, subPacket, err = nextSubpacket(contents)
- if err != nil {
- break
- }
- result = append(result, subPacket)
- contents = contents[subHeaderLen+len(subPacket.Contents):]
- }
- return
-}
-
-func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
- // RFC 4880, section 5.2.3.1
- var subLen uint32
- if len(contents) < 1 {
- goto Truncated
- }
- subPacket = &OpaqueSubpacket{}
- switch {
- case contents[0] < 192:
- subHeaderLen = 2 // 1 length byte, 1 subtype byte
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- subLen = uint32(contents[0])
- contents = contents[1:]
- case contents[0] < 255:
- subHeaderLen = 3 // 2 length bytes, 1 subtype
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
- contents = contents[2:]
- default:
- subHeaderLen = 6 // 5 length bytes, 1 subtype
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- subLen = uint32(contents[1])<<24 |
- uint32(contents[2])<<16 |
- uint32(contents[3])<<8 |
- uint32(contents[4])
- contents = contents[5:]
- }
- if subLen > uint32(len(contents)) || subLen == 0 {
- goto Truncated
- }
- subPacket.SubType = contents[0]
- subPacket.Contents = contents[1:subLen]
- return
-Truncated:
- err = errors.StructuralError("subpacket truncated")
- return
-}
-
-func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
- buf := make([]byte, 6)
- n := serializeSubpacketLength(buf, len(osp.Contents)+1)
- buf[n] = osp.SubType
- if _, err = w.Write(buf[:n+1]); err != nil {
- return
- }
- _, err = w.Write(osp.Contents)
- return
-}
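
A small sketch of listing raw packets with OpaqueReader, without fully parsing them:

package example

import (
	"fmt"
	"io"

	"github.com/keybase/go-crypto/openpgp/packet"
)

// dumpPackets prints each packet's tag and raw length.
func dumpPackets(r io.Reader) error {
	or := packet.NewOpaqueReader(r)
	for {
		op, err := or.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("tag=%d len=%d\n", op.Tag, len(op.Contents))
	}
}
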
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go
deleted file mode 100644
index eb61eda9..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go
+++ /dev/null
@@ -1,576 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package packet implements parsing and serialization of OpenPGP packets, as
-// specified in RFC 4880.
-package packet // import "github.com/keybase/go-crypto/openpgp/packet"
-
-import (
- "bufio"
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
- "crypto/elliptic"
- "io"
- "math/big"
-
- "github.com/keybase/go-crypto/cast5"
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/rsa"
-)
-
-// readFull is the same as io.ReadFull except that reading zero bytes returns
-// ErrUnexpectedEOF rather than EOF.
-func readFull(r io.Reader, buf []byte) (n int, err error) {
- n, err = io.ReadFull(r, buf)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
-func readLength(r io.Reader) (length int64, isPartial bool, err error) {
- var buf [4]byte
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- switch {
- case buf[0] < 192:
- length = int64(buf[0])
- case buf[0] < 224:
- length = int64(buf[0]-192) << 8
- _, err = readFull(r, buf[0:1])
- if err != nil {
- return
- }
- length += int64(buf[0]) + 192
- case buf[0] < 255:
- length = int64(1) << (buf[0] & 0x1f)
- isPartial = true
- default:
- _, err = readFull(r, buf[0:4])
- if err != nil {
- return
- }
- length = int64(buf[0])<<24 |
- int64(buf[1])<<16 |
- int64(buf[2])<<8 |
- int64(buf[3])
- }
- return
-}
-
-// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
-// The continuation lengths are parsed and removed from the stream and EOF is
-// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
-type partialLengthReader struct {
- r io.Reader
- remaining int64
- isPartial bool
-}
-
-func (r *partialLengthReader) Read(p []byte) (n int, err error) {
- for r.remaining == 0 {
- if !r.isPartial {
- return 0, io.EOF
- }
- r.remaining, r.isPartial, err = readLength(r.r)
- if err != nil {
- return 0, err
- }
- }
-
- toRead := int64(len(p))
- if toRead > r.remaining {
- toRead = r.remaining
- }
-
- n, err = r.r.Read(p[:int(toRead)])
- r.remaining -= int64(n)
- if n < int(toRead) && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
-// See RFC 4880, section 4.2.2.4.
-type partialLengthWriter struct {
- w io.WriteCloser
- lengthByte [1]byte
-}
-
-func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
- for len(p) > 0 {
- for power := uint(14); power < 32; power-- {
- l := 1 << power
- if len(p) >= l {
- w.lengthByte[0] = 224 + uint8(power)
- _, err = w.w.Write(w.lengthByte[:])
- if err != nil {
- return
- }
- var m int
- m, err = w.w.Write(p[:l])
- n += m
- if err != nil {
- return
- }
- p = p[l:]
- break
- }
- }
- }
- return
-}
-
-func (w *partialLengthWriter) Close() error {
- w.lengthByte[0] = 0
- _, err := w.w.Write(w.lengthByte[:])
- if err != nil {
- return err
- }
- return w.w.Close()
-}
-
-// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
-// underlying Reader returns EOF before the limit has been reached.
-type spanReader struct {
- r io.Reader
- n int64
-}
-
-func (l *spanReader) Read(p []byte) (n int, err error) {
- if l.n <= 0 {
- return 0, io.EOF
- }
- if int64(len(p)) > l.n {
- p = p[0:l.n]
- }
- n, err = l.r.Read(p)
- l.n -= int64(n)
- if l.n > 0 && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// readHeader parses a packet header and returns an io.Reader which will return
-// the contents of the packet. See RFC 4880, section 4.2.
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
- var buf [4]byte
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return
- }
- if buf[0]&0x80 == 0 {
- err = errors.StructuralError("tag byte does not have MSB set")
- return
- }
- if buf[0]&0x40 == 0 {
- // Old format packet
- tag = packetType((buf[0] & 0x3f) >> 2)
- lengthType := buf[0] & 3
- if lengthType == 3 {
- length = -1
- contents = r
- return
- }
- lengthBytes := 1 << lengthType
- _, err = readFull(r, buf[0:lengthBytes])
- if err != nil {
- return
- }
- for i := 0; i < lengthBytes; i++ {
- length <<= 8
- length |= int64(buf[i])
- }
- contents = &spanReader{r, length}
- return
- }
-
- // New format packet
- tag = packetType(buf[0] & 0x3f)
- length, isPartial, err := readLength(r)
- if err != nil {
- return
- }
- if isPartial {
- contents = &partialLengthReader{
- remaining: length,
- isPartial: true,
- r: r,
- }
- length = -1
- } else {
- contents = &spanReader{r, length}
- }
- return
-}
-
-// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
-// 4.2.
-func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
- var buf [6]byte
- var n int
-
- buf[0] = 0x80 | 0x40 | byte(ptype)
- if length < 192 {
- buf[1] = byte(length)
- n = 2
- } else if length < 8384 {
- length -= 192
- buf[1] = 192 + byte(length>>8)
- buf[2] = byte(length)
- n = 3
- } else {
- buf[1] = 255
- buf[2] = byte(length >> 24)
- buf[3] = byte(length >> 16)
- buf[4] = byte(length >> 8)
- buf[5] = byte(length)
- n = 6
- }
-
- _, err = w.Write(buf[:n])
- return
-}
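
The non-partial length rules above can be mirrored in isolation; a worked sketch (encodeLength is a local helper, not part of the package):

package main

import "fmt"

// encodeLength mirrors serializeHeader's new-format length encoding
// (RFC 4880, section 4.2.2).
func encodeLength(length int) []byte {
	switch {
	case length < 192:
		return []byte{byte(length)}
	case length < 8384:
		length -= 192
		return []byte{192 + byte(length>>8), byte(length)}
	default:
		return []byte{255, byte(length >> 24), byte(length >> 16), byte(length >> 8), byte(length)}
	}
}

func main() {
	fmt.Println(encodeLength(100))  // [100]
	fmt.Println(encodeLength(1000)) // [195 40]: (195-192)<<8 + 40 + 192 = 1000
	fmt.Println(encodeLength(9000)) // [255 0 0 35 40]: 35<<8 + 40 = 9000
}
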
-
-// serializeStreamHeader writes an OpenPGP packet header to w where the
-// length of the packet is unknown. It returns a io.WriteCloser which can be
-// used to write the contents of the packet. See RFC 4880, section 4.2.
-func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
- var buf [1]byte
- buf[0] = 0x80 | 0x40 | byte(ptype)
- _, err = w.Write(buf[:])
- if err != nil {
- return
- }
- out = &partialLengthWriter{w: w}
- return
-}
-
-// Packet represents an OpenPGP packet. Users are expected to try casting
-// instances of this interface to specific packet types.
-type Packet interface {
- parse(io.Reader) error
-}
-
-// consumeAll reads from the given Reader until error, returning the number of
-// bytes read.
-func consumeAll(r io.Reader) (n int64, err error) {
- var m int
- var buf [1024]byte
-
- for {
- m, err = r.Read(buf[:])
- n += int64(m)
- if err == io.EOF {
- err = nil
- return
- }
- if err != nil {
- return
- }
- }
-
- panic("unreachable")
-}
-
-// packetType represents the numeric ids of the different OpenPGP packet types. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
-type packetType uint8
-
-const (
- packetTypeEncryptedKey packetType = 1
- packetTypeSignature packetType = 2
- packetTypeSymmetricKeyEncrypted packetType = 3
- packetTypeOnePassSignature packetType = 4
- packetTypePrivateKey packetType = 5
- packetTypePublicKey packetType = 6
- packetTypePrivateSubkey packetType = 7
- packetTypeCompressed packetType = 8
- packetTypeSymmetricallyEncrypted packetType = 9
- packetTypeLiteralData packetType = 11
- packetTypeUserId packetType = 13
- packetTypePublicSubkey packetType = 14
- packetTypeUserAttribute packetType = 17
- packetTypeSymmetricallyEncryptedMDC packetType = 18
-)
-
- // peekVersion detects the version of a versioned packet (public key or
- // signature) about to be read. A bufio.Reader at the original position of
- // the io.Reader is returned.
-func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) {
- bufr = bufio.NewReader(r)
- var verBuf []byte
- if verBuf, err = bufr.Peek(1); err != nil {
- return
- }
- ver = verBuf[0]
- return
-}
-
-// Read reads a single OpenPGP packet from the given io.Reader. If there is an
-// error parsing a packet, the whole packet is consumed from the input.
-func Read(r io.Reader) (p Packet, err error) {
- tag, _, contents, err := readHeader(r)
- if err != nil {
- return
- }
-
- switch tag {
- case packetTypeEncryptedKey:
- p = new(EncryptedKey)
- case packetTypeSignature:
- var version byte
- // Detect signature version
- if contents, version, err = peekVersion(contents); err != nil {
- return
- }
- if version < 4 {
- p = new(SignatureV3)
- } else {
- p = new(Signature)
- }
- case packetTypeSymmetricKeyEncrypted:
- p = new(SymmetricKeyEncrypted)
- case packetTypeOnePassSignature:
- p = new(OnePassSignature)
- case packetTypePrivateKey, packetTypePrivateSubkey:
- pk := new(PrivateKey)
- if tag == packetTypePrivateSubkey {
- pk.IsSubkey = true
- }
- p = pk
- case packetTypePublicKey, packetTypePublicSubkey:
- var version byte
- if contents, version, err = peekVersion(contents); err != nil {
- return
- }
- isSubkey := tag == packetTypePublicSubkey
- if version < 4 {
- p = &PublicKeyV3{IsSubkey: isSubkey}
- } else {
- p = &PublicKey{IsSubkey: isSubkey}
- }
- case packetTypeCompressed:
- p = new(Compressed)
- case packetTypeSymmetricallyEncrypted:
- p = new(SymmetricallyEncrypted)
- case packetTypeLiteralData:
- p = new(LiteralData)
- case packetTypeUserId:
- p = new(UserId)
- case packetTypeUserAttribute:
- p = new(UserAttribute)
- case packetTypeSymmetricallyEncryptedMDC:
- se := new(SymmetricallyEncrypted)
- se.MDC = true
- p = se
- default:
- err = errors.UnknownPacketTypeError(tag)
- }
- if p != nil {
- err = p.parse(contents)
- }
- if err != nil {
- consumeAll(contents)
- }
- return
-}
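
A usage sketch for Read, assuming the vendored import path above; the input file name is hypothetical:

package main

import (
	"fmt"
	"os"

	"github.com/keybase/go-crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("message.gpg") // hypothetical binary (non-armored) input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Read returns one packet per call. A packet that fails to parse is
	// fully consumed from the input, so callers may also choose to keep
	// reading past recoverable errors instead of breaking.
	for {
		p, err := packet.Read(f)
		if err != nil {
			break // end of stream or a structural error
		}
		fmt.Printf("packet: %T\n", p)
	}
}
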
-
-// SignatureType represents the different semantic meanings of an OpenPGP
-// signature. See RFC 4880, section 5.2.1.
-type SignatureType uint8
-
-const (
- SigTypeBinary SignatureType = 0
- SigTypeText = 1
- SigTypeGenericCert = 0x10
- SigTypePersonaCert = 0x11
- SigTypeCasualCert = 0x12
- SigTypePositiveCert = 0x13
- SigTypeSubkeyBinding = 0x18
- SigTypePrimaryKeyBinding = 0x19
- SigTypeDirectSignature = 0x1F
- SigTypeKeyRevocation = 0x20
- SigTypeSubkeyRevocation = 0x28
- SigTypeIdentityRevocation = 0x30
-)
-
-// PublicKeyAlgorithm represents the different public key system specified for
-// OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
-type PublicKeyAlgorithm uint8
-
-const (
- PubKeyAlgoRSA PublicKeyAlgorithm = 1
- PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
- PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
- PubKeyAlgoElGamal PublicKeyAlgorithm = 16
- PubKeyAlgoDSA PublicKeyAlgorithm = 17
- // RFC 6637, Section 5.
- PubKeyAlgoECDH PublicKeyAlgorithm = 18
- PubKeyAlgoECDSA PublicKeyAlgorithm = 19
-
- PubKeyAlgoBadElGamal PublicKeyAlgorithm = 20 // Reserved (deprecated, formerly ElGamal Encrypt or Sign)
-	// EdDSA: no RFC yet; supported here ahead of standardization.
- PubKeyAlgoEdDSA PublicKeyAlgorithm = 22
-)
-
-// CanEncrypt returns true if it's possible to encrypt a message to a public
-// key of the given type.
-func (pka PublicKeyAlgorithm) CanEncrypt() bool {
- switch pka {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
- return true
- }
- return false
-}
-
-// CanSign returns true if it's possible for a public key of the given type to
-// sign a message.
-func (pka PublicKeyAlgorithm) CanSign() bool {
- switch pka {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
- return true
- }
- return false
-}
-
-// CipherFunction represents the different block ciphers specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
-type CipherFunction uint8
-
-const (
- Cipher3DES CipherFunction = 2
- CipherCAST5 CipherFunction = 3
- CipherAES128 CipherFunction = 7
- CipherAES192 CipherFunction = 8
- CipherAES256 CipherFunction = 9
-)
-
-// KeySize returns the key size, in bytes, of cipher.
-func (cipher CipherFunction) KeySize() int {
- switch cipher {
- case Cipher3DES:
- return 24
- case CipherCAST5:
- return cast5.KeySize
- case CipherAES128:
- return 16
- case CipherAES192:
- return 24
- case CipherAES256:
- return 32
- }
- return 0
-}
-
-// blockSize returns the block size, in bytes, of cipher.
-func (cipher CipherFunction) blockSize() int {
- switch cipher {
- case Cipher3DES:
- return des.BlockSize
- case CipherCAST5:
- return 8
- case CipherAES128, CipherAES192, CipherAES256:
- return 16
- }
- return 0
-}
-
-// new returns a fresh instance of the given cipher.
-func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
- switch cipher {
- case Cipher3DES:
- block, _ = des.NewTripleDESCipher(key)
- case CipherCAST5:
- block, _ = cast5.NewCipher(key)
- case CipherAES128, CipherAES192, CipherAES256:
- block, _ = aes.NewCipher(key)
- }
- return
-}
-
-// readMPI reads a big integer from r. The bit length returned is the bit
-// length that was specified in r. This is preserved so that the integer can be
-// reserialized exactly.
-func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
- var buf [2]byte
- _, err = readFull(r, buf[0:])
- if err != nil {
- return
- }
- bitLength = uint16(buf[0])<<8 | uint16(buf[1])
- numBytes := (int(bitLength) + 7) / 8
- mpi = make([]byte, numBytes)
- _, err = readFull(r, mpi)
-	// According to RFC 4880, section 3.2, we should check that the MPI
-	// has no leading zeroes (at least when not an encrypted MPI?), but
-	// this implementation does generate leading zeroes, so we keep
-	// accepting them.
- return
-}
-
-// writeMPI serializes a big integer to w.
-func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
- // Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
- // Implementations seem to be tolerant of them, and stripping them would
- // make it complex to guarantee matching re-serialization.
- _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
- if err == nil {
- _, err = w.Write(mpiBytes)
- }
- return
-}
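
The MPI wire format above is simply a two-octet big-endian bit count followed by the magnitude bytes. A standalone sketch of the same encoding:

package main

import (
	"bytes"
	"fmt"
	"math/big"
)

// encodeMPI mirrors writeMPI: a two-octet big-endian bit count followed
// by the magnitude bytes of the integer.
func encodeMPI(n *big.Int) []byte {
	var buf bytes.Buffer
	bitLen := n.BitLen()
	buf.WriteByte(byte(bitLen >> 8))
	buf.WriteByte(byte(bitLen))
	buf.Write(n.Bytes())
	return buf.Bytes()
}

func main() {
	// 511 = 0x1ff is 9 bits wide, so it encodes as 00 09 01 ff.
	fmt.Printf("% x\n", encodeMPI(big.NewInt(511)))
}
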
-
-// WritePaddedBigInt serializes X to w, left-padded with zero bytes to
-// the given length.
-func WritePaddedBigInt(w io.Writer, length int, X *big.Int) (n int, err error) {
- bytes := X.Bytes()
- n1, err := w.Write(make([]byte, length-len(bytes)))
- if err != nil {
- return n1, err
- }
- n2, err := w.Write(bytes)
- if err != nil {
- return n2, err
- }
- return (n1 + n2), err
-}
-
-// mpiPointByteLength returns the minimum number of bytes needed to fit
-// the curve coordinates; all coordinates are 0-padded to this length.
-func mpiPointByteLength(curve elliptic.Curve) int {
- return (curve.Params().P.BitLen() + 7) / 8
-}
-
-// writeBig serializes a *big.Int to w.
-func writeBig(w io.Writer, i *big.Int) error {
- return writeMPI(w, uint16(i.BitLen()), i.Bytes())
-}
-
-// padToKeySize left-pads an MPI with zeroes to match the byte length of
-// the specified RSA public key's modulus.
-func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
- k := (pub.N.BitLen() + 7) / 8
- if len(b) >= k {
- return b
- }
- bb := make([]byte, k)
- copy(bb[len(bb)-len(b):], b)
- return bb
-}
-
-// CompressionAlgo represents the different compression algorithms
-// supported by OpenPGP (except for BZIP2, which is not currently
-// supported). See RFC 4880, section 9.3.
-type CompressionAlgo uint8
-
-const (
- CompressionNone CompressionAlgo = 0
- CompressionZIP CompressionAlgo = 1
- CompressionZLIB CompressionAlgo = 2
-)
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go
deleted file mode 100644
index 5305b1f6..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go
+++ /dev/null
@@ -1,557 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/sha1"
- "fmt"
- "io"
- "io/ioutil"
- "math/big"
- "strconv"
- "time"
-
- "github.com/keybase/go-crypto/ed25519"
- "github.com/keybase/go-crypto/openpgp/ecdh"
- "github.com/keybase/go-crypto/openpgp/elgamal"
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/s2k"
- "github.com/keybase/go-crypto/rsa"
-)
-
-// PrivateKey represents a possibly encrypted private key. See RFC 4880,
-// section 5.5.3.
-type PrivateKey struct {
- PublicKey
- Encrypted bool // if true then the private key is unavailable until Decrypt has been called.
- encryptedData []byte
- cipher CipherFunction
- s2k func(out, in []byte)
-	PrivateKey interface{} // An *rsa.PrivateKey, *dsa.PrivateKey, *elgamal.PrivateKey, *ecdsa.PrivateKey, *ecdh.PrivateKey, or *EdDSAPrivateKey.
- sha1Checksum bool
- iv []byte
- s2kHeader []byte
-}
-
-type EdDSAPrivateKey struct {
- PrivateKey
- seed parsedMPI
-}
-
-func (e *EdDSAPrivateKey) Sign(digest []byte) (R, S []byte, err error) {
- r := bytes.NewReader(e.seed.bytes)
- publicKey, privateKey, err := ed25519.GenerateKey(r)
- if err != nil {
- return nil, nil, err
- }
-
- if !bytes.Equal(publicKey, e.PublicKey.edk.p.bytes[1:]) { // [1:] because [0] is 0x40 mpi header
- return nil, nil, errors.UnsupportedError("EdDSA: Private key does not match public key.")
- }
-
- sig := ed25519.Sign(privateKey, digest)
-
- sigLen := ed25519.SignatureSize / 2
- return sig[:sigLen], sig[sigLen:], nil
-}
-
-func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewECDHPrivateKey(currentTime time.Time, priv *ecdh.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewECDHPublicKey(currentTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func (pk *PrivateKey) parse(r io.Reader) (err error) {
- err = (&pk.PublicKey).parse(r)
- if err != nil {
- return
- }
- var buf [1]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
-
- s2kType := buf[0]
-
- switch s2kType {
- case 0:
- pk.s2k = nil
- pk.Encrypted = false
- case 254, 255:
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.cipher = CipherFunction(buf[0])
- pk.Encrypted = true
- pk.s2k, err = s2k.Parse(r)
- if err != nil {
- return
- }
- if s2kType == 254 {
- pk.sha1Checksum = true
- }
- // S2K == nil implies that we got a "GNU Dummy" S2K. For instance,
- // because our master secret key is on a USB key in a vault somewhere.
- // In that case, there is no further data to consume here.
- if pk.s2k == nil {
- pk.Encrypted = false
- return
- }
- default:
- return errors.UnsupportedError("deprecated s2k function in private key")
- }
- if pk.Encrypted {
- blockSize := pk.cipher.blockSize()
- if blockSize == 0 {
- return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
- }
- pk.iv = make([]byte, blockSize)
- _, err = readFull(r, pk.iv)
- if err != nil {
- return
- }
- }
-
- pk.encryptedData, err = ioutil.ReadAll(r)
- if err != nil {
- return
- }
-
- if !pk.Encrypted {
- return pk.parsePrivateKey(pk.encryptedData)
- }
-
- return
-}
-
-// mod64kHash sums the bytes of d into a uint16, i.e. modulo 65536; this
-// is the checksum used over unencrypted secret key material (RFC 4880,
-// section 5.5.3).
-func mod64kHash(d []byte) uint16 {
- var h uint16
- for _, b := range d {
- h += uint16(b)
- }
- return h
-}
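
The checksum above is easy to sanity-check by hand; a standalone mirror:

package main

import "fmt"

// sum16 reimplements mod64kHash: the bytes summed into a uint16, which
// is arithmetic modulo 65536.
func sum16(d []byte) uint16 {
	var h uint16
	for _, b := range d {
		h += uint16(b)
	}
	return h
}

func main() {
	// 104+101+108+108+111 = 532 = 0x0214
	fmt.Printf("%04x\n", sum16([]byte("hello")))
}
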
-
-// Encrypt is the counterpart to the Decrypt() method below. It encrypts
-// the private key with the provided passphrase. If config is nil, then
-// the standard, and sensible, defaults apply.
-//
-// A key is derived from the given passphrase using S2K Specifier
-// Type 3 (Iterated + Salted; see RFC 4880, section 3.7.1.3). This choice
-// is hardcoded in s2k.Serialize(). S2KCount is hardcoded to 0, which is
-// equivalent to 65536, and the hash algorithm for key derivation can be
-// set with config. The PrivateKey, encrypted with the cipher specified
-// in config (if provided), is written to the encryptedData member.
-// When Serialize() is called, that encryptedData member is written out
-// with an S2K Usage value of 254, and thus a SHA-1 checksum.
-func (pk *PrivateKey) Encrypt(passphrase []byte, config *Config) (err error) {
- if pk.PrivateKey == nil {
- return errors.InvalidArgumentError("there is no private key to encrypt")
- }
-
- pk.sha1Checksum = true
- pk.cipher = config.Cipher()
- s2kConfig := s2k.Config{
- Hash: config.Hash(),
- S2KCount: 0,
- }
- s2kBuf := bytes.NewBuffer(nil)
- derivedKey := make([]byte, pk.cipher.KeySize())
- err = s2k.Serialize(s2kBuf, derivedKey, config.Random(), passphrase, &s2kConfig)
- if err != nil {
- return err
- }
-
- pk.s2kHeader = s2kBuf.Bytes()
-	// There is no good way to set pk.s2k other than to call s2k.Parse():
-	// we have all the information here, but most of the functions needed
-	// are private to the s2k package.
-	pk.s2k, err = s2k.Parse(s2kBuf)
-	if err != nil {
-		return err
-	}
- pk.iv = make([]byte, pk.cipher.blockSize())
- if _, err = config.Random().Read(pk.iv); err != nil {
- return err
- }
-
- privateKeyBuf := bytes.NewBuffer(nil)
- if err = pk.serializePrivateKey(privateKeyBuf); err != nil {
- return err
- }
-
- checksum := sha1.Sum(privateKeyBuf.Bytes())
- if _, err = privateKeyBuf.Write(checksum[:]); err != nil {
- return err
- }
-
- pkData := privateKeyBuf.Bytes()
- block := pk.cipher.new(derivedKey)
- pk.encryptedData = make([]byte, len(pkData))
- cfb := cipher.NewCFBEncrypter(block, pk.iv)
- cfb.XORKeyStream(pk.encryptedData, pkData)
- pk.Encrypted = true
- return nil
-}
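
A usage sketch for Encrypt and Serialize together, assuming the vendored packet package and its rsa fork were importable:

package main

import (
	"crypto/rand"
	"os"
	"time"

	"github.com/keybase/go-crypto/openpgp/packet"
	"github.com/keybase/go-crypto/rsa"
)

func main() {
	rsaPriv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pk := packet.NewRSAPrivateKey(time.Now(), rsaPriv)

	// A nil config selects the defaults described above; Serialize then
	// writes the key with S2K usage 254 (SHA-1 checksum).
	if err := pk.Encrypt([]byte("correct horse"), nil); err != nil {
		panic(err)
	}
	if err := pk.Serialize(os.Stdout); err != nil {
		panic(err)
	}
}
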
-
-func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
- buf := bytes.NewBuffer(nil)
- err = pk.PublicKey.serializeWithoutHeaders(buf)
- if err != nil {
- return
- }
-
- privateKeyBuf := bytes.NewBuffer(nil)
-
- if pk.PrivateKey == nil {
- _, err = buf.Write([]byte{
- 254, // SHA-1 Convention
- 9, // Encryption scheme (AES256)
- 101, // GNU Extensions
- 2, // Hash value (SHA1)
- 'G', 'N', 'U', // "GNU" as a string
- 1, // Extension type 1001 (minus 1000)
- })
- } else if pk.Encrypted {
- _, err = buf.Write([]byte{
- 254, // SHA-1 Convention
- byte(pk.cipher), // Encryption scheme
- })
- if err != nil {
- return err
- }
- if _, err = buf.Write(pk.s2kHeader); err != nil {
- return err
- }
- if _, err = buf.Write(pk.iv); err != nil {
- return err
- }
- if _, err = privateKeyBuf.Write(pk.encryptedData); err != nil {
- return err
- }
- } else {
- buf.WriteByte(0 /* no encryption */)
- if err = pk.serializePrivateKey(privateKeyBuf); err != nil {
- return err
- }
- }
-
- ptype := packetTypePrivateKey
- contents := buf.Bytes()
- privateKeyBytes := privateKeyBuf.Bytes()
- if pk.IsSubkey {
- ptype = packetTypePrivateSubkey
- }
- totalLen := len(contents) + len(privateKeyBytes)
- if !pk.Encrypted {
- totalLen += 2
- }
- err = serializeHeader(w, ptype, totalLen)
- if err != nil {
- return
- }
- _, err = w.Write(contents)
- if err != nil {
- return
- }
- _, err = w.Write(privateKeyBytes)
- if err != nil {
- return
- }
-
- if len(privateKeyBytes) > 0 && !pk.Encrypted {
- checksum := mod64kHash(privateKeyBytes)
- var checksumBytes [2]byte
- checksumBytes[0] = byte(checksum >> 8)
- checksumBytes[1] = byte(checksum)
- _, err = w.Write(checksumBytes[:])
- }
-
- return
-}
-
-func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
- switch priv := pk.PrivateKey.(type) {
- case *rsa.PrivateKey:
- err = serializeRSAPrivateKey(w, priv)
- case *dsa.PrivateKey:
- err = serializeDSAPrivateKey(w, priv)
- case *elgamal.PrivateKey:
- err = serializeElGamalPrivateKey(w, priv)
- case *ecdsa.PrivateKey:
- err = serializeECDSAPrivateKey(w, priv)
- case *ecdh.PrivateKey:
- err = serializeECDHPrivateKey(w, priv)
- case *EdDSAPrivateKey:
- err = serializeEdDSAPrivateKey(w, priv)
- default:
- err = errors.InvalidArgumentError("unknown private key type")
- }
-
- return err
-}
-
-func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
- err := writeBig(w, priv.D)
- if err != nil {
- return err
- }
- err = writeBig(w, priv.Primes[1])
- if err != nil {
- return err
- }
- err = writeBig(w, priv.Primes[0])
- if err != nil {
- return err
- }
- return writeBig(w, priv.Precomputed.Qinv)
-}
-
-func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
- return writeBig(w, priv.X)
-}
-
-func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
- return writeBig(w, priv.X)
-}
-
-func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
- return writeBig(w, priv.D)
-}
-
-func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error {
- return writeBig(w, priv.X)
-}
-
-func serializeEdDSAPrivateKey(w io.Writer, priv *EdDSAPrivateKey) error {
- return writeMPI(w, priv.seed.bitLength, priv.seed.bytes)
-}
-
-// Decrypt decrypts an encrypted private key using a passphrase.
-func (pk *PrivateKey) Decrypt(passphrase []byte) error {
- if !pk.Encrypted {
- return nil
- }
- // For GNU Dummy S2K, there's no key here, so don't do anything.
- if pk.s2k == nil {
- return nil
- }
-
- key := make([]byte, pk.cipher.KeySize())
- pk.s2k(key, passphrase)
- block := pk.cipher.new(key)
- cfb := cipher.NewCFBDecrypter(block, pk.iv)
-
- data := make([]byte, len(pk.encryptedData))
- cfb.XORKeyStream(data, pk.encryptedData)
-
- if pk.sha1Checksum {
- if len(data) < sha1.Size {
- return errors.StructuralError("truncated private key data")
- }
- h := sha1.New()
- h.Write(data[:len(data)-sha1.Size])
- sum := h.Sum(nil)
- if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
- return errors.StructuralError("private key checksum failure")
- }
- data = data[:len(data)-sha1.Size]
- } else {
- if len(data) < 2 {
- return errors.StructuralError("truncated private key data")
- }
- var sum uint16
- for i := 0; i < len(data)-2; i++ {
- sum += uint16(data[i])
- }
- if data[len(data)-2] != uint8(sum>>8) ||
- data[len(data)-1] != uint8(sum) {
- return errors.StructuralError("private key checksum failure")
- }
- data = data[:len(data)-2]
- }
-
- return pk.parsePrivateKey(data)
-}
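
And the matching unlock path: parse a secret key packet, then Decrypt it. The file name is hypothetical:

package main

import (
	"fmt"
	"os"

	"github.com/keybase/go-crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("secret-key.gpg") // hypothetical binary secret key packet
	if err != nil {
		panic(err)
	}
	defer f.Close()

	p, err := packet.Read(f)
	if err != nil {
		panic(err)
	}
	pk, ok := p.(*packet.PrivateKey)
	if !ok {
		panic("not a private key packet")
	}
	// Decrypt is a no-op for unencrypted and GNU-dummy keys; a checksum
	// failure means a wrong passphrase or corrupted key material.
	if err := pk.Decrypt([]byte("correct horse")); err != nil {
		fmt.Println("decrypt failed:", err)
		return
	}
	fmt.Printf("unlocked: %T\n", pk.PrivateKey)
}
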
-
-func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
- switch pk.PublicKey.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
- return pk.parseRSAPrivateKey(data)
- case PubKeyAlgoDSA:
- return pk.parseDSAPrivateKey(data)
- case PubKeyAlgoElGamal:
- return pk.parseElGamalPrivateKey(data)
- case PubKeyAlgoECDSA:
- return pk.parseECDSAPrivateKey(data)
- case PubKeyAlgoECDH:
- return pk.parseECDHPrivateKey(data)
- case PubKeyAlgoEdDSA:
- return pk.parseEdDSAPrivateKey(data)
- }
- panic("impossible")
-}
-
-func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
- rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
- rsaPriv := new(rsa.PrivateKey)
- rsaPriv.PublicKey = *rsaPub
-
- buf := bytes.NewBuffer(data)
- d, _, err := readMPI(buf)
- if err != nil {
- return
- }
- p, _, err := readMPI(buf)
- if err != nil {
- return
- }
- q, _, err := readMPI(buf)
- if err != nil {
- return
- }
-
- rsaPriv.D = new(big.Int).SetBytes(d)
- rsaPriv.Primes = make([]*big.Int, 2)
- rsaPriv.Primes[0] = new(big.Int).SetBytes(p)
- rsaPriv.Primes[1] = new(big.Int).SetBytes(q)
- if err := rsaPriv.Validate(); err != nil {
- return err
- }
- rsaPriv.Precompute()
- pk.PrivateKey = rsaPriv
- pk.Encrypted = false
- pk.encryptedData = nil
-
- return nil
-}
-
-func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
- dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
- dsaPriv := new(dsa.PrivateKey)
- dsaPriv.PublicKey = *dsaPub
-
- buf := bytes.NewBuffer(data)
- x, _, err := readMPI(buf)
- if err != nil {
- return
- }
-
- dsaPriv.X = new(big.Int).SetBytes(x)
- pk.PrivateKey = dsaPriv
- pk.Encrypted = false
- pk.encryptedData = nil
-
- return nil
-}
-
-func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
- pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
- priv := new(elgamal.PrivateKey)
- priv.PublicKey = *pub
-
- buf := bytes.NewBuffer(data)
- x, _, err := readMPI(buf)
- if err != nil {
- return
- }
-
- priv.X = new(big.Int).SetBytes(x)
- pk.PrivateKey = priv
- pk.Encrypted = false
- pk.encryptedData = nil
-
- return nil
-}
-
-func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) {
- pub := pk.PublicKey.PublicKey.(*ecdh.PublicKey)
- priv := new(ecdh.PrivateKey)
- priv.PublicKey = *pub
-
- buf := bytes.NewBuffer(data)
- d, _, err := readMPI(buf)
- if err != nil {
- return
- }
-
- priv.X = new(big.Int).SetBytes(d)
- pk.PrivateKey = priv
- pk.Encrypted = false
- pk.encryptedData = nil
- return nil
-}
-
-func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
- ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)
- ecdsaPriv := new(ecdsa.PrivateKey)
- ecdsaPriv.PublicKey = *ecdsaPub
-
- buf := bytes.NewBuffer(data)
- d, _, err := readMPI(buf)
- if err != nil {
- return
- }
-
- ecdsaPriv.D = new(big.Int).SetBytes(d)
- pk.PrivateKey = ecdsaPriv
- pk.Encrypted = false
- pk.encryptedData = nil
-
- return nil
-}
-
-func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) {
- eddsaPriv := new(EdDSAPrivateKey)
- eddsaPriv.PublicKey = pk.PublicKey
-
- buf := bytes.NewBuffer(data)
- eddsaPriv.seed.bytes, eddsaPriv.seed.bitLength, err = readMPI(buf)
- if err != nil {
- return err
- }
-
-	if bLen := len(eddsaPriv.seed.bytes); bLen != 32 { // the private part of an ed25519 key is 32 bytes
- return errors.UnsupportedError(fmt.Sprintf("Unexpected EdDSA private key length: %d", bLen))
- }
-
- pk.PrivateKey = eddsaPriv
- pk.Encrypted = false
- pk.encryptedData = nil
-
- return nil
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go
deleted file mode 100644
index a46a008a..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go
+++ /dev/null
@@ -1,990 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/dsa"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/sha1"
- _ "crypto/sha256"
- _ "crypto/sha512"
- "encoding/binary"
- "fmt"
- "hash"
- "io"
- "math/big"
- "strconv"
- "time"
-
- "github.com/keybase/go-crypto/brainpool"
- "github.com/keybase/go-crypto/curve25519"
- "github.com/keybase/go-crypto/ed25519"
- "github.com/keybase/go-crypto/openpgp/ecdh"
- "github.com/keybase/go-crypto/openpgp/elgamal"
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/s2k"
- "github.com/keybase/go-crypto/rsa"
-)
-
-var (
- // NIST curve P-224
- oidCurveP224 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x21}
- // NIST curve P-256
- oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
- // NIST curve P-384
- oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22}
- // NIST curve P-521
- oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23}
- // Brainpool curve P-256r1
- oidCurveP256r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}
- // Brainpool curve P-384r1
- oidCurveP384r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}
- // Brainpool curve P-512r1
- oidCurveP512r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}
- // EdDSA
- oidEdDSA []byte = []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}
- // cv25519
- oidCurve25519 []byte = []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}
-)
-
-const maxOIDLength = 10
-
-// ecdsaKey stores the algorithm-specific fields for ECDSA keys,
-// as defined in RFC 6637, section 9.
-type ecdsaKey struct {
- // oid contains the OID byte sequence identifying the elliptic curve used
- oid []byte
- // p contains the elliptic curve point that represents the public key
- p parsedMPI
-}
-
-type edDSAkey struct {
- ecdsaKey
-}
-
-// copyFrontFill right-aligns src within dst[:length]; callers pass
-// zero-filled buffers, so any leading bytes remain zero.
-func copyFrontFill(dst, src []byte, length int) int {
- if srcLen := len(src); srcLen < length {
- return copy(dst[length-srcLen:], src[:])
- } else {
- return copy(dst[:], src[:])
- }
-}
-
-func (e *edDSAkey) Verify(payload []byte, r parsedMPI, s parsedMPI) bool {
- const halfSigSize = ed25519.SignatureSize / 2
- var sig [ed25519.SignatureSize]byte
-
- // NOTE: The first byte is 0x40 - MPI header
- // TODO: Maybe clean the code up and use 0x40 as a header when
- // reading and keep only actual number in p field. Find out how
- // other MPIs are stored.
- key := e.p.bytes[1:]
-
-	// Note: R and S may not form the 64-byte signature buffer that
-	// ed25519 expects, but because we copy them into an array of exactly
-	// that size, we always pass a correctly sized slice to Verify. (A
-	// slice that was too short would make ed25519 panic.)
- copyFrontFill(sig[:halfSigSize], r.bytes, halfSigSize)
- copyFrontFill(sig[halfSigSize:], s.bytes, halfSigSize)
-
- return ed25519.Verify(key, payload, sig[:])
-}
-
-// parseOID reads the OID for the curve as defined in RFC 6637, Section 9.
-func parseOID(r io.Reader) (oid []byte, err error) {
- buf := make([]byte, maxOIDLength)
- if _, err = readFull(r, buf[:1]); err != nil {
- return
- }
- oidLen := buf[0]
- if int(oidLen) > len(buf) {
- err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen)))
- return
- }
- oid = buf[:oidLen]
- _, err = readFull(r, oid)
- return
-}
-
-func (f *ecdsaKey) parse(r io.Reader) (err error) {
- if f.oid, err = parseOID(r); err != nil {
- return err
- }
- f.p.bytes, f.p.bitLength, err = readMPI(r)
- return err
-}
-
-func (f *ecdsaKey) serialize(w io.Writer) (err error) {
- buf := make([]byte, maxOIDLength+1)
- buf[0] = byte(len(f.oid))
- copy(buf[1:], f.oid)
- if _, err = w.Write(buf[:len(f.oid)+1]); err != nil {
- return
- }
- return writeMPIs(w, f.p)
-}
-
-func getCurveByOid(oid []byte) elliptic.Curve {
- switch {
- case bytes.Equal(oid, oidCurveP224):
- return elliptic.P224()
- case bytes.Equal(oid, oidCurveP256):
- return elliptic.P256()
- case bytes.Equal(oid, oidCurveP384):
- return elliptic.P384()
- case bytes.Equal(oid, oidCurveP521):
- return elliptic.P521()
- case bytes.Equal(oid, oidCurveP256r1):
- return brainpool.P256r1()
- case bytes.Equal(oid, oidCurveP384r1):
- return brainpool.P384r1()
- case bytes.Equal(oid, oidCurveP512r1):
- return brainpool.P512r1()
- case bytes.Equal(oid, oidCurve25519):
- return curve25519.Cv25519()
- default:
- return nil
- }
-}
-
-func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) {
- var c = getCurveByOid(f.oid)
- // Curve25519 should not be used in ECDSA.
- if c == nil || bytes.Equal(f.oid, oidCurve25519) {
- return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid))
- }
- // Note: Unmarshal already checks if point is on curve.
- x, y := elliptic.Unmarshal(c, f.p.bytes)
- if x == nil {
- return nil, errors.UnsupportedError("failed to parse EC point")
- }
- return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil
-}
-
-func (f *ecdsaKey) newECDH() (*ecdh.PublicKey, error) {
- var c = getCurveByOid(f.oid)
- if c == nil {
- return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid))
- }
- // ecdh.Unmarshal handles unmarshaling for all curve types. It
- // also checks if point is on curve.
- x, y := ecdh.Unmarshal(c, f.p.bytes)
- if x == nil {
- return nil, errors.UnsupportedError("failed to parse EC point")
- }
- return &ecdh.PublicKey{Curve: c, X: x, Y: y}, nil
-}
-
-func (f *ecdsaKey) byteLen() int {
- return 1 + len(f.oid) + 2 + len(f.p.bytes)
-}
-
-type kdfHashFunction byte
-type kdfAlgorithm byte
-
-// ecdhKdf stores key derivation function parameters
-// used for ECDH encryption. See RFC 6637, Section 9.
-type ecdhKdf struct {
- KdfHash kdfHashFunction
- KdfAlgo kdfAlgorithm
-}
-
-func (f *ecdhKdf) parse(r io.Reader) (err error) {
- buf := make([]byte, 1)
- if _, err = readFull(r, buf); err != nil {
- return
- }
- kdfLen := int(buf[0])
- if kdfLen < 3 {
- return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
- }
- buf = make([]byte, kdfLen)
- if _, err = readFull(r, buf); err != nil {
- return
- }
- reserved := int(buf[0])
- f.KdfHash = kdfHashFunction(buf[1])
- f.KdfAlgo = kdfAlgorithm(buf[2])
- if reserved != 0x01 {
- return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved))
- }
- return
-}
-
-func (f *ecdhKdf) serialize(w io.Writer) (err error) {
- buf := make([]byte, 4)
- // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys.
- buf[0] = byte(0x03) // Length of the following fields
- buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now
- buf[2] = byte(f.KdfHash)
- buf[3] = byte(f.KdfAlgo)
- _, err = w.Write(buf[:])
- return
-}
-
-func (f *ecdhKdf) byteLen() int {
- return 4
-}
-
-// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
-type PublicKey struct {
- CreationTime time.Time
- PubKeyAlgo PublicKeyAlgorithm
- PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey
- Fingerprint [20]byte
- KeyId uint64
- IsSubkey bool
-
- n, e, p, q, g, y parsedMPI
-
- // RFC 6637 fields
- ec *ecdsaKey
- ecdh *ecdhKdf
-
-	// EdDSA fields (no RFC available); reuses the ecdsaKey scaffolding
- edk *edDSAkey
-}
-
-// signingKey provides a convenient abstraction over signature verification
-// for v3 and v4 public keys.
-type signingKey interface {
- SerializeSignaturePrefix(io.Writer)
- serializeWithoutHeaders(io.Writer) error
-}
-
-// FromBig returns a parsedMPI holding the bytes and bit length of n.
-func FromBig(n *big.Int) parsedMPI {
- return parsedMPI{
- bytes: n.Bytes(),
- bitLength: uint16(n.BitLen()),
- }
-}
-
-// FromBytes returns a parsedMPI wrapping the given raw bytes.
-func FromBytes(bytes []byte) parsedMPI {
- return parsedMPI{
- bytes: bytes,
- bitLength: uint16(8 * len(bytes)),
- }
-}
-
-// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
-func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoRSA,
- PublicKey: pub,
- n: FromBig(pub.N),
- e: FromBig(big.NewInt(int64(pub.E))),
- }
-
- pk.setFingerPrintAndKeyId()
- return pk
-}
-
-// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
-func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoDSA,
- PublicKey: pub,
- p: FromBig(pub.P),
- q: FromBig(pub.Q),
- g: FromBig(pub.G),
- y: FromBig(pub.Y),
- }
-
- pk.setFingerPrintAndKeyId()
- return pk
-}
-
-// check validates EdDSA public key material. There is currently no RFC
-// for EdDSA in OpenPGP, but that does not mean it is not implemented or
-// in use.
-func (e *edDSAkey) check() error {
- if !bytes.Equal(e.oid, oidEdDSA) {
- return errors.UnsupportedError(fmt.Sprintf("Bad OID for EdDSA key: %v", e.oid))
- }
- if bLen := len(e.p.bytes); bLen != 33 { // 32 bytes for ed25519 key and 1 byte for 0x40 header
- return errors.UnsupportedError(fmt.Sprintf("Unexpected EdDSA public key length: %d", bLen))
- }
- return nil
-}
-
-// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
- pk := &PublicKey{
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoElGamal,
- PublicKey: pub,
- p: FromBig(pub.P),
- g: FromBig(pub.G),
- y: FromBig(pub.Y),
- }
-
- pk.setFingerPrintAndKeyId()
- return pk
-}
-
-func getCurveOid(curve elliptic.Curve) (res []byte, err error) {
- switch curve {
- case elliptic.P224():
- res = oidCurveP224
- case elliptic.P256():
- res = oidCurveP256
- case elliptic.P384():
- res = oidCurveP384
- case elliptic.P521():
- res = oidCurveP521
- case brainpool.P256r1():
- res = oidCurveP256r1
- case brainpool.P384r1():
- res = oidCurveP384r1
- case brainpool.P512r1():
- res = oidCurveP512r1
- case curve25519.Cv25519():
- res = oidCurve25519
- default:
- err = errors.UnsupportedError("unknown curve")
- }
- return
-}
-
-func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoECDSA,
- PublicKey: pub,
- ec: new(ecdsaKey),
- }
- oid, _ := getCurveOid(pub.Curve)
- pk.ec.oid = oid
- bs, bitLen := ecdh.Marshal(pub.Curve, pub.X, pub.Y)
- pk.ec.p.bytes = bs
- pk.ec.p.bitLength = uint16(bitLen)
-
- pk.setFingerPrintAndKeyId()
- return pk
-}
-
-func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey {
- pk := &PublicKey{
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoECDH,
- PublicKey: pub,
- ec: new(ecdsaKey),
- }
- oid, _ := getCurveOid(pub.Curve)
- pk.ec.oid = oid
- bs, bitLen := ecdh.Marshal(pub.Curve, pub.X, pub.Y)
- pk.ec.p.bytes = bs
- pk.ec.p.bitLength = uint16(bitLen)
-
- hashbyte, _ := s2k.HashToHashId(crypto.SHA512)
- pk.ecdh = &ecdhKdf{
- KdfHash: kdfHashFunction(hashbyte),
- KdfAlgo: kdfAlgorithm(CipherAES256),
- }
-
- pk.setFingerPrintAndKeyId()
- return pk
-}
-
-func (pk *PublicKey) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.5.2
- var buf [6]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- if buf[0] != 4 {
- return errors.UnsupportedError("public key version")
- }
- pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
- pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- err = pk.parseRSA(r)
- case PubKeyAlgoDSA:
- err = pk.parseDSA(r)
- case PubKeyAlgoElGamal:
- err = pk.parseElGamal(r)
- case PubKeyAlgoEdDSA:
- pk.edk = new(edDSAkey)
- if err = pk.edk.parse(r); err != nil {
- return err
- }
- err = pk.edk.check()
- case PubKeyAlgoECDSA:
- pk.ec = new(ecdsaKey)
- if err = pk.ec.parse(r); err != nil {
- return err
- }
- pk.PublicKey, err = pk.ec.newECDSA()
- case PubKeyAlgoECDH:
- pk.ec = new(ecdsaKey)
- if err = pk.ec.parse(r); err != nil {
- return
- }
- pk.ecdh = new(ecdhKdf)
- if err = pk.ecdh.parse(r); err != nil {
- return
- }
- pk.PublicKey, err = pk.ec.newECDH()
-		// The key is in ElGamal format but has a nil implementation: it
-		// will load, but no operations are possible with it.
- err = pk.parseElGamal(r)
- if err != nil {
- pk.PublicKey = nil
- }
- default:
- err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
- }
- if err != nil {
- return
- }
-
- pk.setFingerPrintAndKeyId()
- return
-}
-
-func (pk *PublicKey) setFingerPrintAndKeyId() {
- // RFC 4880, section 12.2
- fingerPrint := sha1.New()
- pk.SerializeSignaturePrefix(fingerPrint)
- pk.serializeWithoutHeaders(fingerPrint)
- copy(pk.Fingerprint[:], fingerPrint.Sum(nil))
- pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
-}
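
The fingerprint above can be reproduced with the standard library alone: SHA-1 over a 0x99 prefix, a two-octet body length, and the serialized key body, with the key ID taken from the low 64 bits. A sketch over a hypothetical key body:

package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
)

// v4Fingerprint mirrors setFingerPrintAndKeyId: SHA-1 over the 0x99
// prefix, a two-octet body length, and the public key body itself.
func v4Fingerprint(body []byte) (fp [20]byte, keyID uint64) {
	h := sha1.New()
	h.Write([]byte{0x99, byte(len(body) >> 8), byte(len(body))})
	h.Write(body)
	copy(fp[:], h.Sum(nil))
	return fp, binary.BigEndian.Uint64(fp[12:20])
}

func main() {
	body := []byte{0x04 /* ...hypothetical serialized key body... */}
	fp, id := v4Fingerprint(body)
	fmt.Printf("fingerprint %X\nkey id      %016X\n", fp, id)
}
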
-
-// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
- pk.n.bytes, pk.n.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
- pk.e.bytes, pk.e.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
-
- if len(pk.e.bytes) > 7 {
- err = errors.UnsupportedError("large public exponent")
- return
- }
- rsa := &rsa.PublicKey{
- N: new(big.Int).SetBytes(pk.n.bytes),
- E: 0,
- }
-	// Warning: incompatibility with crypto/rsa: the keybase fork uses
-	// int64 public exponents instead of the standard library's int.
- for i := 0; i < len(pk.e.bytes); i++ {
- rsa.E <<= 8
- rsa.E |= int64(pk.e.bytes[i])
- }
- pk.PublicKey = rsa
- return
-}
-
-// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
- pk.p.bytes, pk.p.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
- pk.q.bytes, pk.q.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
- pk.g.bytes, pk.g.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
- pk.y.bytes, pk.y.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
-
- dsa := new(dsa.PublicKey)
- dsa.P = new(big.Int).SetBytes(pk.p.bytes)
- dsa.Q = new(big.Int).SetBytes(pk.q.bytes)
- dsa.G = new(big.Int).SetBytes(pk.g.bytes)
- dsa.Y = new(big.Int).SetBytes(pk.y.bytes)
- pk.PublicKey = dsa
- return
-}
-
-// parseElGamal parses ElGamal public key material from the given Reader. See
-// RFC 4880, section 5.5.2.
-func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
- pk.p.bytes, pk.p.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
- pk.g.bytes, pk.g.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
- pk.y.bytes, pk.y.bitLength, err = readMPI(r)
- if err != nil {
- return
- }
-
- elgamal := new(elgamal.PublicKey)
- elgamal.P = new(big.Int).SetBytes(pk.p.bytes)
- elgamal.G = new(big.Int).SetBytes(pk.g.bytes)
- elgamal.Y = new(big.Int).SetBytes(pk.y.bytes)
- pk.PublicKey = elgamal
- return
-}
-
-// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
-// The prefix is used when calculating a signature over this public key. See
-// RFC 4880, section 5.2.4.
-func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) {
- var pLength uint16
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- pLength += 2 + uint16(len(pk.n.bytes))
- pLength += 2 + uint16(len(pk.e.bytes))
- case PubKeyAlgoDSA:
- pLength += 2 + uint16(len(pk.p.bytes))
- pLength += 2 + uint16(len(pk.q.bytes))
- pLength += 2 + uint16(len(pk.g.bytes))
- pLength += 2 + uint16(len(pk.y.bytes))
- case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal:
- pLength += 2 + uint16(len(pk.p.bytes))
- pLength += 2 + uint16(len(pk.g.bytes))
- pLength += 2 + uint16(len(pk.y.bytes))
- case PubKeyAlgoECDSA:
- pLength += uint16(pk.ec.byteLen())
- case PubKeyAlgoECDH:
- pLength += uint16(pk.ec.byteLen())
- pLength += uint16(pk.ecdh.byteLen())
- case PubKeyAlgoEdDSA:
- pLength += uint16(pk.edk.byteLen())
- default:
- panic("unknown public key algorithm")
- }
- pLength += 6
- h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
- return
-}
-
-func (pk *PublicKey) Serialize(w io.Writer) (err error) {
- length := 6 // 6 byte header
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- length += 2 + len(pk.n.bytes)
- length += 2 + len(pk.e.bytes)
- case PubKeyAlgoDSA:
- length += 2 + len(pk.p.bytes)
- length += 2 + len(pk.q.bytes)
- length += 2 + len(pk.g.bytes)
- length += 2 + len(pk.y.bytes)
- case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal:
- length += 2 + len(pk.p.bytes)
- length += 2 + len(pk.g.bytes)
- length += 2 + len(pk.y.bytes)
- case PubKeyAlgoECDSA:
- length += pk.ec.byteLen()
- case PubKeyAlgoECDH:
- length += pk.ec.byteLen()
- length += pk.ecdh.byteLen()
- case PubKeyAlgoEdDSA:
- length += pk.edk.byteLen()
- default:
- panic("unknown public key algorithm")
- }
-
- packetType := packetTypePublicKey
- if pk.IsSubkey {
- packetType = packetTypePublicSubkey
- }
- err = serializeHeader(w, packetType, length)
- if err != nil {
- return
- }
- return pk.serializeWithoutHeaders(w)
-}
-
-// serializeWithoutHeaders marshals the PublicKey to w in the form of an
-// OpenPGP public key packet, not including the packet header.
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
- var buf [6]byte
- buf[0] = 4
- t := uint32(pk.CreationTime.Unix())
- buf[1] = byte(t >> 24)
- buf[2] = byte(t >> 16)
- buf[3] = byte(t >> 8)
- buf[4] = byte(t)
- buf[5] = byte(pk.PubKeyAlgo)
-
- _, err = w.Write(buf[:])
- if err != nil {
- return
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- return writeMPIs(w, pk.n, pk.e)
- case PubKeyAlgoDSA:
- return writeMPIs(w, pk.p, pk.q, pk.g, pk.y)
- case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal:
- return writeMPIs(w, pk.p, pk.g, pk.y)
- case PubKeyAlgoECDSA:
- return pk.ec.serialize(w)
- case PubKeyAlgoEdDSA:
- return pk.edk.serialize(w)
- case PubKeyAlgoECDH:
- if err = pk.ec.serialize(w); err != nil {
- return
- }
- return pk.ecdh.serialize(w)
- }
- return errors.InvalidArgumentError("bad public-key algorithm")
-}
-
-// CanSign returns true iff this public key can generate signatures
-func (pk *PublicKey) CanSign() bool {
- return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal
-}
-
-// VerifySignature returns nil iff sig is a valid signature, made by this
-// public key, of the data hashed into signed. signed is mutated by this call.
-func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
- if !pk.CanSign() {
- return errors.InvalidArgumentError("public key cannot generate signatures")
- }
-
- signed.Write(sig.HashSuffix)
- hashBytes := signed.Sum(nil)
-
- // NOTE(maxtaco) 2016-08-22
- //
- // We used to do this:
- //
- // if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
- // return errors.SignatureError("hash tag doesn't match")
- // }
- //
- // But don't do anything in this case. Some GPGs generate bad
- // 2-byte hash prefixes, but GPG also doesn't seem to care on
- // import. See BrentMaxwell's key. I think it's safe to disable
- // this check!
-
- if pk.PubKeyAlgo != sig.PubKeyAlgo {
- return errors.InvalidArgumentError("public key and signature use different algorithms")
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
- err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes))
- if err != nil {
- return errors.SignatureError("RSA verification failure")
- }
- return nil
- case PubKeyAlgoDSA:
- dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
- if len(hashBytes) > subgroupSize {
- hashBytes = hashBytes[:subgroupSize]
- }
- if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
- return errors.SignatureError("DSA verification failure")
- }
- return nil
- case PubKeyAlgoECDSA:
- ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
- if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) {
- return errors.SignatureError("ECDSA verification failure")
- }
- return nil
- case PubKeyAlgoEdDSA:
- if !pk.edk.Verify(hashBytes, sig.EdDSASigR, sig.EdDSASigS) {
- return errors.SignatureError("EdDSA verification failure")
- }
- return nil
- default:
- return errors.SignatureError("Unsupported public key algorithm used in signature")
- }
- panic("unreachable")
-}
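
Putting VerifySignature to use on a detached binary signature looks roughly like the sketch below; the helper name is hypothetical, and the hash must have been registered by an import such as crypto/sha256:

package pgpsketch

import (
	"github.com/keybase/go-crypto/openpgp/packet"
)

// verifyDetached checks sig over msg with pk. The caller only hashes the
// message body; VerifySignature writes sig.HashSuffix itself.
func verifyDetached(pk *packet.PublicKey, sig *packet.Signature, msg []byte) error {
	h := sig.Hash.New() // panics if the hash was not registered via import
	h.Write(msg)
	return pk.VerifySignature(h, sig)
}
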
-
-// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
-// public key, of the data hashed into signed. signed is mutated by this call.
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
- if !pk.CanSign() {
- return errors.InvalidArgumentError("public key cannot generate signatures")
- }
-
- suffix := make([]byte, 5)
- suffix[0] = byte(sig.SigType)
- binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
- signed.Write(suffix)
- hashBytes := signed.Sum(nil)
-
- if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
- return errors.SignatureError("hash tag doesn't match")
- }
-
- if pk.PubKeyAlgo != sig.PubKeyAlgo {
- return errors.InvalidArgumentError("public key and signature use different algorithms")
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
- if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil {
- return errors.SignatureError("RSA verification failure")
- }
- return
- case PubKeyAlgoDSA:
- dsaPublicKey := pk.PublicKey.(*dsa.PublicKey)
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
- if len(hashBytes) > subgroupSize {
- hashBytes = hashBytes[:subgroupSize]
- }
- if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
- return errors.SignatureError("DSA verification failure")
- }
- return nil
- default:
- panic("shouldn't happen")
- }
- panic("unreachable")
-}
-
-// keySignatureHash returns a Hash of the message that needs to be signed for
-// pk to assert a subkey relationship to signed.
-func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
- if !hashFunc.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- h = hashFunc.New()
-
- updateKeySignatureHash(pk, signed, h)
-
- return
-}
-
-// updateKeySignatureHash does the actual hash updates for keySignatureHash.
-func updateKeySignatureHash(pk, signed signingKey, h hash.Hash) {
- // RFC 4880, section 5.2.4
- pk.SerializeSignaturePrefix(h)
- pk.serializeWithoutHeaders(h)
- signed.SerializeSignaturePrefix(h)
- signed.serializeWithoutHeaders(h)
-}
-
-// VerifyKeySignature returns nil iff sig is a valid signature, made by this
-// public key, of signed.
-func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
- h, err := keySignatureHash(pk, signed, sig.Hash)
- if err != nil {
- return err
- }
- if err = pk.VerifySignature(h, sig); err != nil {
- return err
- }
-
- if sig.FlagSign {
-
- // BUG(maxtaco)
- //
-	// We should check for more than FlagSign here, because if
-	// you read keys.go, we can sometimes use signing subkeys even if
-	// they're not explicitly flagged as such. However, doing so fails
-	// lots of currently working tests, so I'm not going to do much here.
- //
- // In other words, we should have this disjunction in the condition above:
- //
- // || (!sig.FlagsValid && pk.PubKeyAlgo.CanSign()) {
- //
-
- // Signing subkeys must be cross-signed. See
- // https://www.gnupg.org/faq/subkey-cross-certify.html.
- if sig.EmbeddedSignature == nil {
- return errors.StructuralError("signing subkey is missing cross-signature")
- }
- // Verify the cross-signature. This is calculated over the same
- // data as the main signature, so we cannot just recursively
- // call signed.VerifyKeySignature(...)
- if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
- return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
- }
- if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
- return errors.StructuralError("error while verifying cross-signature: " + err.Error())
- }
- }
-
- return nil
-}
-
-func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
- if !hashFunc.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- h = hashFunc.New()
-
- // RFC 4880, section 5.2.4
- pk.SerializeSignaturePrefix(h)
- pk.serializeWithoutHeaders(h)
-
- return
-}
-
-// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
-// public key.
-func (pk *PublicKey) VerifyRevocationSignature(revokedKey *PublicKey, sig *Signature) (err error) {
- h, err := keyRevocationHash(revokedKey, sig.Hash)
- if err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// teeHash wraps a hash.Hash and dumps every write to stdout; it is a
-// debugging aid.
-type teeHash struct {
- h hash.Hash
-}
-
-func (t teeHash) Write(b []byte) (n int, err error) {
- fmt.Printf("hash -> %s %+v\n", string(b), b)
- return t.h.Write(b)
-}
-func (t teeHash) Sum(b []byte) []byte { return t.h.Sum(b) }
-func (t teeHash) Reset() { t.h.Reset() }
-func (t teeHash) Size() int { return t.h.Size() }
-func (t teeHash) BlockSize() int { return t.h.BlockSize() }
-
-// userIdSignatureHash returns a Hash of the message that needs to be signed
-// to assert that pk is a valid key for id.
-func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
- if !hashFunc.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- h = hashFunc.New()
-
- updateUserIdSignatureHash(id, pk, h)
-
- return
-}
-
-// updateUserIdSignatureHash does the actual hash updates for
-// userIdSignatureHash.
-func updateUserIdSignatureHash(id string, pk *PublicKey, h hash.Hash) {
- // RFC 4880, section 5.2.4
- pk.SerializeSignaturePrefix(h)
- pk.serializeWithoutHeaders(h)
-
- var buf [5]byte
- buf[0] = 0xb4
- buf[1] = byte(len(id) >> 24)
- buf[2] = byte(len(id) >> 16)
- buf[3] = byte(len(id) >> 8)
- buf[4] = byte(len(id))
- h.Write(buf[:])
- h.Write([]byte(id))
-
- return
-}
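
The 0xb4-plus-length framing above stops an attacker from shifting bytes between the key material and the user ID. A standalone mirror of the framing:

package main

import (
	"crypto/sha256"
	"fmt"
)

// hashUserId mirrors updateUserIdSignatureHash: the user ID is framed
// with 0xb4 and a four-octet big-endian length before hashing.
func hashUserId(keyPrefix []byte, id string) []byte {
	h := sha256.New()
	h.Write(keyPrefix) // stands in for the serialized public key material
	n := len(id)
	h.Write([]byte{0xb4, byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n)})
	h.Write([]byte(id))
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", hashUserId([]byte{0x99}, "Alice <alice@example.com>"))
}
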
-
-// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
-// public key, that id is the identity of pub.
-func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
- h, err := userIdSignatureHash(id, pub, sig.Hash)
- if err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
-// public key, that id is the identity of pub.
-func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) {
- h, err := userIdSignatureV3Hash(id, pub, sig.Hash)
- if err != nil {
- return err
- }
- return pk.VerifySignatureV3(h, sig)
-}
-
-// KeyIdString returns the public key's 64-bit key ID in capital hex
-// (e.g. "6C7EE1B8621CC013").
-func (pk *PublicKey) KeyIdString() string {
- return fmt.Sprintf("%X", pk.Fingerprint[12:20])
-}
-
-// KeyIdShortString returns the short form of the public key's key ID
-// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
-func (pk *PublicKey) KeyIdShortString() string {
- return fmt.Sprintf("%X", pk.Fingerprint[16:20])
-}
-
-// A parsedMPI is used to store the contents of a big integer, along with the
-// bit length that was specified in the original input. This allows the MPI to
-// be reserialized exactly.
-type parsedMPI struct {
- bytes []byte
- bitLength uint16
-}
-
-// writeMPIs is a utility function for serializing several big integers to the
-// given Writer.
-func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) {
- for _, mpi := range mpis {
- err = writeMPI(w, mpi.bitLength, mpi.bytes)
- if err != nil {
- return
- }
- }
- return
-}
-
-// BitLength returns the bit length for the given public key. It is used
-// for displaying key information; the actual buffers and big.Ints inside
-// may have a different size if the key is invalid.
-func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- bitLength = pk.n.bitLength
- case PubKeyAlgoDSA:
- bitLength = pk.p.bitLength
- case PubKeyAlgoElGamal, PubKeyAlgoBadElGamal:
- bitLength = pk.p.bitLength
- case PubKeyAlgoECDH:
- ecdhPublicKey := pk.PublicKey.(*ecdh.PublicKey)
- bitLength = uint16(ecdhPublicKey.Curve.Params().BitSize)
- case PubKeyAlgoECDSA:
- ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
- bitLength = uint16(ecdsaPublicKey.Curve.Params().BitSize)
- case PubKeyAlgoEdDSA:
-		// EdDSA only supports the ed25519 curve right now, so just
-		// return its length. Also, we don't have any PublicKey.Curve
-		// object to look the size up from.
- bitLength = 256
- default:
- err = errors.InvalidArgumentError("bad public-key algorithm")
- }
- return
-}
-
-func (pk *PublicKey) ErrorIfDeprecated() error {
- switch pk.PubKeyAlgo {
- case PubKeyAlgoBadElGamal:
- return errors.DeprecatedKeyError("ElGamal Encrypt or Sign (algo 20) is deprecated")
- default:
- return nil
- }
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go
deleted file mode 100644
index f75cbeab..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "crypto/md5"
- "encoding/binary"
- "fmt"
- "hash"
- "io"
- "math/big"
- "strconv"
- "time"
-
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/rsa"
-)
-
-// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and
-// should not be used for signing or encrypting. They are supported here only for
-// parsing version 3 key material and validating signatures.
-// See RFC 4880, section 5.5.2.
-type PublicKeyV3 struct {
- CreationTime time.Time
- DaysToExpire uint16
- PubKeyAlgo PublicKeyAlgorithm
- PublicKey *rsa.PublicKey
- Fingerprint [16]byte
- KeyId uint64
- IsSubkey bool
-
- n, e parsedMPI
-}
-
-// newRSAPublicKeyV3 returns a PublicKeyV3 that wraps the given rsa.PublicKey.
-// Included here for testing purposes only. RFC 4880, section 5.5.2:
-// "an implementation MUST NOT generate a V3 key, but MAY accept it."
-func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 {
- pk := &PublicKeyV3{
- CreationTime: creationTime,
- PublicKey: pub,
- n: FromBig(pub.N),
- e: FromBig(big.NewInt(int64(pub.E))),
- }
-
- pk.setFingerPrintAndKeyId()
- return pk
-}
-
-func (pk *PublicKeyV3) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.5.2
- var buf [8]byte
- if _, err = readFull(r, buf[:]); err != nil {
- return
- }
- if buf[0] < 2 || buf[0] > 3 {
- return errors.UnsupportedError("public key version")
- }
- pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
- pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7])
- pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7])
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- err = pk.parseRSA(r)
- default:
- err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
- }
- if err != nil {
- return
- }
-
- pk.setFingerPrintAndKeyId()
- return
-}
-
-func (pk *PublicKeyV3) setFingerPrintAndKeyId() {
- // RFC 4880, section 12.2
- fingerPrint := md5.New()
- fingerPrint.Write(pk.n.bytes)
- fingerPrint.Write(pk.e.bytes)
- fingerPrint.Sum(pk.Fingerprint[:0])
- pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:])
-}
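
Unlike v4 keys, the v3 fingerprint is MD5 over the raw modulus and exponent bytes with no length framing, and the key ID comes straight from the low 64 bits of the modulus, which is why v3 key IDs are trivially forgeable. A standalone mirror:

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// v3Fingerprint mirrors the method above: MD5 over modulus and exponent
// bytes, with the key ID taken from the modulus itself rather than from
// the fingerprint.
func v3Fingerprint(n, e []byte) (fp [16]byte, keyID uint64) {
	h := md5.New()
	h.Write(n)
	h.Write(e)
	copy(fp[:], h.Sum(nil))
	return fp, binary.BigEndian.Uint64(n[len(n)-8:])
}

func main() {
	n := make([]byte, 128) // hypothetical 1024-bit modulus
	n[0], n[127] = 0xc0, 0x13
	fp, id := v3Fingerprint(n, []byte{0x01, 0x00, 0x01})
	fmt.Printf("%X %016X\n", fp, id)
}
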
-
-// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) {
- if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil {
- return
- }
- if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil {
- return
- }
-
- // RFC 4880 Section 12.2 requires the low 8 bytes of the
- // modulus to form the key id.
- if len(pk.n.bytes) < 8 {
- return errors.StructuralError("v3 public key modulus is too short")
- }
- if len(pk.e.bytes) > 7 {
- err = errors.UnsupportedError("large public exponent")
- return
- }
- rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)}
-	// Warning: incompatibility with crypto/rsa: the keybase fork uses
-	// int64 public exponents instead of the standard library's int.
- for i := 0; i < len(pk.e.bytes); i++ {
- rsa.E <<= 8
- rsa.E |= int64(pk.e.bytes[i])
- }
- pk.PublicKey = rsa
- return
-}
-
-// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
-// The prefix is used when calculating a signature over this public key. See
-// RFC 4880, section 5.2.4.
-func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) {
- var pLength uint16
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- pLength += 2 + uint16(len(pk.n.bytes))
- pLength += 2 + uint16(len(pk.e.bytes))
- default:
- panic("unknown public key algorithm")
- }
- pLength += 6
- w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
- return
-}
-
-func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) {
- length := 8 // 8 byte header
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- length += 2 + len(pk.n.bytes)
- length += 2 + len(pk.e.bytes)
- default:
- panic("unknown public key algorithm")
- }
-
- packetType := packetTypePublicKey
- if pk.IsSubkey {
- packetType = packetTypePublicSubkey
- }
- if err = serializeHeader(w, packetType, length); err != nil {
- return
- }
- return pk.serializeWithoutHeaders(w)
-}
-
-// serializeWithoutHeaders marshals the PublicKeyV3 to w in the form of
-// an OpenPGP public key packet, not including the packet header.
-func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) {
- var buf [8]byte
- // Version 3
- buf[0] = 3
- // Creation time
- t := uint32(pk.CreationTime.Unix())
- buf[1] = byte(t >> 24)
- buf[2] = byte(t >> 16)
- buf[3] = byte(t >> 8)
- buf[4] = byte(t)
- // Days to expire
- buf[5] = byte(pk.DaysToExpire >> 8)
- buf[6] = byte(pk.DaysToExpire)
- // Public key algorithm
- buf[7] = byte(pk.PubKeyAlgo)
-
- if _, err = w.Write(buf[:]); err != nil {
- return
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- return writeMPIs(w, pk.n, pk.e)
- }
- return errors.InvalidArgumentError("bad public-key algorithm")
-}
-
-// CanSign returns true iff this public key can generate signatures
-func (pk *PublicKeyV3) CanSign() bool {
- return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly
-}
-
-// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
-// public key, of the data hashed into signed. signed is mutated by this call.
-func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
- if !pk.CanSign() {
- return errors.InvalidArgumentError("public key cannot generate signatures")
- }
-
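-	// v3 signatures hash a five-octet suffix after the signed data: the
-	// signature type followed by the big-endian creation time (RFC 4880,
-	// section 5.2.2).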
- suffix := make([]byte, 5)
- suffix[0] = byte(sig.SigType)
- binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
- signed.Write(suffix)
- hashBytes := signed.Sum(nil)
-
- if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
- return errors.SignatureError("hash tag doesn't match")
- }
-
- if pk.PubKeyAlgo != sig.PubKeyAlgo {
- return errors.InvalidArgumentError("public key and signature use different algorithms")
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
- return errors.SignatureError("RSA verification failure")
- }
- return
- default:
- // V3 public keys only support RSA.
- panic("shouldn't happen")
- }
-}
-
-// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
-// public key, asserting that id is the identity of pub.
-func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) {
- h, err := userIdSignatureV3Hash(id, pk, sig.Hash)
- if err != nil {
- return err
- }
- return pk.VerifySignatureV3(h, sig)
-}
-
-// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this
-// public key, of signed.
-func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) {
- h, err := keySignatureHash(pk, signed, sig.Hash)
- if err != nil {
- return err
- }
- return pk.VerifySignatureV3(h, sig)
-}
-
-// userIdSignatureV3Hash returns a Hash of the message that needs to be signed
-// to assert that pk is a valid key for id.
-func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) {
- if !hfn.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- h = hfn.New()
-
- // RFC 4880, section 5.2.4
- pk.SerializeSignaturePrefix(h)
- pk.serializeWithoutHeaders(h)
-
- h.Write([]byte(id))
-
- return
-}
-
-// KeyIdString returns the public key's key ID in capital hex
-// (e.g. "6C7EE1B8621CC013").
-func (pk *PublicKeyV3) KeyIdString() string {
- return fmt.Sprintf("%X", pk.KeyId)
-}
-
-// KeyIdShortString returns the short form of the public key's key ID
-// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
-func (pk *PublicKeyV3) KeyIdShortString() string {
- return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF)
-}
-
-// BitLength returns the bit length for the given public key.
-func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) {
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- bitLength = pk.n.bitLength
- default:
- err = errors.InvalidArgumentError("bad public-key algorithm")
- }
- return
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go
deleted file mode 100644
index 957b3b89..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
-
- "github.com/keybase/go-crypto/openpgp/errors"
-)
-
-// Reader reads packets from an io.Reader and allows packets to be 'unread' so
-// that they result from the next call to Next.
-type Reader struct {
- q []Packet
- readers []io.Reader
-}
-
-// New io.Readers are pushed when a compressed or encrypted packet is processed
-// and recursively treated as a new source of packets. However, a carefully
-// crafted packet can trigger an infinite recursive sequence of packets. See
-// http://mumble.net/~campbell/misc/pgp-quine
-// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
-// This constant limits the number of recursive packets that may be pushed.
-const maxReaders = 32
-
-// Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown packet types are skipped.
-func (r *Reader) Next() (p Packet, err error) {
- if len(r.q) > 0 {
- p = r.q[len(r.q)-1]
- r.q = r.q[:len(r.q)-1]
- return
- }
-
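-	// Read from the top-most reader, popping readers on EOF and skipping
-	// unknown packet types.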
- for len(r.readers) > 0 {
- p, err = Read(r.readers[len(r.readers)-1])
- if err == nil {
- return
- }
- if err == io.EOF {
- r.readers = r.readers[:len(r.readers)-1]
- continue
- }
- if _, ok := err.(errors.UnknownPacketTypeError); !ok {
- return nil, err
- }
- }
- return nil, io.EOF
-}
-
-// Push causes the Reader to start reading from a new io.Reader. When an EOF
-// error is seen from the new io.Reader, it is popped and the Reader continues
-// to read from the next most recent io.Reader. Push returns a StructuralError
-// if pushing the reader would exceed the maximum recursion level, otherwise it
-// returns nil.
-func (r *Reader) Push(reader io.Reader) (err error) {
- if len(r.readers) >= maxReaders {
- return errors.StructuralError("too many layers of packets")
- }
- r.readers = append(r.readers, reader)
- return nil
-}
-
-// Unread causes the given Packet to be returned from the next call to Next.
-func (r *Reader) Unread(p Packet) {
- r.q = append(r.q, p)
-}
-
-func NewReader(r io.Reader) *Reader {
- return &Reader{
- q: nil,
- readers: []io.Reader{r},
- }
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go
deleted file mode 100644
index 383a8a6a..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go
+++ /dev/null
@@ -1,923 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/dsa"
- "crypto/ecdsa"
- "encoding/binary"
- "fmt"
- "hash"
- "io"
- "strconv"
- "time"
-
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/s2k"
- "github.com/keybase/go-crypto/rsa"
-)
-
-const (
- // See RFC 4880, section 5.2.3.21 for details.
- KeyFlagCertify = 1 << iota
- KeyFlagSign
- KeyFlagEncryptCommunications
- KeyFlagEncryptStorage
-)
-
-// Signer can be implemented by application code to do actual signing.
-type Signer interface {
- hash.Hash
- Sign(sig *Signature) error
- KeyId() uint64
- PublicKeyAlgo() PublicKeyAlgorithm
-}
-
-// RevocationKey represents a designated revoker packet. See RFC 4880
-// section 5.2.3.15 for details.
-type RevocationKey struct {
- Class byte
- PublicKeyAlgo PublicKeyAlgorithm
- Fingerprint []byte
-}
-
-// KeyFlagBits holds a boolean indicating whether any usage flags were
-// provided in the signature, and a BitField of KeyFlag* flags.
-type KeyFlagBits struct {
- Valid bool
- BitField byte
-}
-
-// Signature represents a signature. See RFC 4880, section 5.2.
-type Signature struct {
- SigType SignatureType
- PubKeyAlgo PublicKeyAlgorithm
- Hash crypto.Hash
-
- // HashSuffix is extra data that is hashed in after the signed data.
- HashSuffix []byte
- // HashTag contains the first two bytes of the hash for fast rejection
- // of bad signed data.
- HashTag [2]byte
- CreationTime time.Time
-
- RSASignature parsedMPI
- DSASigR, DSASigS parsedMPI
- ECDSASigR, ECDSASigS parsedMPI
- EdDSASigR, EdDSASigS parsedMPI
-
- // rawSubpackets contains the unparsed subpackets, in order.
- rawSubpackets []outputSubpacket
-
- // The following are optional so are nil when not included in the
- // signature.
-
- SigLifetimeSecs, KeyLifetimeSecs *uint32
- PreferredSymmetric, PreferredHash, PreferredCompression []uint8
- PreferredKeyServer string
- IssuerKeyId *uint64
- IsPrimaryId *bool
- IssuerFingerprint []byte
-
- // FlagsValid is set if any flags were given. See RFC 4880, section
- // 5.2.3.21 for details.
- FlagsValid bool
- FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
-
- // RevocationReason is set if this signature has been revoked.
- // See RFC 4880, section 5.2.3.23 for details.
- RevocationReason *uint8
- RevocationReasonText string
-
- // PolicyURI is optional. See RFC 4880, Section 5.2.3.20 for details
- PolicyURI string
-
- // Regex is a regex that can match a PGP UID. See RFC 4880, 5.2.3.14 for details
- Regex string
-
- // MDC is set if this signature has a feature packet that indicates
- // support for MDC subpackets.
- MDC bool
-
- // EmbeddedSignature, if non-nil, is a signature of the parent key, by
- // this key. This prevents an attacker from claiming another's signing
- // subkey as their own.
- EmbeddedSignature *Signature
-
- // StubbedOutCriticalError is not fail-stop, since it shouldn't break key parsing
- // when appearing in WoT-style cross signatures. But it should prevent a signature
- // from being applied to a primary or subkey.
- StubbedOutCriticalError error
-
-	// DesignatedRevoker will be present if this signature certifies a
- // designated revoking key id (3rd party key that can sign
- // revocation for this key).
- DesignatedRevoker *RevocationKey
-
- outSubpackets []outputSubpacket
-}
-
-func (sig *Signature) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.2.3
- var buf [5]byte
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- if buf[0] != 4 {
- err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
- return
- }
-
- _, err = readFull(r, buf[:5])
- if err != nil {
- return
- }
- sig.SigType = SignatureType(buf[0])
- sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
- default:
- err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
- return
- }
-
- var ok bool
- sig.Hash, ok = s2k.HashIdToHash(buf[2])
- if !ok {
- return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
- }
-
- hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
- l := 6 + hashedSubpacketsLength
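-	// HashSuffix holds the first six octets of the packet (version, type,
-	// algorithms and hashed-area length), the hashed subpacket area and a
-	// six-octet trailer that is appended when hashing.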
- sig.HashSuffix = make([]byte, l+6)
- sig.HashSuffix[0] = 4
- copy(sig.HashSuffix[1:], buf[:5])
- hashedSubpackets := sig.HashSuffix[6:l]
- _, err = readFull(r, hashedSubpackets)
- if err != nil {
- return
- }
- // See RFC 4880, section 5.2.4
- trailer := sig.HashSuffix[l:]
- trailer[0] = 4
- trailer[1] = 0xff
- trailer[2] = uint8(l >> 24)
- trailer[3] = uint8(l >> 16)
- trailer[4] = uint8(l >> 8)
- trailer[5] = uint8(l)
-
- err = parseSignatureSubpackets(sig, hashedSubpackets, true)
- if err != nil {
- return
- }
-
- _, err = readFull(r, buf[:2])
- if err != nil {
- return
- }
- unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
- unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
- _, err = readFull(r, unhashedSubpackets)
- if err != nil {
- return
- }
- err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
- if err != nil {
- return
- }
-
- _, err = readFull(r, sig.HashTag[:2])
- if err != nil {
- return
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
- case PubKeyAlgoDSA:
- sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r)
- if err == nil {
- sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
- }
- case PubKeyAlgoEdDSA:
- sig.EdDSASigR.bytes, sig.EdDSASigR.bitLength, err = readMPI(r)
- if err == nil {
- sig.EdDSASigS.bytes, sig.EdDSASigS.bitLength, err = readMPI(r)
- }
- case PubKeyAlgoECDSA:
- sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r)
- if err == nil {
- sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r)
- }
- default:
- panic("unreachable")
- }
- return
-}
-
-// parseSignatureSubpackets parses subpackets of the main signature packet. See
-// RFC 4880, section 5.2.3.1.
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
- for len(subpackets) > 0 {
- subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
- if err != nil {
- return
- }
- }
-
- if sig.CreationTime.IsZero() {
- err = errors.StructuralError("no creation time in signature")
- }
-
- return
-}
-
-type signatureSubpacketType uint8
-
-const (
- creationTimeSubpacket signatureSubpacketType = 2
- signatureExpirationSubpacket signatureSubpacketType = 3
- regularExpressionSubpacket signatureSubpacketType = 6
- keyExpirationSubpacket signatureSubpacketType = 9
- prefSymmetricAlgosSubpacket signatureSubpacketType = 11
- revocationKey signatureSubpacketType = 12
- issuerSubpacket signatureSubpacketType = 16
- prefHashAlgosSubpacket signatureSubpacketType = 21
- prefCompressionSubpacket signatureSubpacketType = 22
- prefKeyServerSubpacket signatureSubpacketType = 24
- primaryUserIdSubpacket signatureSubpacketType = 25
- policyURISubpacket signatureSubpacketType = 26
- keyFlagsSubpacket signatureSubpacketType = 27
- reasonForRevocationSubpacket signatureSubpacketType = 29
- featuresSubpacket signatureSubpacketType = 30
- embeddedSignatureSubpacket signatureSubpacketType = 32
- issuerFingerprint signatureSubpacketType = 33
-)
-
-// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
-func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
- // RFC 4880, section 5.2.3.1
- var (
- length uint32
- packetType signatureSubpacketType
- isCritical bool
- )
- switch {
- case subpacket[0] < 192:
- length = uint32(subpacket[0])
- subpacket = subpacket[1:]
- case subpacket[0] < 255:
- if len(subpacket) < 2 {
- goto Truncated
- }
- length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
- subpacket = subpacket[2:]
- default:
- if len(subpacket) < 5 {
- goto Truncated
- }
- length = uint32(subpacket[1])<<24 |
- uint32(subpacket[2])<<16 |
- uint32(subpacket[3])<<8 |
- uint32(subpacket[4])
- subpacket = subpacket[5:]
- }
- if length > uint32(len(subpacket)) {
- goto Truncated
- }
- rest = subpacket[length:]
- subpacket = subpacket[:length]
- if len(subpacket) == 0 {
- err = errors.StructuralError("zero length signature subpacket")
- return
- }
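-	// The top bit of the type octet marks the subpacket as critical; the
-	// low seven bits carry the subpacket type.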
- packetType = signatureSubpacketType(subpacket[0] & 0x7f)
- isCritical = subpacket[0]&0x80 == 0x80
- subpacket = subpacket[1:]
- sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
- switch packetType {
- case creationTimeSubpacket:
- if !isHashed {
- err = errors.StructuralError("signature creation time in non-hashed area")
- return
- }
- if len(subpacket) != 4 {
- err = errors.StructuralError("signature creation time not four bytes")
- return
- }
- t := binary.BigEndian.Uint32(subpacket)
- sig.CreationTime = time.Unix(int64(t), 0)
- case signatureExpirationSubpacket:
- // Signature expiration time, section 5.2.3.10
- if !isHashed {
- return
- }
- if len(subpacket) != 4 {
- err = errors.StructuralError("expiration subpacket with bad length")
- return
- }
- sig.SigLifetimeSecs = new(uint32)
- *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
- case keyExpirationSubpacket:
- // Key expiration time, section 5.2.3.6
- if !isHashed {
- return
- }
- if len(subpacket) != 4 {
- err = errors.StructuralError("key expiration subpacket with bad length")
- return
- }
- sig.KeyLifetimeSecs = new(uint32)
- *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
- case prefSymmetricAlgosSubpacket:
- // Preferred symmetric algorithms, section 5.2.3.7
- if !isHashed {
- return
- }
- sig.PreferredSymmetric = make([]byte, len(subpacket))
- copy(sig.PreferredSymmetric, subpacket)
- case issuerSubpacket:
- // Issuer, section 5.2.3.5
- if len(subpacket) != 8 {
- err = errors.StructuralError("issuer subpacket with bad length")
- return
- }
- sig.IssuerKeyId = new(uint64)
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
- case prefHashAlgosSubpacket:
- // Preferred hash algorithms, section 5.2.3.8
- if !isHashed {
- return
- }
- sig.PreferredHash = make([]byte, len(subpacket))
- copy(sig.PreferredHash, subpacket)
- case prefCompressionSubpacket:
- // Preferred compression algorithms, section 5.2.3.9
- if !isHashed {
- return
- }
- sig.PreferredCompression = make([]byte, len(subpacket))
- copy(sig.PreferredCompression, subpacket)
- case primaryUserIdSubpacket:
- // Primary User ID, section 5.2.3.19
- if !isHashed {
- return
- }
- if len(subpacket) != 1 {
- err = errors.StructuralError("primary user id subpacket with bad length")
- return
- }
- sig.IsPrimaryId = new(bool)
- if subpacket[0] > 0 {
- *sig.IsPrimaryId = true
- }
- case keyFlagsSubpacket:
- // Key flags, section 5.2.3.21
- if !isHashed {
- return
- }
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty key flags subpacket")
- return
- }
- if subpacket[0] != 0 {
- sig.FlagsValid = true
- if subpacket[0]&KeyFlagCertify != 0 {
- sig.FlagCertify = true
- }
- if subpacket[0]&KeyFlagSign != 0 {
- sig.FlagSign = true
- }
- if subpacket[0]&KeyFlagEncryptCommunications != 0 {
- sig.FlagEncryptCommunications = true
- }
- if subpacket[0]&KeyFlagEncryptStorage != 0 {
- sig.FlagEncryptStorage = true
- }
- }
- case reasonForRevocationSubpacket:
- // Reason For Revocation, section 5.2.3.23
- if !isHashed {
- return
- }
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty revocation reason subpacket")
- return
- }
- sig.RevocationReason = new(uint8)
- *sig.RevocationReason = subpacket[0]
- sig.RevocationReasonText = string(subpacket[1:])
- case featuresSubpacket:
- // Features subpacket, section 5.2.3.24 specifies a very general
- // mechanism for OpenPGP implementations to signal support for new
- // features. In practice, the subpacket is used exclusively to
- // indicate support for MDC-protected encryption.
- sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1
- case embeddedSignatureSubpacket:
-		// Its only usage is in signatures that cross-certify
-		// signing subkeys. Section 5.2.3.26 describes the
- // format, with its usage described in section 11.1
- if sig.EmbeddedSignature != nil {
- err = errors.StructuralError("Cannot have multiple embedded signatures")
- return
- }
- sig.EmbeddedSignature = new(Signature)
-		// Embedded signatures are required to be v4 signatures; see
- // section 12.1. However, we only parse v4 signatures in this
- // file anyway.
- if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
- return nil, err
- }
- if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
- return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
- }
- case policyURISubpacket:
- // See RFC 4880, Section 5.2.3.20
- sig.PolicyURI = string(subpacket[:])
- case regularExpressionSubpacket:
- sig.Regex = string(subpacket[:])
- if isCritical {
- sig.StubbedOutCriticalError = errors.UnsupportedError("regex support is stubbed out")
- }
- case prefKeyServerSubpacket:
- sig.PreferredKeyServer = string(subpacket[:])
- case issuerFingerprint:
-		// The first octet is the key version number, but we'll just
-		// read until the end of the subpacket, so we'll ignore it.
- sig.IssuerFingerprint = append([]byte{}, subpacket[1:]...)
- case revocationKey:
- // Authorizes the specified key to issue revocation signatures
- // for a key.
-
- // TODO: Class octet must have bit 0x80 set. If the bit 0x40
- // is set, then this means that the revocation information is
- // sensitive.
- sig.DesignatedRevoker = &RevocationKey{
- Class: subpacket[0],
- PublicKeyAlgo: PublicKeyAlgorithm(subpacket[1]),
- Fingerprint: append([]byte{}, subpacket[2:]...),
- }
- default:
- if isCritical {
- err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
- return
- }
- }
- return
-
-Truncated:
- err = errors.StructuralError("signature subpacket truncated")
- return
-}
-
-// subpacketLengthLength returns the length, in bytes, of an encoded length value.
-func subpacketLengthLength(length int) int {
- if length < 192 {
- return 1
- }
- if length < 16320 {
- return 2
- }
- return 5
-}
-
-// serializeSubpacketLength marshals the given length into to.
-func serializeSubpacketLength(to []byte, length int) int {
- // RFC 4880, Section 4.2.2.
- if length < 192 {
- to[0] = byte(length)
- return 1
- }
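-	// Two-octet lengths encode values from 192 up to 16319.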
- if length < 16320 {
- length -= 192
- to[0] = byte((length >> 8) + 192)
- to[1] = byte(length)
- return 2
- }
- to[0] = 255
- to[1] = byte(length >> 24)
- to[2] = byte(length >> 16)
- to[3] = byte(length >> 8)
- to[4] = byte(length)
- return 5
-}
-
-// subpacketsLength returns the serialized length, in bytes, of the given
-// subpackets.
-func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
- for _, subpacket := range subpackets {
- if subpacket.hashed == hashed {
- length += subpacketLengthLength(len(subpacket.contents) + 1)
- length += 1 // type byte
- length += len(subpacket.contents)
- }
- }
- return
-}
-
-// serializeSubpackets marshals the given subpackets into to.
-func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
- for _, subpacket := range subpackets {
- if subpacket.hashed == hashed {
- n := serializeSubpacketLength(to, len(subpacket.contents)+1)
- to[n] = byte(subpacket.subpacketType)
- to = to[1+n:]
- n = copy(to, subpacket.contents)
- to = to[n:]
- }
- }
- return
-}
-
-// KeyExpired returns whether sig is a self-signature of a key that has
-// expired.
-func (sig *Signature) KeyExpired(currentTime time.Time) bool {
- if sig.KeyLifetimeSecs == nil {
- return false
- }
- expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
- return currentTime.After(expiry)
-}
-
-// ExpiresBeforeOther checks whether the other signature has an expiration
-// at a later date than sig does.
-func (sig *Signature) ExpiresBeforeOther(other *Signature) bool {
- if sig.KeyLifetimeSecs == nil {
- // This sig never expires, or has infinitely long expiration
- // time.
- return false
- } else if other.KeyLifetimeSecs == nil {
- // This sig expires at some non-infinite point, but the other
- // sig never expires.
- return true
- }
-
- getExpiryDate := func(s *Signature) time.Time {
- return s.CreationTime.Add(time.Duration(*s.KeyLifetimeSecs) * time.Second)
- }
-
- return getExpiryDate(other).After(getExpiryDate(sig))
-}
-
-// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
-func (sig *Signature) buildHashSuffix() (err error) {
- hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
-
- var ok bool
- l := 6 + hashedSubpacketsLen
- sig.HashSuffix = make([]byte, l+6)
- sig.HashSuffix[0] = 4
- sig.HashSuffix[1] = uint8(sig.SigType)
- sig.HashSuffix[2] = uint8(sig.PubKeyAlgo)
- sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash)
- if !ok {
- sig.HashSuffix = nil
- return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
- }
- sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
- sig.HashSuffix[5] = byte(hashedSubpacketsLen)
- serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true)
- trailer := sig.HashSuffix[l:]
- trailer[0] = 4
- trailer[1] = 0xff
- trailer[2] = byte(l >> 24)
- trailer[3] = byte(l >> 16)
- trailer[4] = byte(l >> 8)
- trailer[5] = byte(l)
- return
-}
-
-func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
- err = sig.buildHashSuffix()
- if err != nil {
- return
- }
-
- h.Write(sig.HashSuffix)
- digest = h.Sum(nil)
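-	// Only the first two octets of the digest are kept as the hash tag.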
- copy(sig.HashTag[:], digest)
- return
-}
-
-// Sign signs a message with a private key. The hash, h, must contain
-// the hash of the message to be signed and will be mutated by this function.
-// On success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
- signer, hashIsSigner := h.(Signer)
-
- if !hashIsSigner && (priv == nil || priv.PrivateKey == nil) {
- err = errors.InvalidArgumentError("attempting to sign with nil PrivateKey")
- return
- }
-
- sig.outSubpackets = sig.buildSubpackets()
- digest, err := sig.signPrepareHash(h)
- if err != nil {
- return
- }
-
- if hashIsSigner {
- err = signer.Sign(sig)
- return
- }
-
-	// Parameter check: if this is wrong we will make a signature but
-	// will not be able to serialize it later.
- if sig.PubKeyAlgo != priv.PubKeyAlgo {
- err = errors.InvalidArgumentError("signature pub key algo does not match priv key")
- return
- }
-
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sig.RSASignature.bytes, err = rsa.SignPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), sig.Hash, digest)
- sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
- case PubKeyAlgoDSA:
- dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
-
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
- if len(digest) > subgroupSize {
- digest = digest[:subgroupSize]
- }
- r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
- if err != nil {
- return err
- }
- sig.DSASigR.bytes = r.Bytes()
- sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
- sig.DSASigS.bytes = s.Bytes()
- sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
- case PubKeyAlgoECDSA:
- r, s, err := ecdsa.Sign(config.Random(), priv.PrivateKey.(*ecdsa.PrivateKey), digest)
- if err != nil {
- return err
- }
- sig.ECDSASigR = FromBig(r)
- sig.ECDSASigS = FromBig(s)
- case PubKeyAlgoEdDSA:
- r, s, err := priv.PrivateKey.(*EdDSAPrivateKey).Sign(digest)
- if err != nil {
- return err
- }
- sig.EdDSASigR = FromBytes(r)
- sig.EdDSASigS = FromBytes(s)
- default:
- err = errors.UnsupportedError("public key algorithm for signing: " + strconv.Itoa(int(priv.PubKeyAlgo)))
- }
-
- return
-}
-
-// SignUserId computes a signature from priv, asserting that pub is a valid
-// key for the identity id. On success, the signature is stored in sig. Call
-// Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
- h, err := userIdSignatureHash(id, pub, sig.Hash)
- if err != nil {
- return err
- }
- return sig.Sign(h, priv, config)
-}
-
-// SignUserIdWithSigner computes a signature from priv, asserting that pub is a
-// valid key for the identity id. On success, the signature is stored in sig.
-// Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignUserIdWithSigner(id string, pub *PublicKey, s Signer, config *Config) error {
- updateUserIdSignatureHash(id, pub, s)
-
- return sig.Sign(s, nil, config)
-}
-
-// SignKey computes a signature from priv, asserting that pub is a subkey. On
-// success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
- if err != nil {
- return err
- }
- return sig.Sign(h, priv, config)
-}
-
-// SignKeyWithSigner computes a signature using s, asserting that
-// signeePubKey is a subkey. On success, the signature is stored in sig. Call
-// Serialize to write it out. If config is nil, sensible defaults will be used.
-func (sig *Signature) SignKeyWithSigner(signeePubKey *PublicKey, signerPubKey *PublicKey, s Signer, config *Config) error {
- updateKeySignatureHash(signerPubKey, signeePubKey, s)
-
- return sig.Sign(s, nil, config)
-}
-
-// CrossSignKey creates a PrimaryKeyBinding signature in sig.EmbeddedSignature
-// by signing the hash of the `primary` key with the `priv` subkey's private
-// key. The primary public key is the signee here.
-func (sig *Signature) CrossSignKey(primary *PublicKey, priv *PrivateKey, config *Config) error {
- if len(sig.outSubpackets) > 0 {
- return fmt.Errorf("outSubpackets already exists, looks like CrossSignKey was called after Sign")
- }
-
- sig.EmbeddedSignature = &Signature{
- CreationTime: sig.CreationTime,
- SigType: SigTypePrimaryKeyBinding,
- PubKeyAlgo: priv.PubKeyAlgo,
- Hash: sig.Hash,
- }
-
- h, err := keySignatureHash(primary, &priv.PublicKey, sig.Hash)
- if err != nil {
- return err
- }
- return sig.EmbeddedSignature.Sign(h, priv, config)
-}
-
-// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
-// called first.
-func (sig *Signature) Serialize(w io.Writer) (err error) {
- if len(sig.outSubpackets) == 0 {
- sig.outSubpackets = sig.rawSubpackets
- }
- if sig.RSASignature.bytes == nil &&
- sig.DSASigR.bytes == nil &&
- sig.ECDSASigR.bytes == nil &&
- sig.EdDSASigR.bytes == nil {
- return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
- }
-
- sigLength := 0
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sigLength = 2 + len(sig.RSASignature.bytes)
- case PubKeyAlgoDSA:
- sigLength = 2 + len(sig.DSASigR.bytes)
- sigLength += 2 + len(sig.DSASigS.bytes)
- case PubKeyAlgoEdDSA:
- sigLength = 2 + len(sig.EdDSASigR.bytes)
- sigLength += 2 + len(sig.EdDSASigS.bytes)
- case PubKeyAlgoECDSA:
- sigLength = 2 + len(sig.ECDSASigR.bytes)
- sigLength += 2 + len(sig.ECDSASigS.bytes)
- default:
- panic("impossible")
- }
-
- unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
- length := len(sig.HashSuffix) - 6 /* trailer not included */ +
- 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
- 2 /* hash tag */ + sigLength
- err = serializeHeader(w, packetTypeSignature, length)
- if err != nil {
- return
- }
-
- _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6])
- if err != nil {
- return
- }
-
- unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
- unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
- unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
- serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
-
- _, err = w.Write(unhashedSubpackets)
- if err != nil {
- return
- }
- _, err = w.Write(sig.HashTag[:])
- if err != nil {
- return
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- err = writeMPIs(w, sig.RSASignature)
- case PubKeyAlgoDSA:
- err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
- case PubKeyAlgoEdDSA:
- err = writeMPIs(w, sig.EdDSASigR, sig.EdDSASigS)
- case PubKeyAlgoECDSA:
- err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS)
- default:
- panic("impossible")
- }
- return
-}
-
-// outputSubpacket represents a subpacket to be marshaled.
-type outputSubpacket struct {
- hashed bool // true if this subpacket is in the hashed area.
- subpacketType signatureSubpacketType
- isCritical bool
- contents []byte
-}
-
-func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) {
- creationTime := make([]byte, 4)
- binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
- subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
-
- if sig.IssuerKeyId != nil {
- keyId := make([]byte, 8)
- binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
- subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
- }
-
- if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
- sigLifetime := make([]byte, 4)
- binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
- subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
- }
-
- // Key flags may only appear in self-signatures or certification signatures.
-
- if sig.FlagsValid {
- subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{sig.GetKeyFlags().BitField}})
- }
-
- // The following subpackets may only appear in self-signatures
-
- if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
- keyLifetime := make([]byte, 4)
- binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
- subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
- }
-
- if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
- subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
- }
-
- if len(sig.PreferredSymmetric) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
- }
-
- if len(sig.PreferredHash) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
- }
-
- if len(sig.PreferredCompression) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
- }
-
- if sig.EmbeddedSignature != nil {
- buf := bytes.NewBuffer(nil)
- if err := sig.EmbeddedSignature.Serialize(buf); err == nil {
- byteContent := buf.Bytes()[2:] // skip 2-byte length header
- subpackets = append(subpackets, outputSubpacket{false, embeddedSignatureSubpacket, true, byteContent})
- }
- }
-
- return
-}
-
-func (sig *Signature) GetKeyFlags() (ret KeyFlagBits) {
- if !sig.FlagsValid {
- return ret
- }
-
- ret.Valid = true
- if sig.FlagCertify {
- ret.BitField |= KeyFlagCertify
- }
- if sig.FlagSign {
- ret.BitField |= KeyFlagSign
- }
- if sig.FlagEncryptCommunications {
- ret.BitField |= KeyFlagEncryptCommunications
- }
- if sig.FlagEncryptStorage {
- ret.BitField |= KeyFlagEncryptStorage
- }
- return ret
-}
-
-func (f *KeyFlagBits) HasFlagCertify() bool {
- return f.BitField&KeyFlagCertify != 0
-}
-
-func (f *KeyFlagBits) HasFlagSign() bool {
- return f.BitField&KeyFlagSign != 0
-}
-
-func (f *KeyFlagBits) HasFlagEncryptCommunications() bool {
- return f.BitField&KeyFlagEncryptCommunications != 0
-}
-
-func (f *KeyFlagBits) HasFlagEncryptStorage() bool {
- return f.BitField&KeyFlagEncryptStorage != 0
-}
-
-func (f *KeyFlagBits) Merge(other KeyFlagBits) {
- if other.Valid {
- f.Valid = true
- f.BitField |= other.BitField
- }
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go
deleted file mode 100644
index dfca651b..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "encoding/binary"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/s2k"
-)
-
-// SignatureV3 represents older version 3 signatures. These signatures are less secure
-// than version 4 and should not be used to create new signatures. They are included
-// here for backwards compatibility, to read and validate signatures made with
-// older key material.
-// See RFC 4880, section 5.2.2.
-type SignatureV3 struct {
- SigType SignatureType
- CreationTime time.Time
- IssuerKeyId uint64
- PubKeyAlgo PublicKeyAlgorithm
- Hash crypto.Hash
- HashTag [2]byte
-
- RSASignature parsedMPI
- DSASigR, DSASigS parsedMPI
-}
-
-func (sig *SignatureV3) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.2.2
- var buf [8]byte
- if _, err = readFull(r, buf[:1]); err != nil {
- return
- }
- if buf[0] < 2 || buf[0] > 3 {
- err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
- return
- }
- if _, err = readFull(r, buf[:1]); err != nil {
- return
- }
- if buf[0] != 5 {
- err = errors.UnsupportedError(
- "invalid hashed material length " + strconv.Itoa(int(buf[0])))
- return
- }
-
- // Read hashed material: signature type + creation time
- if _, err = readFull(r, buf[:5]); err != nil {
- return
- }
- sig.SigType = SignatureType(buf[0])
- t := binary.BigEndian.Uint32(buf[1:5])
- sig.CreationTime = time.Unix(int64(t), 0)
-
- // Eight-octet Key ID of signer.
- if _, err = readFull(r, buf[:8]); err != nil {
- return
- }
- sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
-
- // Public-key and hash algorithm
- if _, err = readFull(r, buf[:2]); err != nil {
- return
- }
- sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
- default:
- err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
- return
- }
- var ok bool
- if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
-		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[1])))
- }
-
- // Two-octet field holding left 16 bits of signed hash value.
- if _, err = readFull(r, sig.HashTag[:2]); err != nil {
- return
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
- case PubKeyAlgoDSA:
- if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
- return
- }
- sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
- default:
- panic("unreachable")
- }
- return
-}
-
-// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
-// called first.
-func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
- buf := make([]byte, 8)
-
- // Write the sig type and creation time
- buf[0] = byte(sig.SigType)
- binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
- if _, err = w.Write(buf[:5]); err != nil {
- return
- }
-
- // Write the issuer long key ID
- binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
- if _, err = w.Write(buf[:8]); err != nil {
- return
- }
-
- // Write public key algorithm, hash ID, and hash value
- buf[0] = byte(sig.PubKeyAlgo)
- hashId, ok := s2k.HashToHashId(sig.Hash)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
- }
- buf[1] = hashId
- copy(buf[2:4], sig.HashTag[:])
- if _, err = w.Write(buf[:4]); err != nil {
- return
- }
-
- if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
- return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- err = writeMPIs(w, sig.RSASignature)
- case PubKeyAlgoDSA:
- err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
- default:
- panic("impossible")
- }
- return
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go
deleted file mode 100644
index b92c1d77..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "io"
- "strconv"
-
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/s2k"
-)
-
-// This is the largest session key that we'll support. Since no 512-bit cipher
-// has ever been seriously used, this is comfortably large.
-const maxSessionKeySizeInBytes = 64
-
-// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
-// 4880, section 5.3.
-type SymmetricKeyEncrypted struct {
- CipherFunc CipherFunction
- s2k func(out, in []byte)
- encryptedKey []byte
-}
-
-const symmetricKeyEncryptedVersion = 4
-
-func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
- // RFC 4880, section 5.3.
- var buf [2]byte
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- if buf[0] != symmetricKeyEncryptedVersion {
- return errors.UnsupportedError("SymmetricKeyEncrypted version")
- }
- ske.CipherFunc = CipherFunction(buf[1])
-
- if ske.CipherFunc.KeySize() == 0 {
- return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
- }
-
- var err error
- ske.s2k, err = s2k.Parse(r)
- if err != nil {
- return err
- }
- if ske.s2k == nil {
- return errors.UnsupportedError("can't use dummy S2K for symmetric key encryption")
- }
-
- encryptedKey := make([]byte, maxSessionKeySizeInBytes)
- // The session key may follow. We just have to try and read to find
- // out. If it exists then we limit it to maxSessionKeySizeInBytes.
- n, err := readFull(r, encryptedKey)
- if err != nil && err != io.ErrUnexpectedEOF {
- return err
- }
-
- if n != 0 {
- if n == maxSessionKeySizeInBytes {
- return errors.UnsupportedError("oversized encrypted session key")
- }
- ske.encryptedKey = encryptedKey[:n]
- }
-
- return nil
-}
-
-// Decrypt attempts to decrypt an encrypted session key and returns the key and
-// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
-// packet.
-func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
- key := make([]byte, ske.CipherFunc.KeySize())
- ske.s2k(key, passphrase)
-
- if len(ske.encryptedKey) == 0 {
- return key, ske.CipherFunc, nil
- }
-
- // the IV is all zeros
- iv := make([]byte, ske.CipherFunc.blockSize())
- c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
- plaintextKey := make([]byte, len(ske.encryptedKey))
- c.XORKeyStream(plaintextKey, ske.encryptedKey)
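-	// The first octet of the decrypted blob names the cipher for the
-	// session key that follows it; see RFC 4880, section 5.3.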
- cipherFunc := CipherFunction(plaintextKey[0])
- if cipherFunc.blockSize() == 0 {
- return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
- }
- plaintextKey = plaintextKey[1:]
-	if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherKeySize {
- return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " +
- "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")")
- }
- return plaintextKey, cipherFunc, nil
-}
-
-// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The
-// packet contains a random session key, encrypted by a key derived from the
-// given passphrase. The session key is returned and must be passed to
-// SerializeSymmetricallyEncrypted.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
- cipherFunc := config.Cipher()
- keySize := cipherFunc.KeySize()
- if keySize == 0 {
- return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
- }
-
- s2kBuf := new(bytes.Buffer)
- keyEncryptingKey := make([]byte, keySize)
- // s2k.Serialize salts and stretches the passphrase, and writes the
- // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
- if err != nil {
- return
- }
- s2kBytes := s2kBuf.Bytes()
-
- packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
- err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
- if err != nil {
- return
- }
-
- var buf [2]byte
- buf[0] = symmetricKeyEncryptedVersion
- buf[1] = byte(cipherFunc)
- _, err = w.Write(buf[:])
- if err != nil {
- return
- }
- _, err = w.Write(s2kBytes)
- if err != nil {
- return
- }
-
- sessionKey := make([]byte, keySize)
- _, err = io.ReadFull(config.Random(), sessionKey)
- if err != nil {
- return
- }
- iv := make([]byte, cipherFunc.blockSize())
- c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
- encryptedCipherAndKey := make([]byte, keySize+1)
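-	// Encrypt the cipher-function octet and the session key with one
-	// continuous CFB stream.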
- c.XORKeyStream(encryptedCipherAndKey, buf[1:])
- c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
- _, err = w.Write(encryptedCipherAndKey)
- if err != nil {
- return
- }
-
- key = sessionKey
- return
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go
deleted file mode 100644
index fd4f8f01..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto/cipher"
- "crypto/sha1"
- "crypto/subtle"
- "hash"
- "io"
- "strconv"
-
- "github.com/keybase/go-crypto/openpgp/errors"
-)
-
-// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
-// encrypted contents will consist of more OpenPGP packets. See RFC 4880,
-// sections 5.7 and 5.13.
-type SymmetricallyEncrypted struct {
- MDC bool // true iff this is a type 18 packet and thus has an embedded MAC.
- contents io.Reader
- prefix []byte
-}
-
-const symmetricallyEncryptedVersion = 1
-
-func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
- if se.MDC {
- // See RFC 4880, section 5.13.
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
- if buf[0] != symmetricallyEncryptedVersion {
- return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
- }
- }
- se.contents = r
- return nil
-}
-
-// Decrypt returns a ReadCloser, from which the decrypted contents of the
-// packet can be read. An incorrect key can, with high probability, be detected
-// immediately and this will result in a KeyIncorrect error being returned.
-func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
- keySize := c.KeySize()
- if keySize == 0 {
- return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
- }
- if len(key) != keySize {
- return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
- }
-
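-	// The first blockSize+2 octets are the encrypted random prefix; its
-	// last two octets repeat the previous two as a quick key check (RFC
-	// 4880, section 5.7).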
- if se.prefix == nil {
- se.prefix = make([]byte, c.blockSize()+2)
- _, err := readFull(se.contents, se.prefix)
- if err != nil {
- return nil, err
- }
- } else if len(se.prefix) != c.blockSize()+2 {
- return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
- }
-
- ocfbResync := OCFBResync
- if se.MDC {
- // MDC packets use a different form of OCFB mode.
- ocfbResync = OCFBNoResync
- }
-
- s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
- if s == nil {
- return nil, errors.ErrKeyIncorrect
- }
-
- plaintext := cipher.StreamReader{S: s, R: se.contents}
-
- if se.MDC {
- // MDC packets have an embedded hash that we need to check.
- h := sha1.New()
- h.Write(se.prefix)
- return &seMDCReader{in: plaintext, h: h}, nil
- }
-
- // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
- return seReader{plaintext}, nil
-}
-
-// seReader wraps an io.Reader with a no-op Close method.
-type seReader struct {
- in io.Reader
-}
-
-func (ser seReader) Read(buf []byte) (int, error) {
- return ser.in.Read(buf)
-}
-
-func (ser seReader) Close() error {
- return nil
-}
-
-const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
-
-// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
-// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
-// MDC packet containing a hash of the previous contents which is checked
-// against the running hash. See RFC 4880, section 5.13.
-type seMDCReader struct {
- in io.Reader
- h hash.Hash
- trailer [mdcTrailerSize]byte
- scratch [mdcTrailerSize]byte
- trailerUsed int
- error bool
- eof bool
-}
-
-func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
- if ser.error {
- err = io.ErrUnexpectedEOF
- return
- }
- if ser.eof {
- err = io.EOF
- return
- }
-
- // If we haven't yet filled the trailer buffer then we must do that
- // first.
- for ser.trailerUsed < mdcTrailerSize {
- n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
- ser.trailerUsed += n
- if err == io.EOF {
- if ser.trailerUsed != mdcTrailerSize {
- n = 0
- err = io.ErrUnexpectedEOF
- ser.error = true
- return
- }
- ser.eof = true
- n = 0
- return
- }
-
- if err != nil {
- n = 0
- return
- }
- }
-
- // If it's a short read then we read into a temporary buffer and shift
- // the data into the caller's buffer.
- if len(buf) <= mdcTrailerSize {
- n, err = readFull(ser.in, ser.scratch[:len(buf)])
- copy(buf, ser.trailer[:n])
- ser.h.Write(buf[:n])
- copy(ser.trailer[:], ser.trailer[n:])
- copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
- if n < len(buf) {
- ser.eof = true
- err = io.EOF
- }
- return
- }
-
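-	// For larger reads, prepend the saved trailer, hand the oldest bytes
-	// to the caller and keep the newest 22 bytes as the trailer candidate.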
- n, err = ser.in.Read(buf[mdcTrailerSize:])
- copy(buf, ser.trailer[:])
- ser.h.Write(buf[:n])
- copy(ser.trailer[:], buf[n:])
-
- if err == io.EOF {
- ser.eof = true
- }
- return
-}
-
-// This is a new-format packet tag byte for a type 19 (MDC) packet.
-const mdcPacketTagByte = byte(0x80) | 0x40 | 19
-
-func (ser *seMDCReader) Close() error {
- if ser.error {
- return errors.SignatureError("error during reading")
- }
-
- for !ser.eof {
- // We haven't seen EOF so we need to read to the end
- var buf [1024]byte
- _, err := ser.Read(buf[:])
- if err == io.EOF {
- break
- }
- if err != nil {
- return errors.SignatureError("error during reading")
- }
- }
-
- if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
- return errors.SignatureError("MDC packet not found")
- }
- ser.h.Write(ser.trailer[:2])
-
- final := ser.h.Sum(nil)
- if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
- return errors.SignatureError("hash mismatch")
- }
- return nil
-}
-
-// An seMDCWriter writes through to an io.WriteCloser while maintaining a running
-// hash of the data written. On close, it emits an MDC packet containing the
-// running hash.
-type seMDCWriter struct {
- w io.WriteCloser
- h hash.Hash
-}
-
-func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
- w.h.Write(buf)
- return w.w.Write(buf)
-}
-
-func (w *seMDCWriter) Close() (err error) {
- var buf [mdcTrailerSize]byte
-
- buf[0] = mdcPacketTagByte
- buf[1] = sha1.Size
- w.h.Write(buf[:2])
- digest := w.h.Sum(nil)
- copy(buf[2:], digest)
-
- _, err = w.w.Write(buf[:])
- if err != nil {
- return
- }
- return w.w.Close()
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-type noOpCloser struct {
- w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
- return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
- return nil
-}
-
-// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
-// to w and returns a WriteCloser to which the to-be-encrypted packets can be
-// written.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) {
- if c.KeySize() != len(key) {
- return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
- }
- writeCloser := noOpCloser{w}
- ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
- if err != nil {
- return
- }
-
- _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
- if err != nil {
- return
- }
-
- block := c.new(key)
- blockSize := block.BlockSize()
- iv := make([]byte, blockSize)
- _, err = config.Random().Read(iv)
- if err != nil {
- return
- }
- s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
- _, err = ciphertext.Write(prefix)
- if err != nil {
- return
- }
- plaintext := cipher.StreamWriter{S: s, W: ciphertext}
-
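-	// Seed the MDC hash with the plaintext prefix: the IV followed by its
-	// last two octets repeated (the quick-check bytes).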
- h := sha1.New()
- h.Write(iv)
- h.Write(iv[blockSize-2:])
- contents = &seMDCWriter{w: plaintext, h: h}
- return
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go
deleted file mode 100644
index 96a2b382..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "image"
- "image/jpeg"
- "io"
- "io/ioutil"
-)
-
-const UserAttrImageSubpacket = 1
-
-// UserAttribute is capable of storing other types of data about a user
-// beyond name, email and a text comment. In practice, user attributes are
-// typically used to store a signed JPEG thumbnail photo of the user.
-// See RFC 4880, section 5.12.
-type UserAttribute struct {
- Contents []*OpaqueSubpacket
-}
-
-// NewUserAttributePhoto creates a user attribute packet
-// containing the given images.
-func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
- uat = new(UserAttribute)
- for _, photo := range photos {
- var buf bytes.Buffer
- // RFC 4880, Section 5.12.1.
- data := []byte{
- 0x10, 0x00, // Little-endian image header length (16 bytes)
- 0x01, // Image header version 1
- 0x01, // JPEG
- 0, 0, 0, 0, // 12 reserved octets, must be all zero.
- 0, 0, 0, 0,
- 0, 0, 0, 0}
- if _, err = buf.Write(data); err != nil {
- return
- }
- if err = jpeg.Encode(&buf, photo, nil); err != nil {
- return
- }
- uat.Contents = append(uat.Contents, &OpaqueSubpacket{
- SubType: UserAttrImageSubpacket,
- Contents: buf.Bytes()})
- }
- return
-}
-
-// NewUserAttribute creates a new user attribute packet containing the given subpackets.
-func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
- return &UserAttribute{Contents: contents}
-}
-
-func (uat *UserAttribute) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.13
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return
- }
- uat.Contents, err = OpaqueSubpackets(b)
- return
-}
-
-// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
-// header.
-func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
- var buf bytes.Buffer
- for _, sp := range uat.Contents {
- sp.Serialize(&buf)
- }
- if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
- return err
- }
- _, err = w.Write(buf.Bytes())
- return
-}
-
-// ImageData returns zero or more byte slices, each containing
-// JPEG File Interchange Format (JFIF), for each photo in the
-// user attribute packet.
-func (uat *UserAttribute) ImageData() (imageData [][]byte) {
- for _, sp := range uat.Contents {
- if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
- imageData = append(imageData, sp.Contents[16:])
- }
- }
- return
-}
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go
deleted file mode 100644
index d6bea7d4..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
- "io/ioutil"
- "strings"
-)
-
-// UserId contains text that is intended to represent the name and email
-// address of the key holder. See RFC 4880, section 5.11. By convention, this
-// takes the form "Full Name (Comment) <email@example.com>"
-type UserId struct {
-	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
-
- Name, Comment, Email string
-}
-
-func hasInvalidCharacters(s string) bool {
- for _, c := range s {
- switch c {
- case '(', ')', '<', '>', 0:
- return true
- }
- }
- return false
-}
-
-// NewUserId returns a UserId or nil if any of the arguments contain invalid
-// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
-func NewUserId(name, comment, email string) *UserId {
- // RFC 4880 doesn't deal with the structure of userid strings; the
- // name, comment and email form is just a convention. However, there's
- // no convention about escaping the metacharacters and GPG just refuses
- // to create user ids where, say, the name contains a '('. We mirror
- // this behaviour.
-
- if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
- return nil
- }
-
- uid := new(UserId)
- uid.Name, uid.Comment, uid.Email = name, comment, email
- uid.Id = name
- if len(comment) > 0 {
- if len(uid.Id) > 0 {
- uid.Id += " "
- }
- uid.Id += "("
- uid.Id += comment
- uid.Id += ")"
- }
- if len(email) > 0 {
- if len(uid.Id) > 0 {
- uid.Id += " "
- }
- uid.Id += "<"
- uid.Id += email
- uid.Id += ">"
- }
- return uid
-}
-
-func (uid *UserId) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.11
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return
- }
- uid.Id = string(b)
- uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
- return
-}
-
-// Serialize marshals uid to w in the form of an OpenPGP packet, including
-// header.
-func (uid *UserId) Serialize(w io.Writer) error {
- err := serializeHeader(w, packetTypeUserId, len(uid.Id))
- if err != nil {
- return err
- }
- _, err = w.Write([]byte(uid.Id))
- return err
-}
-
-// parseUserId extracts the name, comment and email from a user id string that
-// is formatted as "Full Name (Comment) <email@example.com>".
-func parseUserId(id string) (name, comment, email string) {
- var n, c, e struct {
- start, end int
- }
- var state int
-
- for offset, rune := range id {
- switch state {
- case 0:
- // Entering name
- n.start = offset
- state = 1
- fallthrough
- case 1:
- // In name
- if rune == '(' {
- state = 2
- n.end = offset
- } else if rune == '<' {
- state = 5
- n.end = offset
- }
- case 2:
- // Entering comment
- c.start = offset
- state = 3
- fallthrough
- case 3:
- // In comment
- if rune == ')' {
- state = 4
- c.end = offset
- }
- case 4:
- // Between comment and email
- if rune == '<' {
- state = 5
- }
- case 5:
- // Entering email
- e.start = offset
- state = 6
- fallthrough
- case 6:
- // In email
- if rune == '>' {
- state = 7
- e.end = offset
- }
- default:
- // After email
- }
- }
- switch state {
- case 1:
- // ended in the name
- n.end = len(id)
- case 3:
- // ended in comment
- c.end = len(id)
- case 6:
- // ended in email
- e.end = len(id)
- }
-
- name = strings.TrimSpace(id[n.start:n.end])
- comment = strings.TrimSpace(id[c.start:c.end])
- email = strings.TrimSpace(id[e.start:e.end])
- return
-}
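A usage sketch of the user-id convention implemented above, assuming the vendored import path that this diff removes:

package main

import (
	"fmt"

	"github.com/keybase/go-crypto/openpgp/packet"
)

func main() {
	uid := packet.NewUserId("Alice Example", "work", "alice@example.com")
	// uid.Id == "Alice Example (work) <alice@example.com>"
	fmt.Println(uid.Id)

	// NewUserId rejects the metacharacters used as field delimiters.
	if packet.NewUserId("Bob (hacker)", "", "") == nil {
		fmt.Println("names containing '(' or ')' are rejected")
	}
}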
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/patch.sh b/vendor/github.com/keybase/go-crypto/openpgp/patch.sh
deleted file mode 100644
index 23cacc83..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/patch.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-patch < sig-v3.patch
-patch < s2k-gnu-dummy.patch
-find . -type f -name '*.go' -exec sed -i'' -e 's/golang.org\/x\/crypto\/openpgp/github.com\/keybase\/go-crypto\/openpgp/' {} \;
-find . -type f -name '*.go-e' -exec rm {} \;
-go test ./...
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/read.go b/vendor/github.com/keybase/go-crypto/openpgp/read.go
deleted file mode 100644
index 790630e5..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/read.go
+++ /dev/null
@@ -1,500 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package openpgp implements high level operations on OpenPGP messages.
-package openpgp // import "github.com/keybase/go-crypto/openpgp"
-
-import (
- "crypto"
- "crypto/hmac"
- _ "crypto/sha256"
- "hash"
- "io"
- "strconv"
-
- "github.com/keybase/go-crypto/openpgp/armor"
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/packet"
-)
-
-// SignatureType is the armor type for a PGP signature.
-var SignatureType = "PGP SIGNATURE"
-
-// readArmored reads an armored block with the given type.
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
- block, err := armor.Decode(r)
- if err != nil {
- return
- }
-
- if block.Type != expectedType {
- return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
- }
-
- return block.Body, nil
-}
-
-// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
-// signed message.
-type MessageDetails struct {
- IsEncrypted bool // true if the message was encrypted.
- EncryptedToKeyIds []uint64 // the list of recipient key ids.
- IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
- DecryptedWith Key // the private key used to decrypt the message, if any.
- IsSigned bool // true if the message is signed.
- SignedByKeyId uint64 // the key id of the signer, if any.
- SignedBy *Key // the key of the signer, if available.
- LiteralData *packet.LiteralData // the metadata of the contents
- UnverifiedBody io.Reader // the contents of the message.
-
- // If IsSigned is true and SignedBy is non-zero then the signature will
- // be verified as UnverifiedBody is read. The signature cannot be
- // checked until the whole of UnverifiedBody is read so UnverifiedBody
- // must be consumed until EOF before the data can trusted. Even if a
- // message isn't signed (or the signer is unknown) the data may contain
- // an authentication code that is only checked once UnverifiedBody has
- // been consumed. Once EOF has been seen, the following fields are
- // valid. (An authentication code failure is reported as a
- // SignatureError error when reading from UnverifiedBody.)
- SignatureError error // nil if the signature is good.
- Signature *packet.Signature // the signature packet itself, if v4 (default)
- SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature
-
- // Does the Message include multiple signatures? Also called "nested signatures".
- MultiSig bool
-
- decrypted io.ReadCloser
-}
-
-// A PromptFunction is used as a callback by functions that may need to decrypt
-// a private key, or prompt for a passphrase. It is called with a list of
-// acceptable, encrypted private keys and a boolean that indicates whether a
-// passphrase is usable. It should either decrypt a private key or return a
-// passphrase to try. If the decrypted private key or given passphrase isn't
-// correct, the function will be called again, forever. Any error returned will
-// be passed up.
-type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
-
-// A keyEnvelopePair is used to store a private key with the envelope that
-// contains a symmetric key, encrypted with that key.
-type keyEnvelopePair struct {
- key Key
- encryptedKey *packet.EncryptedKey
-}
-
-// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
-// The given KeyRing should contain both public keys (for signature
-// verification) and, possibly encrypted, private keys for decrypting.
-// If config is nil, sensible defaults will be used.
-func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
- var p packet.Packet
-
- var symKeys []*packet.SymmetricKeyEncrypted
- var pubKeys []keyEnvelopePair
- var se *packet.SymmetricallyEncrypted
-
- packets := packet.NewReader(r)
- md = new(MessageDetails)
- md.IsEncrypted = true
-
- // The message, if encrypted, starts with a number of packets
- // containing an encrypted decryption key. The decryption key is either
- // encrypted to a public key, or with a passphrase. This loop
- // collects these packets.
-ParsePackets:
- for {
- p, err = packets.Next()
- if err != nil {
- return nil, err
- }
- switch p := p.(type) {
- case *packet.SymmetricKeyEncrypted:
- // This packet contains the decryption key encrypted with a passphrase.
- md.IsSymmetricallyEncrypted = true
- symKeys = append(symKeys, p)
- case *packet.EncryptedKey:
- // This packet contains the decryption key encrypted to a public key.
- md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
- switch p.Algo {
- case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH:
- break
- default:
- continue
- }
- var keys []Key
- if p.KeyId == 0 {
- keys = keyring.DecryptionKeys()
- } else {
- keys = keyring.KeysById(p.KeyId, nil)
- }
- for _, k := range keys {
- pubKeys = append(pubKeys, keyEnvelopePair{k, p})
- }
- case *packet.SymmetricallyEncrypted:
- se = p
- break ParsePackets
- case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
- // This message isn't encrypted.
- if len(symKeys) != 0 || len(pubKeys) != 0 {
- return nil, errors.StructuralError("key material not followed by encrypted message")
- }
- packets.Unread(p)
- return readSignedMessage(packets, nil, keyring)
- }
- }
-
- var candidates []Key
- var decrypted io.ReadCloser
-
- // Now that we have the list of encrypted keys we need to decrypt at
- // least one of them or, if we cannot, we need to call the prompt
- // function so that it can decrypt a key or give us a passphrase.
-FindKey:
- for {
- // See if any of the keys already have a private key available
- candidates = candidates[:0]
- candidateFingerprints := make(map[string]bool)
-
- for _, pk := range pubKeys {
- if pk.key.PrivateKey == nil {
- continue
- }
- if !pk.key.PrivateKey.Encrypted {
- if len(pk.encryptedKey.Key) == 0 {
- pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
- }
- if len(pk.encryptedKey.Key) == 0 {
- continue
- }
- decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
- if err != nil && err != errors.ErrKeyIncorrect {
- return nil, err
- }
- if decrypted != nil {
- md.DecryptedWith = pk.key
- break FindKey
- }
- } else {
- fpr := string(pk.key.PublicKey.Fingerprint[:])
- if v := candidateFingerprints[fpr]; v {
- continue
- }
- candidates = append(candidates, pk.key)
- candidateFingerprints[fpr] = true
- }
- }
-
- if len(candidates) == 0 && len(symKeys) == 0 {
- return nil, errors.ErrKeyIncorrect
- }
-
- if prompt == nil {
- return nil, errors.ErrKeyIncorrect
- }
-
- passphrase, err := prompt(candidates, len(symKeys) != 0)
- if err != nil {
- return nil, err
- }
-
- // Try the symmetric passphrase first
- if len(symKeys) != 0 && passphrase != nil {
- for _, s := range symKeys {
- key, cipherFunc, err := s.Decrypt(passphrase)
- if err == nil {
- decrypted, err = se.Decrypt(cipherFunc, key)
- if err != nil && err != errors.ErrKeyIncorrect {
- return nil, err
- }
- if decrypted != nil {
- break FindKey
- }
- }
-
- }
- }
- }
-
- md.decrypted = decrypted
- if err := packets.Push(decrypted); err != nil {
- return nil, err
- }
- return readSignedMessage(packets, md, keyring)
-}
-
-// readSignedMessage reads a possibly signed message. If mdin is non-nil then
-// that structure is updated and returned; otherwise a fresh MessageDetails is
-// used.
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
- if mdin == nil {
- mdin = new(MessageDetails)
- }
- md = mdin
-
- var p packet.Packet
- var h hash.Hash
- var wrappedHash hash.Hash
-FindLiteralData:
- for {
- p, err = packets.Next()
- if err != nil {
- return nil, err
- }
- switch p := p.(type) {
- case *packet.Compressed:
- if err := packets.Push(p.Body); err != nil {
- return nil, err
- }
- case *packet.OnePassSignature:
- if md.IsSigned {
- // If IsSigned is set, it means we have multiple
- // OnePassSignature packets.
- md.MultiSig = true
- if md.SignedBy != nil {
- // We've already found the signature we were looking
- // for, made by a key that we have in the keyring and
- // can check the signature against. Continue with that
- // instead of trying to find another.
- continue FindLiteralData
- }
- }
-
- h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
- if err != nil {
- md = nil
- return
- }
-
- md.IsSigned = true
- md.SignedByKeyId = p.KeyId
- keys := keyring.KeysByIdUsage(p.KeyId, nil, packet.KeyFlagSign)
- if len(keys) > 0 {
- md.SignedBy = &keys[0]
- }
- case *packet.LiteralData:
- md.LiteralData = p
- break FindLiteralData
- }
- }
-
- if md.SignedBy != nil {
- md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md}
- } else if md.decrypted != nil {
- md.UnverifiedBody = checkReader{md}
- } else {
- md.UnverifiedBody = md.LiteralData.Body
- }
-
- return md, nil
-}
-
-// hashForSignature returns a pair of hashes that can be used to verify a
-// signature. The signature may specify that the contents of the signed message
-// should be preprocessed (e.g. to normalize line endings). Thus this function
-// returns two hashes. The second should be used to hash the message itself and
-// performs any needed preprocessing.
-func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
- if !hashId.Available() {
- return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
- }
- h := hashId.New()
-
- switch sigType {
- case packet.SigTypeBinary:
- return h, h, nil
- case packet.SigTypeText:
- return h, NewCanonicalTextHash(h), nil
- }
-
- return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
-}
-
-// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
-// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
-// MDC checks.
-type checkReader struct {
- md *MessageDetails
-}
-
-func (cr checkReader) Read(buf []byte) (n int, err error) {
- n, err = cr.md.LiteralData.Body.Read(buf)
- if err == io.EOF {
- mdcErr := cr.md.decrypted.Close()
- if mdcErr != nil {
- err = mdcErr
- }
- }
- return
-}
-
-// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
-// the data as it is read. When it sees an EOF from the underlying io.Reader
-// it parses and checks a trailing Signature packet and triggers any MDC checks.
-type signatureCheckReader struct {
- packets *packet.Reader
- h, wrappedHash hash.Hash
- md *MessageDetails
-}
-
-func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
- n, err = scr.md.LiteralData.Body.Read(buf)
- scr.wrappedHash.Write(buf[:n])
- if err == io.EOF {
- for {
- var p packet.Packet
- p, scr.md.SignatureError = scr.packets.Next()
- if scr.md.SignatureError != nil {
- if scr.md.MultiSig {
- // If we are in MultiSig, we might have found another
- // signature that cannot be verified using our key.
- // Clear Signature field so it's clear for consumers
- // that this message failed to verify.
- scr.md.Signature = nil
- }
- return
- }
-
- var ok bool
- if scr.md.Signature, ok = p.(*packet.Signature); ok {
- var err error
- if keyID := scr.md.Signature.IssuerKeyId; keyID != nil {
- if *keyID != scr.md.SignedBy.PublicKey.KeyId {
- if scr.md.MultiSig {
- continue // try again to find a sig we can verify
- }
- err = errors.StructuralError("bad key id")
- }
- }
- if fingerprint := scr.md.Signature.IssuerFingerprint; fingerprint != nil {
- if !hmac.Equal(fingerprint, scr.md.SignedBy.PublicKey.Fingerprint[:]) {
- if scr.md.MultiSig {
- continue // try again to find a sig we can verify
- }
- err = errors.StructuralError("bad key fingerprint")
- }
- }
- if err == nil {
- err = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
- }
- scr.md.SignatureError = err
- } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
- scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
- } else {
- scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
- return
- }
-
- // Parse only one packet by default, unless the message is MultiSig.
- // Then we ask for more packets after discovering a non-matching
- // signature, until we find one that we can verify.
- break
- }
-
- // The SymmetricallyEncrypted packet, if any, might have an
- // unsigned hash of its own. In order to check this we need to
- // close that Reader.
- if scr.md.decrypted != nil {
- mdcErr := scr.md.decrypted.Close()
- if mdcErr != nil {
- err = mdcErr
- }
- }
- }
- return
-}
-
-// CheckDetachedSignature takes a signed file and a detached signature and
-// returns the signer if the signature is valid. If the signer isn't known,
-// ErrUnknownIssuer is returned.
-func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
- signer, _, err = checkDetachedSignature(keyring, signed, signature)
- return signer, err
-}
-
-func checkDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) {
- var issuerKeyId uint64
- var issuerFingerprint []byte
- var hashFunc crypto.Hash
- var sigType packet.SignatureType
- var keys []Key
- var p packet.Packet
-
- packets := packet.NewReader(signature)
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return nil, nil, errors.ErrUnknownIssuer
- }
- if err != nil {
- return nil, nil, err
- }
-
- switch sig := p.(type) {
- case *packet.Signature:
- if sig.IssuerKeyId == nil {
- return nil, nil, errors.StructuralError("signature doesn't have an issuer")
- }
- issuerKeyId = *sig.IssuerKeyId
- hashFunc = sig.Hash
- sigType = sig.SigType
- issuerFingerprint = sig.IssuerFingerprint
- case *packet.SignatureV3:
- issuerKeyId = sig.IssuerKeyId
- hashFunc = sig.Hash
- sigType = sig.SigType
- default:
- return nil, nil, errors.StructuralError("non signature packet found")
- }
-
- keys = keyring.KeysByIdUsage(issuerKeyId, issuerFingerprint, packet.KeyFlagSign)
- if len(keys) > 0 {
- break
- }
- }
-
- if len(keys) == 0 {
- panic("unreachable")
- }
-
- h, wrappedHash, err := hashForSignature(hashFunc, sigType)
- if err != nil {
- return nil, nil, err
- }
-
- if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
- return nil, nil, err
- }
-
- for _, key := range keys {
- switch sig := p.(type) {
- case *packet.Signature:
- err = key.PublicKey.VerifySignature(h, sig)
- case *packet.SignatureV3:
- err = key.PublicKey.VerifySignatureV3(h, sig)
- default:
- panic("unreachable")
- }
-
- if err == nil {
- return key.Entity, &issuerKeyId, nil
- }
- }
-
- return nil, nil, err
-}
-
-// CheckArmoredDetachedSignature performs the same actions as
-// CheckDetachedSignature but expects the signature to be armored.
-func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
- signer, _, err = checkArmoredDetachedSignature(keyring, signed, signature)
- return signer, err
-}
-
-func checkArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) {
- body, err := readArmored(signature, SignatureType)
- if err != nil {
- return
- }
- return checkDetachedSignature(keyring, signed, body)
-}
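The MessageDetails contract above is easy to get wrong: SignatureError is meaningful only after UnverifiedBody has been drained to EOF. A minimal sketch of the intended read pattern, assuming the vendored import path that this diff removes (decryptAndVerify is a hypothetical helper):

package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"github.com/keybase/go-crypto/openpgp"
)

func decryptAndVerify(msg []byte, keyring openpgp.KeyRing) []byte {
	md, err := openpgp.ReadMessage(bytes.NewReader(msg), keyring, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Draining the body also triggers any MDC check on decrypted data.
	body, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		log.Fatal(err)
	}
	// Only valid to inspect once EOF has been seen above.
	if md.IsSigned && md.SignatureError != nil {
		log.Fatal(md.SignatureError)
	}
	return body
}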
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go
deleted file mode 100644
index 01bb6785..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package s2k implements the various OpenPGP string-to-key transforms as
-// specified in RFC 4880 section 3.7.1.
-package s2k // import "github.com/keybase/go-crypto/openpgp/s2k"
-
-import (
- "crypto"
- "hash"
- "io"
- "strconv"
-
- "github.com/keybase/go-crypto/openpgp/errors"
-)
-
-// Config collects configuration parameters for s2k key-stretching
-// transformations. A nil *Config is valid and results in all default
-// values. Currently, Config is used only by the Serialize function in
-// this package.
-type Config struct {
- // Hash is the default hash function to be used. If
- // nil, SHA1 is used.
- Hash crypto.Hash
- // S2KCount is only used for symmetric encryption. It
- // determines the strength of the passphrase stretching when
- // the said passphrase is hashed to produce a key. S2KCount
- // should be between 1024 and 65011712, inclusive. If Config
- // is nil or S2KCount is 0, the value 65536 is used. Not all
- // values in the above range can be represented. S2KCount will
- // be rounded up to the next representable value if it cannot
- // be encoded exactly. When set, it is strongly encouraged to
- // use a value that is at least 65536. See RFC 4880 Section
- // 3.7.1.3.
- S2KCount int
-}
-
-func (c *Config) hash() crypto.Hash {
- if c == nil || uint(c.Hash) == 0 {
- // SHA1 is the historical default in this package.
- return crypto.SHA1
- }
-
- return c.Hash
-}
-
-func (c *Config) encodedCount() uint8 {
- if c == nil || c.S2KCount == 0 {
- return 96 // The common case. Corresponding to 65536
- }
-
- i := c.S2KCount
- switch {
- // Behave like GPG. Should we make 65536 the lowest value used?
- case i < 1024:
- i = 1024
- case i > 65011712:
- i = 65011712
- }
-
- return encodeCount(i)
-}
-
-// encodeCount converts an iterative "count" in the range 1024 to
-// 65011712, inclusive, to an encoded count. The return value is the
-// octet that is actually stored in the GPG file. encodeCount panics
-// if i is not in the above range (encodedCount above takes care to
-// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
-func encodeCount(i int) uint8 {
- if i < 1024 || i > 65011712 {
- panic("count arg i outside the required range")
- }
-
- for encoded := 0; encoded < 256; encoded++ {
- count := decodeCount(uint8(encoded))
- if count >= i {
- return uint8(encoded)
- }
- }
-
- return 255
-}
-
-// decodeCount returns the s2k mode 3 iterative "count" corresponding to
-// the encoded octet c.
-func decodeCount(c uint8) int {
- return (16 + int(c&15)) << (uint32(c>>4) + 6)
-}
-
-// Simple writes to out the result of computing the Simple S2K function (RFC
-// 4880, section 3.7.1.1) using the given hash and input passphrase.
-func Simple(out []byte, h hash.Hash, in []byte) {
- Salted(out, h, in, nil)
-}
-
-var zero [1]byte
-
-// Salted writes to out the result of computing the Salted S2K function (RFC
-// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
- done := 0
- var digest []byte
-
- for i := 0; done < len(out); i++ {
- h.Reset()
- for j := 0; j < i; j++ {
- h.Write(zero[:])
- }
- h.Write(salt)
- h.Write(in)
- digest = h.Sum(digest[:0])
- n := copy(out[done:], digest)
- done += n
- }
-}
-
-// Iterated writes to out the result of computing the Iterated and Salted S2K
-// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
-// salt and iteration count.
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
- combined := make([]byte, len(in)+len(salt))
- copy(combined, salt)
- copy(combined[len(salt):], in)
-
- if count < len(combined) {
- count = len(combined)
- }
-
- done := 0
- var digest []byte
- for i := 0; done < len(out); i++ {
- h.Reset()
- for j := 0; j < i; j++ {
- h.Write(zero[:])
- }
- written := 0
- for written < count {
- if written+len(combined) > count {
- todo := count - written
- h.Write(combined[:todo])
- written = count
- } else {
- h.Write(combined)
- written += len(combined)
- }
- }
- digest = h.Sum(digest[:0])
- n := copy(out[done:], digest)
- done += n
- }
-}
-
-func parseGNUExtensions(r io.Reader) (f func(out, in []byte), err error) {
- var buf [9]byte
-
- // A three-byte string identifier
- _, err = io.ReadFull(r, buf[:3])
- if err != nil {
- return
- }
- gnuExt := string(buf[:3])
-
- if gnuExt != "GNU" {
- return nil, errors.UnsupportedError("Malformed GNU extension: " + gnuExt)
- }
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return
- }
- gnuExtType := int(buf[0])
- switch gnuExtType {
- case 1:
- return nil, nil
- case 2:
- // Read a serial number, which is prefixed by a 1-byte length.
- // The maximum length is 16.
- var lenBuf [1]byte
- _, err = io.ReadFull(r, lenBuf[:])
- if err != nil {
- return
- }
-
- maxLen := 16
- ivLen := int(lenBuf[0])
- if ivLen > maxLen {
- ivLen = maxLen
- }
- ivBuf := make([]byte, ivLen)
- // For now we simply discard the IV
- _, err = io.ReadFull(r, ivBuf)
- if err != nil {
- return
- }
- return nil, nil
- default:
- return nil, errors.UnsupportedError("unknown S2K GNU protection mode: " + strconv.Itoa(int(gnuExtType)))
- }
-}
-
-// Parse reads a binary specification for a string-to-key transformation from r
-// and returns a function which performs that transform.
-func Parse(r io.Reader) (f func(out, in []byte), err error) {
- var buf [9]byte
-
- _, err = io.ReadFull(r, buf[:2])
- if err != nil {
- return
- }
-
- // GNU Extensions; handle them before we try to look for a hash, which won't
- // be needed in most cases anyway.
- if buf[0] == 101 {
- return parseGNUExtensions(r)
- }
-
- hash, ok := HashIdToHash(buf[1])
- if !ok {
- return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1])))
- }
- if !hash.Available() {
- return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
- }
- h := hash.New()
-
- switch buf[0] {
- case 0:
- f := func(out, in []byte) {
- Simple(out, h, in)
- }
- return f, nil
- case 1:
- _, err = io.ReadFull(r, buf[:8])
- if err != nil {
- return
- }
- f := func(out, in []byte) {
- Salted(out, h, in, buf[:8])
- }
- return f, nil
- case 3:
- _, err = io.ReadFull(r, buf[:9])
- if err != nil {
- return
- }
- count := decodeCount(buf[8])
- f := func(out, in []byte) {
- Iterated(out, h, in, buf[:8], count)
- }
- return f, nil
- }
-
- return nil, errors.UnsupportedError("S2K function")
-}
-
-// Serialize salts and stretches the given passphrase and writes the
-// resulting key into key. It also serializes an S2K descriptor to
-// w. The key stretching can be configured with c, which may be
-// nil. In that case, sensible defaults will be used.
-func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
- var buf [11]byte
- buf[0] = 3 /* iterated and salted */
- buf[1], _ = HashToHashId(c.hash())
- salt := buf[2:10]
- if _, err := io.ReadFull(rand, salt); err != nil {
- return err
- }
- encodedCount := c.encodedCount()
- count := decodeCount(encodedCount)
- buf[10] = encodedCount
- if _, err := w.Write(buf[:]); err != nil {
- return err
- }
-
- Iterated(key, c.hash().New(), passphrase, salt, count)
- return nil
-}
-
-// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with
-// Go's crypto.Hash type. See RFC 4880, section 9.4.
-var hashToHashIdMapping = []struct {
- id byte
- hash crypto.Hash
- name string
-}{
- {1, crypto.MD5, "MD5"},
- {2, crypto.SHA1, "SHA1"},
- {3, crypto.RIPEMD160, "RIPEMD160"},
- {8, crypto.SHA256, "SHA256"},
- {9, crypto.SHA384, "SHA384"},
- {10, crypto.SHA512, "SHA512"},
- {11, crypto.SHA224, "SHA224"},
-}
-
-// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id.
-func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
- for _, m := range hashToHashIdMapping {
- if m.id == id {
- return m.hash, true
- }
- }
- return 0, false
-}
-
-// HashIdToString returns the name of the hash function corresponding to the
-// given OpenPGP hash id, or false if id is unknown.
-func HashIdToString(id byte) (name string, ok bool) {
- for _, m := range hashToHashIdMapping {
- if m.id == id {
- return m.name, true
- }
- }
-
- return "", false
-}
-
-// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
- for _, m := range hashToHashIdMapping {
- if m.hash == h {
- return m.id, true
- }
- }
- return 0, false
-}
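The mode-3 count encoding in decodeCount can be verified by hand: for the default octet 96, (16 + (96 & 15)) << ((96 >> 4) + 6) = 16 << 12 = 65536, matching the comment in encodedCount. A minimal sketch checking both boundary values quoted above:

package main

import "fmt"

// decodeCount reproduces the formula from the deleted package:
// (16 + (c & 15)) << ((c >> 4) + 6).
func decodeCount(c uint8) int {
	return (16 + int(c&15)) << (uint32(c>>4) + 6)
}

func main() {
	fmt.Println(decodeCount(96))  // 65536, the package default
	fmt.Println(decodeCount(255)) // 65011712, the encodable maximum
}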
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch b/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch
deleted file mode 100644
index bfd764af..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch
+++ /dev/null
@@ -1,135 +0,0 @@
-diff --git a/openpgp/read.go b/openpgp/read.go
-index a6cecc5..0c9397b 100644
---- a/openpgp/read.go
-+++ b/openpgp/read.go
-@@ -56,8 +56,9 @@ type MessageDetails struct {
- // been consumed. Once EOF has been seen, the following fields are
- // valid. (An authentication code failure is reported as a
- // SignatureError error when reading from UnverifiedBody.)
-- SignatureError error // nil if the signature is good.
-- Signature *packet.Signature // the signature packet itself.
-+ SignatureError error // nil if the signature is good.
-+ Signature *packet.Signature // the signature packet itself, if v4 (default)
-+ SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature
-
- decrypted io.ReadCloser
- }
-@@ -334,13 +335,15 @@ func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
- }
-
- var ok bool
-- if scr.md.Signature, ok = p.(*packet.Signature); !ok {
-+ if scr.md.Signature, ok = p.(*packet.Signature); ok {
-+ scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
-+ } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
-+ scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
-+ } else {
- scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
- return
- }
-
-- scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
--
- // The SymmetricallyEncrypted packet, if any, might have an
- // unsigned hash of its own. In order to check this we need to
- // close that Reader.
-diff --git a/openpgp/read_test.go b/openpgp/read_test.go
-index 52f942c..abe8d7b 100644
---- a/openpgp/read_test.go
-+++ b/openpgp/read_test.go
-@@ -13,6 +13,7 @@ import (
- "strings"
- "testing"
-
-+ "golang.org/x/crypto/openpgp/armor"
- "golang.org/x/crypto/openpgp/errors"
- )
-
-@@ -411,6 +412,50 @@ func TestIssue11504(t *testing.T) {
- testReadMessageError(t, "9303000130303030303030303030983002303030303030030000000130")
- }
-
-+// TestSignatureV3Message tests the verification of V3 signature, generated
-+// with a modern V4-style key. Some people have their clients set to generate
-+// V3 signatures, so it's useful to be able to verify them.
-+func TestSignatureV3Message(t *testing.T) {
-+ sig, err := armor.Decode(strings.NewReader(signedMessageV3))
-+ if err != nil {
-+ t.Error(err)
-+ return
-+ }
-+ key, err := ReadArmoredKeyRing(strings.NewReader(keyV4forVerifyingSignedMessageV3))
-+ if err != nil {
-+ t.Error(err)
-+ return
-+ }
-+ md, err := ReadMessage(sig.Body, key, nil, nil)
-+ if err != nil {
-+ t.Error(err)
-+ return
-+ }
-+
-+ _, err = ioutil.ReadAll(md.UnverifiedBody)
-+ if err != nil {
-+ t.Error(err)
-+ return
-+ }
-+
-+ // We'll see a sig error here after reading in the UnverifiedBody above,
-+ // if there was one to see.
-+ if err = md.SignatureError; err != nil {
-+ t.Error(err)
-+ return
-+ }
-+
-+ if md.SignatureV3 == nil {
-+ t.Errorf("No available signature after checking signature")
-+ return
-+ }
-+ if md.Signature != nil {
-+ t.Errorf("Did not expect a signature V4 back")
-+ return
-+ }
-+ return
-+}
-+
- const testKey1KeyId = 0xA34D7E18C20C31BB
- const testKey3KeyId = 0x338934250CCC0360
-
-@@ -504,3 +549,36 @@ const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6
- const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
- const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000`
-+
-+const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-+Comment: GPGTools - https://gpgtools.org
-+
-+mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY
-+BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z
-+tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0
-+JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV
-+/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+
-+K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H
-+JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx
-+YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1
-+b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi
-+UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M
-+pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM
-+AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz
-+786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd
-+EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB
-+=RZia
-+-----END PGP PUBLIC KEY BLOCK-----
-+`
-+
-+const signedMessageV3 = `-----BEGIN PGP MESSAGE-----
-+Comment: GPGTools - https://gpgtools.org
-+
-+owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP
-+q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka
-+uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka
-+DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d
-+iT57d/OhWwA=
-+=hG7R
-+-----END PGP MESSAGE-----
-+`
diff --git a/vendor/github.com/keybase/go-crypto/openpgp/write.go b/vendor/github.com/keybase/go-crypto/openpgp/write.go
deleted file mode 100644
index 89ef132b..00000000
--- a/vendor/github.com/keybase/go-crypto/openpgp/write.go
+++ /dev/null
@@ -1,506 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "crypto"
- "hash"
- "io"
- "strconv"
- "time"
-
- "github.com/keybase/go-crypto/openpgp/armor"
- "github.com/keybase/go-crypto/openpgp/errors"
- "github.com/keybase/go-crypto/openpgp/packet"
- "github.com/keybase/go-crypto/openpgp/s2k"
-)
-
-// DetachSign signs message with the private key from signer (which must
-// already have been decrypted) and writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return detachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// ArmoredDetachSign signs message with the private key from signer (which
-// must already have been decrypted) and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
- return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// DetachSignText signs message (after canonicalising the line endings) with
-// the private key from signer (which must already have been decrypted) and
-// writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return detachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-// ArmoredDetachSignText signs message (after canonicalising the line endings)
-// with the private key from signer (which must already have been decrypted)
-// and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- out, err := armor.Encode(w, SignatureType, nil)
- if err != nil {
- return
- }
- err = detachSign(out, signer, message, sigType, config)
- if err != nil {
- return
- }
- return out.Close()
-}
-
-// SignWithSigner signs the message of type sigType with s and writes the
-// signature to w.
-// If config is nil, sensible defaults will be used.
-func SignWithSigner(s packet.Signer, w io.Writer, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- keyId := s.KeyId()
- sig := new(packet.Signature)
- sig.SigType = sigType
- sig.PubKeyAlgo = s.PublicKeyAlgo()
- sig.Hash = config.Hash()
- sig.CreationTime = config.Now()
- sig.IssuerKeyId = &keyId
-
- s.Reset()
-
- wrapped := s.(hash.Hash)
-
- if sigType == packet.SigTypeText {
- wrapped = NewCanonicalTextHash(s)
- }
-
- io.Copy(wrapped, message)
-
- err = sig.Sign(s, nil, config)
- if err != nil {
- return
- }
-
- err = sig.Serialize(w)
-
- return
-}
-
-func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- signerSubkey, ok := signer.signingKey(config.Now())
- if !ok {
- err = errors.InvalidArgumentError("no valid signing keys")
- return
- }
- if signerSubkey.PrivateKey == nil {
- return errors.InvalidArgumentError("signing key doesn't have a private key")
- }
- if signerSubkey.PrivateKey.Encrypted {
- return errors.InvalidArgumentError("signing key is encrypted")
- }
-
- sig := new(packet.Signature)
- sig.SigType = sigType
- sig.PubKeyAlgo = signerSubkey.PrivateKey.PubKeyAlgo
- sig.Hash = config.Hash()
- sig.CreationTime = config.Now()
- sig.IssuerKeyId = &signerSubkey.PrivateKey.KeyId
-
- h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
- if err != nil {
- return
- }
- io.Copy(wrappedHash, message)
-
- err = sig.Sign(h, signerSubkey.PrivateKey, config)
- if err != nil {
- return
- }
-
- return sig.Serialize(w)
-}
-
-// FileHints contains metadata about encrypted files. This metadata is, itself,
-// encrypted.
-type FileHints struct {
- // IsBinary can be set to hint that the contents are binary data.
- IsBinary bool
- // FileName hints at the name of the file that should be written. It's
- // truncated to 255 bytes if longer. It may be empty to suggest that the
- // file should not be written to disk. It may be equal to "_CONSOLE" to
- // suggest the data should not be written to disk.
- FileName string
- // ModTime contains the modification time of the file, or the zero time if not applicable.
- ModTime time.Time
-}
-
-// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
-// The resulting WriteCloser must be closed after the contents of the file have
-// been written.
-// If config is nil, sensible defaults will be used.
-func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- if hints == nil {
- hints = &FileHints{}
- }
-
- key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
- if err != nil {
- return
- }
- w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config)
- if err != nil {
- return
- }
-
- literaldata := w
- if algo := config.Compression(); algo != packet.CompressionNone {
- var compConfig *packet.CompressionConfig
- if config != nil {
- compConfig = config.CompressionConfig
- }
- literaldata, err = packet.SerializeCompressed(w, algo, compConfig)
- if err != nil {
- return
- }
- }
-
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds)
-}
-
-// intersectPreferences mutates and returns a prefix of a that contains only
-// the values in the intersection of a and b. The order of a is preserved.
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
- var j int
- for _, v := range a {
- for _, v2 := range b {
- if v == v2 {
- a[j] = v
- j++
- break
- }
- }
- }
-
- return a[:j]
-}
-
-func hashToHashId(h crypto.Hash) uint8 {
- v, ok := s2k.HashToHashId(h)
- if !ok {
- panic("tried to convert unknown hash")
- }
- return v
-}
-
-// Encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- var signer *packet.PrivateKey
- if signed != nil {
- signKey, ok := signed.signingKey(config.Now())
- if !ok {
- return nil, errors.InvalidArgumentError("no valid signing keys")
- }
- signer = signKey.PrivateKey
- if signer == nil {
- return nil, errors.InvalidArgumentError("no private key in signing key")
- }
- if signer.Encrypted {
- return nil, errors.InvalidArgumentError("signing key must be decrypted")
- }
- }
-
- // These are the possible ciphers that we'll use for the message.
- candidateCiphers := []uint8{
- uint8(packet.CipherAES128),
- uint8(packet.CipherAES256),
- uint8(packet.CipherCAST5),
- }
- // These are the possible hash functions that we'll use for the signature.
- candidateHashes := []uint8{
- hashToHashId(crypto.SHA256),
- hashToHashId(crypto.SHA512),
- hashToHashId(crypto.SHA1),
- hashToHashId(crypto.RIPEMD160),
- }
-
- // If no preferences were specified, assume something safe and reasonable.
- defaultCiphers := []uint8{
- uint8(packet.CipherAES128),
- uint8(packet.CipherAES192),
- uint8(packet.CipherAES256),
- uint8(packet.CipherCAST5),
- }
-
- defaultHashes := []uint8{
- hashToHashId(crypto.SHA256),
- hashToHashId(crypto.SHA512),
- hashToHashId(crypto.RIPEMD160),
- }
-
- encryptKeys := make([]Key, len(to))
- for i := range to {
- var ok bool
- encryptKeys[i], ok = to[i].encryptionKey(config.Now())
- if !ok {
- return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
- }
-
- sig := to[i].primaryIdentity().SelfSignature
-
- preferredSymmetric := sig.PreferredSymmetric
- if len(preferredSymmetric) == 0 {
- preferredSymmetric = defaultCiphers
- }
- preferredHashes := sig.PreferredHash
- if len(preferredHashes) == 0 {
- preferredHashes = defaultHashes
- }
- candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
- candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
- }
-
- if len(candidateCiphers) == 0 {
- return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common ciphers")
- }
- if len(candidateHashes) == 0 {
- return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common hashes")
- }
-
- cipher := packet.CipherFunction(candidateCiphers[0])
- // If the cipher specified by config is a candidate, we'll use that.
- configuredCipher := config.Cipher()
- for _, c := range candidateCiphers {
- cipherFunc := packet.CipherFunction(c)
- if cipherFunc == configuredCipher {
- cipher = cipherFunc
- break
- }
- }
-
- var hash crypto.Hash
- for _, hashId := range candidateHashes {
- if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
- hash = h
- break
- }
- }
-
- // If the hash specified by config is a candidate, we'll use that.
- if configuredHash := config.Hash(); configuredHash.Available() {
- for _, hashId := range candidateHashes {
- if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
- hash = h
- break
- }
- }
- }
-
- if hash == 0 {
- hashId := candidateHashes[0]
- name, ok := s2k.HashIdToString(hashId)
- if !ok {
- name = "#" + strconv.Itoa(int(hashId))
- }
- return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
- }
-
- symKey := make([]byte, cipher.KeySize())
- if _, err := io.ReadFull(config.Random(), symKey); err != nil {
- return nil, err
- }
-
- for _, key := range encryptKeys {
- if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {
- return nil, err
- }
- }
-
- encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
- if err != nil {
- return
- }
-
- if signer != nil {
- ops := &packet.OnePassSignature{
- SigType: packet.SigTypeBinary,
- Hash: hash,
- PubKeyAlgo: signer.PubKeyAlgo,
- KeyId: signer.KeyId,
- IsLast: true,
- }
- if err := ops.Serialize(encryptedData); err != nil {
- return nil, err
- }
- }
-
- if hints == nil {
- hints = &FileHints{}
- }
-
- w := encryptedData
- if signer != nil {
- // If we need to write a signature packet after the literal
- // data then we need to stop literalData from closing
- // encryptedData.
- w = noOpCloser{encryptedData}
-
- }
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
- if err != nil {
- return nil, err
- }
-
- if signer != nil {
- return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
- }
- return literalData, nil
-}
-
-// signatureWriter hashes the contents of a message while passing it along to
-// literalData. When closed, it closes literalData, writes a signature packet
-// to encryptedData and then also closes encryptedData.
-type signatureWriter struct {
- encryptedData io.WriteCloser
- literalData io.WriteCloser
- hashType crypto.Hash
- h hash.Hash
- signer *packet.PrivateKey
- config *packet.Config
-}
-
-func (s signatureWriter) Write(data []byte) (int, error) {
- s.h.Write(data)
- return s.literalData.Write(data)
-}
-
-func (s signatureWriter) Close() error {
- sig := &packet.Signature{
- SigType: packet.SigTypeBinary,
- PubKeyAlgo: s.signer.PubKeyAlgo,
- Hash: s.hashType,
- CreationTime: s.config.Now(),
- IssuerKeyId: &s.signer.KeyId,
- }
-
- if err := sig.Sign(s.h, s.signer, s.config); err != nil {
- return err
- }
- if err := s.literalData.Close(); err != nil {
- return err
- }
- if err := sig.Serialize(s.encryptedData); err != nil {
- return err
- }
- return s.encryptedData.Close()
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-// TODO: we have two of these in OpenPGP packages alone. This probably needs
-// to be promoted somewhere more common.
-type noOpCloser struct {
- w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
- return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
- return nil
-}
-
-// AttachedSign is like openpgp.Encrypt (as in p.crypto/openpgp/write.go), but
-// doesn't encrypt at all; it just signs the literal unencrypted data.
-// Unfortunately we need to duplicate some code here that's already
-// in write.go.
-func AttachedSign(out io.WriteCloser, signed Entity, hints *FileHints,
- config *packet.Config) (in io.WriteCloser, err error) {
-
- if hints == nil {
- hints = &FileHints{}
- }
-
- if config == nil {
- config = &packet.Config{}
- }
-
- var signer *packet.PrivateKey
-
- signKey, ok := signed.signingKey(config.Now())
- if !ok {
- err = errors.InvalidArgumentError("no valid signing keys")
- return
- }
- signer = signKey.PrivateKey
- if signer == nil {
- err = errors.InvalidArgumentError("no valid signing keys")
- return
- }
- if signer.Encrypted {
- err = errors.InvalidArgumentError("signing key must be decrypted")
- return
- }
-
- if algo := config.Compression(); algo != packet.CompressionNone {
- var compConfig *packet.CompressionConfig
- if config != nil {
- compConfig = config.CompressionConfig
- }
- out, err = packet.SerializeCompressed(out, algo, compConfig)
- if err != nil {
- return
- }
- }
-
- hasher := crypto.SHA512
-
- ops := &packet.OnePassSignature{
- SigType: packet.SigTypeBinary,
- Hash: hasher,
- PubKeyAlgo: signer.PubKeyAlgo,
- KeyId: signer.KeyId,
- IsLast: true,
- }
-
- if err = ops.Serialize(out); err != nil {
- return
- }
-
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
-
- // We don't want the literal serializer to close the output stream
- // since we're going to need to write to it when we finish up the
- // signature stuff.
- in, err = packet.SerializeLiteral(noOpCloser{out}, hints.IsBinary, hints.FileName, epochSeconds)
-
- if err != nil {
- return
- }
-
- // If we need to write a signature packet after the literal
- // data then we need to stop literalData from closing the
- // output stream.
- in = signatureWriter{out, in, hasher, hasher.New(), signer, config}
-
- return
-}
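A usage sketch of the close-to-finalize contract shared by SymmetricallyEncrypt and Encrypt above (the returned WriteCloser must be closed to flush the trailing packets), assuming the vendored import path that this diff removes:

package main

import (
	"bytes"
	"log"

	"github.com/keybase/go-crypto/openpgp"
)

func encryptWithPassphrase(plaintext, passphrase []byte) []byte {
	var out bytes.Buffer
	// nil hints and nil config fall back to the documented defaults.
	w, err := openpgp.SymmetricallyEncrypt(&out, passphrase, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write(plaintext); err != nil {
		log.Fatal(err)
	}
	// Close finalizes the literal data and MDC packets.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	return out.Bytes()
}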
diff --git a/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go b/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go
deleted file mode 100644
index 5c5f415c..00000000
--- a/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rsa
-
-import (
- "crypto"
- "crypto/subtle"
- "errors"
- "io"
- "math/big"
-)
-
-// This file implements encryption and decryption using PKCS#1 v1.5 padding.
-
-// PKCS1v15DecryptOptions is for passing options to PKCS#1 v1.5 decryption using
-// the crypto.Decrypter interface.
-type PKCS1v15DecryptOptions struct {
- // SessionKeyLen is the length of the session key that is being
- // decrypted. If not zero, then a padding error during decryption will
- // cause a random plaintext of this length to be returned rather than
- // an error. These alternatives happen in constant time.
- SessionKeyLen int
-}
-
-// EncryptPKCS1v15 encrypts the given message with RSA and the padding scheme from PKCS#1 v1.5.
-// The message must be no longer than the length of the public modulus minus 11 bytes.
-//
-// The rand parameter is used as a source of entropy to ensure that encrypting
-// the same message twice doesn't result in the same ciphertext.
-//
-// WARNING: use of this function to encrypt plaintexts other than session keys
-// is dangerous. Use RSA OAEP in new protocols.
-func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) (out []byte, err error) {
- if err := checkPub(pub); err != nil {
- return nil, err
- }
- k := (pub.N.BitLen() + 7) / 8
- if len(msg) > k-11 {
- err = ErrMessageTooLong
- return
- }
-
- // EM = 0x00 || 0x02 || PS || 0x00 || M
- em := make([]byte, k)
- em[1] = 2
- ps, mm := em[2:len(em)-len(msg)-1], em[len(em)-len(msg):]
- err = nonZeroRandomBytes(ps, rand)
- if err != nil {
- return
- }
- em[len(em)-len(msg)-1] = 0
- copy(mm, msg)
-
- m := new(big.Int).SetBytes(em)
- c := encrypt(new(big.Int), pub, m)
-
- copyWithLeftPad(em, c.Bytes())
- out = em
- return
-}
-
-// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5.
-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
-//
-// Note that whether this function returns an error or not discloses secret
-// information. If an attacker can cause this function to run repeatedly and
-// learn whether each instance returned an error then they can decrypt and
-// forge signatures as if they had the private key. See
-// DecryptPKCS1v15SessionKey for a way of solving this problem.
-func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (out []byte, err error) {
- if err := checkPub(&priv.PublicKey); err != nil {
- return nil, err
- }
- valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext)
- if err != nil {
- return
- }
- if valid == 0 {
- return nil, ErrDecryption
- }
- out = out[index:]
- return
-}
-
-// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS#1 v1.5.
-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
-// It returns an error if the ciphertext is the wrong length or if the
-// ciphertext is greater than the public modulus. Otherwise, no error is
-// returned. If the padding is valid, the resulting plaintext message is copied
-// into key. Otherwise, key is unchanged. These alternatives occur in constant
-// time. It is intended that the user of this function generate a random
-// session key beforehand and continue the protocol with the resulting value.
-// This will remove any possibility that an attacker can learn any information
-// about the plaintext.
-// See ``Chosen Ciphertext Attacks Against Protocols Based on the RSA
-// Encryption Standard PKCS #1'', Daniel Bleichenbacher, Advances in Cryptology
-// (Crypto '98).
-//
-// Note that if the session key is too small then it may be possible for an
-// attacker to brute-force it. If they can do that then they can learn whether
-// a random value was used (because it'll be different for the same ciphertext)
-// and thus whether the padding was correct. This defeats the point of this
-// function. Using at least a 16-byte key will protect against this attack.
-func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) (err error) {
- if err := checkPub(&priv.PublicKey); err != nil {
- return err
- }
- k := (priv.N.BitLen() + 7) / 8
- if k-(len(key)+3+8) < 0 {
- return ErrDecryption
- }
-
- valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext)
- if err != nil {
- return
- }
-
- if len(em) != k {
- // This should be impossible because decryptPKCS1v15 always
- // returns the full slice.
- return ErrDecryption
- }
-
- valid &= subtle.ConstantTimeEq(int32(len(em)-index), int32(len(key)))
- subtle.ConstantTimeCopy(valid, key, em[len(em)-len(key):])
- return
-}
-
-// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if
-// rand is not nil. It returns one or zero in valid that indicates whether the
-// plaintext was correctly structured. In either case, the plaintext is
-// returned in em so that it may be read independently of whether it was valid
-// in order to maintain constant memory access patterns. If the plaintext was
-// valid then index contains the index of the original message in em.
-func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
- k := (priv.N.BitLen() + 7) / 8
- if k < 11 {
- err = ErrDecryption
- return
- }
-
- c := new(big.Int).SetBytes(ciphertext)
- m, err := decrypt(rand, priv, c)
- if err != nil {
- return
- }
-
- em = leftPad(m.Bytes(), k)
- firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
- secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2)
-
- // The remainder of the plaintext must be a string of non-zero random
- // octets, followed by a 0, followed by the message.
- // lookingForIndex: 1 iff we are still looking for the zero.
- // index: the offset of the first zero byte.
- lookingForIndex := 1
-
- for i := 2; i < len(em); i++ {
- equals0 := subtle.ConstantTimeByteEq(em[i], 0)
- index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
- lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
- }
-
- // The PS padding must be at least 8 bytes long, and it starts two
- // bytes into em.
- validPS := subtle.ConstantTimeLessOrEq(2+8, index)
-
- valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1) & validPS
- index = subtle.ConstantTimeSelect(valid, index+1, 0)
- return valid, em, index, nil
-}
-
-// nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
- _, err = io.ReadFull(rand, s)
- if err != nil {
- return
- }
-
- for i := 0; i < len(s); i++ {
- for s[i] == 0 {
- _, err = io.ReadFull(rand, s[i:i+1])
- if err != nil {
- return
- }
- // In tests, the PRNG may return all zeros so we do
- // this to break the loop.
- s[i] ^= 0x42
- }
- }
-
- return
-}
-
-// These are ASN1 DER structures:
-// DigestInfo ::= SEQUENCE {
-// digestAlgorithm AlgorithmIdentifier,
-// digest OCTET STRING
-// }
-// For performance, we don't use the generic ASN1 encoder. Rather, we
-// precompute a prefix of the digest value that makes a valid ASN1 DER string
-// with the correct contents.
-var hashPrefixes = map[crypto.Hash][]byte{
- crypto.MD5: {0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10},
- crypto.SHA1: {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14},
- crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c},
- crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20},
- crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30},
- crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40},
- crypto.MD5SHA1: {}, // A special TLS case which doesn't use an ASN1 prefix.
- crypto.RIPEMD160: {0x30, 0x20, 0x30, 0x08, 0x06, 0x06, 0x28, 0xcf, 0x06, 0x03, 0x00, 0x31, 0x04, 0x14},
-}
-
-// SignPKCS1v15 calculates the signature of hashed using RSASSA-PKCS1-V1_5-SIGN from RSA PKCS#1 v1.5.
-// Note that hashed must be the result of hashing the input message using the
-// given hash function. If hash is zero, hashed is signed directly. This isn't
-// advisable except for interoperability.
-//
-// If rand is not nil then RSA blinding will be used to avoid timing side-channel attacks.
-//
-// This function is deterministic. Thus, if the set of possible messages is
-// small, an attacker may be able to build a map from messages to signatures
-// and identify the signed messages. As ever, signatures provide authenticity,
-// not confidentiality.
-func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) (s []byte, err error) {
- hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
- if err != nil {
- return
- }
-
- tLen := len(prefix) + hashLen
- k := (priv.N.BitLen() + 7) / 8
- if k < tLen+11 {
- return nil, ErrMessageTooLong
- }
-
- // EM = 0x00 || 0x01 || PS || 0x00 || T
- em := make([]byte, k)
- em[1] = 1
- for i := 2; i < k-tLen-1; i++ {
- em[i] = 0xff
- }
- copy(em[k-tLen:k-hashLen], prefix)
- copy(em[k-hashLen:k], hashed)
-
- m := new(big.Int).SetBytes(em)
- c, err := decryptAndCheck(rand, priv, m)
- if err != nil {
- return
- }
-
- copyWithLeftPad(em, c.Bytes())
- s = em
- return
-}
-
-// VerifyPKCS1v15 verifies an RSA PKCS#1 v1.5 signature.
-// hashed is the result of hashing the input message using the given hash
-// function and sig is the signature. A valid signature is indicated by
-// returning a nil error. If hash is zero then hashed is used directly. This
-// isn't advisable except for interoperability.
-func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) (err error) {
- hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed))
- if err != nil {
- return
- }
-
- tLen := len(prefix) + hashLen
- k := (pub.N.BitLen() + 7) / 8
- if k < tLen+11 {
- err = ErrVerification
- return
- }
-
- c := new(big.Int).SetBytes(sig)
- m := encrypt(new(big.Int), pub, c)
- em := leftPad(m.Bytes(), k)
- // EM = 0x00 || 0x01 || PS || 0x00 || T
-
- ok := subtle.ConstantTimeByteEq(em[0], 0)
- ok &= subtle.ConstantTimeByteEq(em[1], 1)
- ok &= subtle.ConstantTimeCompare(em[k-hashLen:k], hashed)
- ok &= subtle.ConstantTimeCompare(em[k-tLen:k-hashLen], prefix)
- ok &= subtle.ConstantTimeByteEq(em[k-tLen-1], 0)
-
- for i := 2; i < k-tLen-1; i++ {
- ok &= subtle.ConstantTimeByteEq(em[i], 0xff)
- }
-
- if ok != 1 {
- return ErrVerification
- }
-
- return nil
-}
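
For orientation, a round trip through the two functions above. The sketch uses the standard library's `crypto/rsa`, whose signatures this vendored fork mirrors (the fork only widens `PublicKey.E` to `int64`):

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	digest := sha256.Sum256([]byte("attack at dawn"))
	sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, digest[:])
	if err != nil {
		panic(err)
	}

	// A nil error indicates a valid signature.
	err = rsa.VerifyPKCS1v15(&key.PublicKey, crypto.SHA256, digest[:], sig)
	fmt.Println("verified:", err == nil)
}
```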
-
-func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte, err error) {
- // Special case: crypto.Hash(0) is used to indicate that the data is
- // signed directly.
- if hash == 0 {
- return inLen, nil, nil
- }
-
- hashLen = hash.Size()
- if inLen != hashLen {
- return 0, nil, errors.New("crypto/rsa: input must be hashed message")
- }
- prefix, ok := hashPrefixes[hash]
- if !ok {
- return 0, nil, errors.New("crypto/rsa: unsupported hash function")
- }
- return
-}
-
-// copyWithLeftPad copies src to the end of dest, padding with zero bytes as
-// needed.
-func copyWithLeftPad(dest, src []byte) {
- numPaddingBytes := len(dest) - len(src)
- for i := 0; i < numPaddingBytes; i++ {
- dest[i] = 0
- }
- copy(dest[numPaddingBytes:], src)
-}
diff --git a/vendor/github.com/keybase/go-crypto/rsa/pss.go b/vendor/github.com/keybase/go-crypto/rsa/pss.go
deleted file mode 100644
index 8a94589b..00000000
--- a/vendor/github.com/keybase/go-crypto/rsa/pss.go
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package rsa
-
-// This file implements the PSS signature scheme [1].
-//
-// [1] http://www.rsa.com/rsalabs/pkcs/files/h11300-wp-pkcs-1v2-2-rsa-cryptography-standard.pdf
-
-import (
- "bytes"
- "crypto"
- "errors"
- "hash"
- "io"
- "math/big"
-)
-
-func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) {
- // See [1], section 9.1.1
- hLen := hash.Size()
- sLen := len(salt)
- emLen := (emBits + 7) / 8
-
- // 1. If the length of M is greater than the input limitation for the
- // hash function (2^61 - 1 octets for SHA-1), output "message too
- // long" and stop.
- //
- // 2. Let mHash = Hash(M), an octet string of length hLen.
-
- if len(mHash) != hLen {
- return nil, errors.New("crypto/rsa: input must be hashed message")
- }
-
- // 3. If emLen < hLen + sLen + 2, output "encoding error" and stop.
-
- if emLen < hLen+sLen+2 {
- return nil, errors.New("crypto/rsa: encoding error")
- }
-
- em := make([]byte, emLen)
- db := em[:emLen-sLen-hLen-2+1+sLen]
- h := em[emLen-sLen-hLen-2+1+sLen : emLen-1]
-
- // 4. Generate a random octet string salt of length sLen; if sLen = 0,
- // then salt is the empty string.
- //
- // 5. Let
- // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt;
- //
- // M' is an octet string of length 8 + hLen + sLen with eight
- // initial zero octets.
- //
- // 6. Let H = Hash(M'), an octet string of length hLen.
-
- var prefix [8]byte
-
- hash.Write(prefix[:])
- hash.Write(mHash)
- hash.Write(salt)
-
- h = hash.Sum(h[:0])
- hash.Reset()
-
- // 7. Generate an octet string PS consisting of emLen - sLen - hLen - 2
- // zero octets. The length of PS may be 0.
- //
- // 8. Let DB = PS || 0x01 || salt; DB is an octet string of length
- // emLen - hLen - 1.
-
- db[emLen-sLen-hLen-2] = 0x01
- copy(db[emLen-sLen-hLen-1:], salt)
-
- // 9. Let dbMask = MGF(H, emLen - hLen - 1).
- //
- // 10. Let maskedDB = DB \xor dbMask.
-
- mgf1XOR(db, hash, h)
-
- // 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
- // maskedDB to zero.
-
- db[0] &= (0xFF >> uint(8*emLen-emBits))
-
- // 12. Let EM = maskedDB || H || 0xbc.
- em[emLen-1] = 0xBC
-
- // 13. Output EM.
- return em, nil
-}
-
-func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
- // 1. If the length of M is greater than the input limitation for the
- // hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
- // and stop.
- //
- // 2. Let mHash = Hash(M), an octet string of length hLen.
- hLen := hash.Size()
- if hLen != len(mHash) {
- return ErrVerification
- }
-
- // 3. If emLen < hLen + sLen + 2, output "inconsistent" and stop.
- emLen := (emBits + 7) / 8
- if emLen < hLen+sLen+2 {
- return ErrVerification
- }
-
- // 4. If the rightmost octet of EM does not have hexadecimal value
- // 0xbc, output "inconsistent" and stop.
- if em[len(em)-1] != 0xBC {
- return ErrVerification
- }
-
- // 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
- // let H be the next hLen octets.
- db := em[:emLen-hLen-1]
- h := em[emLen-hLen-1 : len(em)-1]
-
- // 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in
- // maskedDB are not all equal to zero, output "inconsistent" and
- // stop.
- if em[0]&(0xFF<<uint(8-(8*emLen-emBits))) != 0 {
- return ErrVerification
- }
-
- // 7. Let dbMask = MGF(H, emLen - hLen - 1).
- //
- // 8. Let DB = maskedDB \xor dbMask.
- mgf1XOR(db, hash, h)
-
- // 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB
- // to zero.
- db[0] &= (0xFF >> uint(8*emLen-emBits))
-
- if sLen == PSSSaltLengthAuto {
- FindSaltLength:
- for sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {
- switch db[emLen-hLen-sLen-2] {
- case 1:
- break FindSaltLength
- case 0:
- continue
- default:
- return ErrVerification
- }
- }
- if sLen < 0 {
- return ErrVerification
- }
- } else {
- // 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
- // or if the octet at position emLen - hLen - sLen - 1 (the leftmost
- // position is "position 1") does not have hexadecimal value 0x01,
- // output "inconsistent" and stop.
- for _, e := range db[:emLen-hLen-sLen-2] {
- if e != 0x00 {
- return ErrVerification
- }
- }
- if db[emLen-hLen-sLen-2] != 0x01 {
- return ErrVerification
- }
- }
-
- // 11. Let salt be the last sLen octets of DB.
- salt := db[len(db)-sLen:]
-
- // 12. Let
- // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;
- // M' is an octet string of length 8 + hLen + sLen with eight
- // initial zero octets.
- //
- // 13. Let H' = Hash(M'), an octet string of length hLen.
- var prefix [8]byte
- hash.Write(prefix[:])
- hash.Write(mHash)
- hash.Write(salt)
-
- h0 := hash.Sum(nil)
-
- // 14. If H = H', output "consistent." Otherwise, output "inconsistent."
- if !bytes.Equal(h0, h) {
- return ErrVerification
- }
- return nil
-}
-
-// signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt.
-// Note that hashed must be the result of hashing the input message using the
-// given hash function. salt is a random sequence of bytes whose length will be
-// later used to verify the signature.
-func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) {
- nBits := priv.N.BitLen()
- em, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New())
- if err != nil {
- return
- }
- m := new(big.Int).SetBytes(em)
- c, err := decryptAndCheck(rand, priv, m)
- if err != nil {
- return
- }
- s = make([]byte, (nBits+7)/8)
- copyWithLeftPad(s, c.Bytes())
- return
-}
-
-const (
- // PSSSaltLengthAuto causes the salt in a PSS signature to be as large
- // as possible when signing, and to be auto-detected when verifying.
- PSSSaltLengthAuto = 0
- // PSSSaltLengthEqualsHash causes the salt length to equal the length
- // of the hash used in the signature.
- PSSSaltLengthEqualsHash = -1
-)
-
-// PSSOptions contains options for creating and verifying PSS signatures.
-type PSSOptions struct {
- // SaltLength controls the length of the salt used in the PSS
- // signature. It can either be a number of bytes, or one of the special
- // PSSSaltLength constants.
- SaltLength int
-
- // Hash, if not zero, overrides the hash function passed to SignPSS.
- // This is the only way to specify the hash function when using the
- // crypto.Signer interface.
- Hash crypto.Hash
-}
-
-// HashFunc returns pssOpts.Hash so that PSSOptions implements
-// crypto.SignerOpts.
-func (pssOpts *PSSOptions) HashFunc() crypto.Hash {
- return pssOpts.Hash
-}
-
-func (opts *PSSOptions) saltLength() int {
- if opts == nil {
- return PSSSaltLengthAuto
- }
- return opts.SaltLength
-}
-
-// SignPSS calculates the signature of hashed using RSASSA-PSS [1].
-// Note that hashed must be the result of hashing the input message using the
-// given hash function. The opts argument may be nil, in which case sensible
-// defaults are used.
-func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) (s []byte, err error) {
- saltLength := opts.saltLength()
- switch saltLength {
- case PSSSaltLengthAuto:
- saltLength = (priv.N.BitLen()+7)/8 - 2 - hash.Size()
- case PSSSaltLengthEqualsHash:
- saltLength = hash.Size()
- }
-
- if opts != nil && opts.Hash != 0 {
- hash = opts.Hash
- }
-
- salt := make([]byte, saltLength)
- if _, err = io.ReadFull(rand, salt); err != nil {
- return
- }
- return signPSSWithSalt(rand, priv, hash, hashed, salt)
-}
-
-// VerifyPSS verifies a PSS signature.
-// hashed is the result of hashing the input message using the given hash
-// function and sig is the signature. A valid signature is indicated by
-// returning a nil error. The opts argument may be nil, in which case sensible
-// defaults are used.
-func VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error {
- return verifyPSS(pub, hash, hashed, sig, opts.saltLength())
-}
-
-// verifyPSS verifies a PSS signature with the given salt length.
-func verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error {
- nBits := pub.N.BitLen()
- if len(sig) != (nBits+7)/8 {
- return ErrVerification
- }
- s := new(big.Int).SetBytes(sig)
- m := encrypt(new(big.Int), pub, s)
- emBits := nBits - 1
- emLen := (emBits + 7) / 8
- if emLen < len(m.Bytes()) {
- return ErrVerification
- }
- em := make([]byte, emLen)
- copyWithLeftPad(em, m.Bytes())
- if saltLen == PSSSaltLengthEqualsHash {
- saltLen = hash.Size()
- }
- return emsaPSSVerify(hashed, em, emBits, saltLen, hash.New())
-}
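
And the equivalent PSS round trip, again sketched against the standard library's matching API. `PSSSaltLengthEqualsHash` pins the salt length on both sides; with `PSSSaltLengthAuto` the verifier would instead recover it from the signature:

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	digest := sha256.Sum256([]byte("attack at dawn"))
	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}

	sig, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, digest[:], opts)
	if err != nil {
		panic(err)
	}
	err = rsa.VerifyPSS(&key.PublicKey, crypto.SHA256, digest[:], sig, opts)
	fmt.Println("verified:", err == nil)
}
```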
diff --git a/vendor/github.com/keybase/go-crypto/rsa/rsa.go b/vendor/github.com/keybase/go-crypto/rsa/rsa.go
deleted file mode 100644
index ff6b11b3..00000000
--- a/vendor/github.com/keybase/go-crypto/rsa/rsa.go
+++ /dev/null
@@ -1,646 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package rsa implements RSA encryption as specified in PKCS#1.
-//
-// RSA is a single, fundamental operation that is used in this package to
-// implement either public-key encryption or public-key signatures.
-//
-// The original specification for encryption and signatures with RSA is PKCS#1
-// and the terms "RSA encryption" and "RSA signatures" by default refer to
-// PKCS#1 version 1.5. However, that specification has flaws and new designs
-// should use version two, usually referred to simply as OAEP and PSS, where
-// possible.
-//
-// Two sets of interfaces are included in this package. When a more abstract
-// interface isn't necessary, there are functions for encrypting/decrypting
-// with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract
-// over the public-key primitive, the PrivateKey struct implements the
-// Decrypter and Signer interfaces from the crypto package.
-package rsa
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "hash"
- "io"
- "math/big"
-)
-
-var bigZero = big.NewInt(0)
-var bigOne = big.NewInt(1)
-
-// A PublicKey represents the public part of an RSA key.
-type PublicKey struct {
- N *big.Int // modulus
- E int64 // public exponent
-}
-
-// OAEPOptions is an interface for passing options to OAEP decryption using the
-// crypto.Decrypter interface.
-type OAEPOptions struct {
- // Hash is the hash function that will be used when generating the mask.
- Hash crypto.Hash
- // Label is an arbitrary byte string that must be equal to the value
- // used when encrypting.
- Label []byte
-}
-
-var (
- errPublicModulus = errors.New("crypto/rsa: missing public modulus")
- errPublicExponentSmall = errors.New("crypto/rsa: public exponent too small")
- errPublicExponentLarge = errors.New("crypto/rsa: public exponent too large")
-)
-
-// checkPub sanity checks the public key before we use it.
-// We require pub.E to fit into a 64-bit signed integer so that we
-// do not have different behavior depending on whether
-// int is 32 or 64 bits. See also
-// http://www.imperialviolet.org/2012/03/16/rsae.html.
-func checkPub(pub *PublicKey) error {
- if pub.N == nil {
- return errPublicModulus
- }
- if pub.E < 2 {
- return errPublicExponentSmall
- }
- if pub.E > 1<<63-1 {
- return errPublicExponentLarge
- }
- return nil
-}
-
-// A PrivateKey represents an RSA key
-type PrivateKey struct {
- PublicKey // public part.
- D *big.Int // private exponent
- Primes []*big.Int // prime factors of N, has >= 2 elements.
-
- // Precomputed contains precomputed values that speed up private
- // operations, if available.
- Precomputed PrecomputedValues
-}
-
-// Public returns the public key corresponding to priv.
-func (priv *PrivateKey) Public() crypto.PublicKey {
- return &priv.PublicKey
-}
-
-// Sign signs msg with priv, reading randomness from rand. If opts is a
-// *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will
-// be used. This method is intended to support keys where the private part is
-// kept in, for example, a hardware module. Common uses should use the Sign*
-// functions in this package.
-func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
- if pssOpts, ok := opts.(*PSSOptions); ok {
- return SignPSS(rand, priv, pssOpts.Hash, msg, pssOpts)
- }
-
- return SignPKCS1v15(rand, priv, opts.HashFunc(), msg)
-}
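
The opts type is the only signal for choosing the scheme, which is what lets `crypto.Signer` consumers stay agnostic about where the key lives. A brief sketch against the standard-library API (the `signWith` helper is illustrative, not part of this package):

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

// signWith accepts any crypto.Signer, e.g. a key held in an HSM.
func signWith(signer crypto.Signer, msg []byte) ([]byte, error) {
	digest := sha256.Sum256(msg)
	// Passing *rsa.PSSOptions selects PSS; passing plain crypto.SHA256
	// (any other crypto.SignerOpts) would select PKCS#1 v1.5.
	return signer.Sign(rand.Reader, digest[:], &rsa.PSSOptions{Hash: crypto.SHA256})
}

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	sig, err := signWith(key, []byte("hello"))
	fmt.Println(len(sig), err)
}
```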
-
-// Decrypt decrypts ciphertext with priv. If opts is nil or of type
-// *PKCS1v15DecryptOptions then PKCS#1 v1.5 decryption is performed. Otherwise
-// opts must have type *OAEPOptions and OAEP decryption is done.
-func (priv *PrivateKey) Decrypt(rand io.Reader, ciphertext []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
- if opts == nil {
- return DecryptPKCS1v15(rand, priv, ciphertext)
- }
-
- switch opts := opts.(type) {
- case *OAEPOptions:
- return DecryptOAEP(opts.Hash.New(), rand, priv, ciphertext, opts.Label)
-
- case *PKCS1v15DecryptOptions:
- if l := opts.SessionKeyLen; l > 0 {
- plaintext = make([]byte, l)
- if _, err := io.ReadFull(rand, plaintext); err != nil {
- return nil, err
- }
- if err := DecryptPKCS1v15SessionKey(rand, priv, ciphertext, plaintext); err != nil {
- return nil, err
- }
- return plaintext, nil
- } else {
- return DecryptPKCS1v15(rand, priv, ciphertext)
- }
-
- default:
- return nil, errors.New("crypto/rsa: invalid options for Decrypt")
- }
-}
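
Decrypt dispatches the same way: the dynamic type of opts picks the padding scheme. A sketch of the OAEP branch through the generic `crypto.Decrypter` interface, using the standard library's equivalent types:

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	ct, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, []byte("secret"), nil)
	if err != nil {
		panic(err)
	}

	// The *OAEPOptions value routes the call to DecryptOAEP.
	pt, err := key.Decrypt(rand.Reader, ct, &rsa.OAEPOptions{Hash: crypto.SHA256})
	fmt.Println(string(pt), err)
}
```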
-
-type PrecomputedValues struct {
- Dp, Dq *big.Int // D mod (P-1) (or mod Q-1)
- Qinv *big.Int // Q^-1 mod P
-
- // CRTValues is used for the 3rd and subsequent primes. Due to a
- // historical accident, the CRT for the first two primes is handled
- // differently in PKCS#1 and interoperability is sufficiently
- // important that we mirror this.
- CRTValues []CRTValue
-}
-
-// CRTValue contains the precomputed Chinese remainder theorem values.
-type CRTValue struct {
- Exp *big.Int // D mod (prime-1).
- Coeff *big.Int // R·Coeff ≡ 1 mod Prime.
- R *big.Int // product of primes prior to this (inc p and q).
-}
-
-// Validate performs basic sanity checks on the key.
-// It returns nil if the key is valid, or else an error describing a problem.
-func (priv *PrivateKey) Validate() error {
- if err := checkPub(&priv.PublicKey); err != nil {
- return err
- }
-
- // Check that Πprimes == n.
- modulus := new(big.Int).Set(bigOne)
- for _, prime := range priv.Primes {
- // Any primes ≤ 1 will cause divide-by-zero panics later.
- if prime.Cmp(bigOne) <= 0 {
- return errors.New("crypto/rsa: invalid prime value")
- }
- modulus.Mul(modulus, prime)
- }
- if modulus.Cmp(priv.N) != 0 {
- return errors.New("crypto/rsa: invalid modulus")
- }
-
- // Check that de ≡ 1 mod p-1, for each prime.
- // This implies that e is coprime to each p-1 as e has a multiplicative
- // inverse. Therefore e is coprime to lcm(p-1,q-1,r-1,...) =
- // exponent(ℤ/nℤ). It also implies that a^de ≡ a mod p as a^(p-1) ≡ 1
- // mod p. Thus a^de ≡ a mod n for all a coprime to n, as required.
- congruence := new(big.Int)
- de := new(big.Int).SetInt64(int64(priv.E))
- de.Mul(de, priv.D)
- for _, prime := range priv.Primes {
- pminus1 := new(big.Int).Sub(prime, bigOne)
- congruence.Mod(de, pminus1)
- if congruence.Cmp(bigOne) != 0 {
- return errors.New("crypto/rsa: invalid exponents")
- }
- }
- return nil
-}
-
-// GenerateKey generates an RSA keypair of the given bit size using the
-// random source random (for example, crypto/rand.Reader).
-func GenerateKey(random io.Reader, bits int) (priv *PrivateKey, err error) {
- return GenerateMultiPrimeKey(random, 2, bits)
-}
-
-// GenerateMultiPrimeKey generates a multi-prime RSA keypair of the given bit
-// size and the given random source, as suggested in [1]. Although the public
-// keys are compatible with (and indistinguishable from) the 2-prime case,
-// the private keys are not. Thus it may not be possible to export multi-prime
-// private keys in certain formats or to subsequently import them into other
-// code.
-//
-// Table 1 in [2] suggests maximum numbers of primes for a given size.
-//
-// [1] US patent 4405829 (1972, expired)
-// [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf
-func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.E = 65537
-
- if nprimes < 2 {
- return nil, errors.New("crypto/rsa: GenerateMultiPrimeKey: nprimes must be >= 2")
- }
-
- primes := make([]*big.Int, nprimes)
-
-NextSetOfPrimes:
- for {
- todo := bits
- // crypto/rand should set the top two bits in each prime.
- // Thus each prime has the form
- // p_i = 2^bitlen(p_i) × 0.11... (in base 2).
- // And the product is:
- // P = 2^todo × α
- // where α is the product of nprimes numbers of the form 0.11...
- //
- // If α < 1/2 (which can happen for nprimes > 2), we need to
- // shift todo to compensate for lost bits: the mean value of 0.11...
- // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2
- // will give good results.
- if nprimes >= 7 {
- todo += (nprimes - 2) / 5
- }
- for i := 0; i < nprimes; i++ {
- primes[i], err = rand.Prime(random, todo/(nprimes-i))
- if err != nil {
- return nil, err
- }
- todo -= primes[i].BitLen()
- }
-
- // Make sure that primes is pairwise unequal.
- for i, prime := range primes {
- for j := 0; j < i; j++ {
- if prime.Cmp(primes[j]) == 0 {
- continue NextSetOfPrimes
- }
- }
- }
-
- n := new(big.Int).Set(bigOne)
- totient := new(big.Int).Set(bigOne)
- pminus1 := new(big.Int)
- for _, prime := range primes {
- n.Mul(n, prime)
- pminus1.Sub(prime, bigOne)
- totient.Mul(totient, pminus1)
- }
- if n.BitLen() != bits {
- // This should never happen for nprimes == 2 because
- // crypto/rand should set the top two bits in each prime.
- // For nprimes > 2 we hope it does not happen often.
- continue NextSetOfPrimes
- }
-
- g := new(big.Int)
- priv.D = new(big.Int)
- y := new(big.Int)
- e := big.NewInt(int64(priv.E))
- g.GCD(priv.D, y, e, totient)
-
- if g.Cmp(bigOne) == 0 {
- if priv.D.Sign() < 0 {
- priv.D.Add(priv.D, totient)
- }
- priv.Primes = primes
- priv.N = n
-
- break
- }
- }
-
- priv.Precompute()
- return
-}
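
In the common two-prime case all of this is reached through GenerateKey; a short usage sketch against the standard-library API this fork tracks:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	// GenerateKey is GenerateMultiPrimeKey with nprimes == 2; the
	// returned key already has its CRT values precomputed.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// Validate re-checks the modulus and exponent relations described
	// above; it is cheap relative to key generation.
	fmt.Println(key.Validate(), key.N.BitLen())
}
```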
-
-// incCounter increments a four byte, big-endian counter.
-func incCounter(c *[4]byte) {
- if c[3]++; c[3] != 0 {
- return
- }
- if c[2]++; c[2] != 0 {
- return
- }
- if c[1]++; c[1] != 0 {
- return
- }
- c[0]++
-}
-
-// mgf1XOR XORs the bytes in out with a mask generated using the MGF1 function
-// specified in PKCS#1 v2.1.
-func mgf1XOR(out []byte, hash hash.Hash, seed []byte) {
- var counter [4]byte
- var digest []byte
-
- done := 0
- for done < len(out) {
- hash.Write(seed)
- hash.Write(counter[0:4])
- digest = hash.Sum(digest[:0])
- hash.Reset()
-
- for i := 0; i < len(digest) && done < len(out); i++ {
- out[done] ^= digest[i]
- done++
- }
- incCounter(&counter)
- }
-}
-
-// ErrMessageTooLong is returned when attempting to encrypt a message which is
-// too large for the size of the public key.
-var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size")
-
-func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int {
- e := big.NewInt(int64(pub.E))
- c.Exp(m, e, pub.N)
- return c
-}
-
-// EncryptOAEP encrypts the given message with RSA-OAEP.
-//
-// OAEP is parameterised by a hash function that is used as a random oracle.
-// Encryption and decryption of a given message must use the same hash
-// function, and sha256.New() is a reasonable choice.
-//
-// The random parameter is used as a source of entropy to ensure that
-// encrypting the same message twice doesn't result in the same ciphertext.
-//
-// The label parameter may contain arbitrary data that will not be encrypted,
-// but which gives important context to the message. For example, if a given
-// public key is used to decrypt two types of messages then distinct label
-// values could be used to ensure that a ciphertext for one purpose cannot be
-// used for another by an attacker. If not required it can be empty.
-//
-// The message must be no longer than the length of the public modulus minus
-// twice the hash length, minus a further 2 bytes.
-func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err error) {
- if err := checkPub(pub); err != nil {
- return nil, err
- }
- hash.Reset()
- k := (pub.N.BitLen() + 7) / 8
- if len(msg) > k-2*hash.Size()-2 {
- err = ErrMessageTooLong
- return
- }
-
- hash.Write(label)
- lHash := hash.Sum(nil)
- hash.Reset()
-
- em := make([]byte, k)
- seed := em[1 : 1+hash.Size()]
- db := em[1+hash.Size():]
-
- copy(db[0:hash.Size()], lHash)
- db[len(db)-len(msg)-1] = 1
- copy(db[len(db)-len(msg):], msg)
-
- _, err = io.ReadFull(random, seed)
- if err != nil {
- return
- }
-
- mgf1XOR(db, hash, seed)
- mgf1XOR(seed, hash, db)
-
- m := new(big.Int)
- m.SetBytes(em)
- c := encrypt(new(big.Int), pub, m)
- out = c.Bytes()
-
- if len(out) < k {
- // If the output is too small, we need to left-pad with zeros.
- t := make([]byte, k)
- copy(t[k-len(out):], out)
- out = t
- }
-
- return
-}
-
-// ErrDecryption represents a failure to decrypt a message.
-// It is deliberately vague to avoid adaptive attacks.
-var ErrDecryption = errors.New("crypto/rsa: decryption error")
-
-// ErrVerification represents a failure to verify a signature.
-// It is deliberately vague to avoid adaptive attacks.
-var ErrVerification = errors.New("crypto/rsa: verification error")
-
-// modInverse returns ia, the inverse of a in the multiplicative group of prime
-// order n. It requires that a be a member of the group (i.e. less than n).
-func modInverse(a, n *big.Int) (ia *big.Int, ok bool) {
- g := new(big.Int)
- x := new(big.Int)
- y := new(big.Int)
- g.GCD(x, y, a, n)
- if g.Cmp(bigOne) != 0 {
- // In this case, a and n aren't coprime and we cannot calculate
- // the inverse. This happens because the values of n are nearly
- // prime (being the product of two primes) rather than truly
- // prime.
- return
- }
-
- if x.Cmp(bigOne) < 0 {
- // 0 is not the multiplicative inverse of any element so, if x
- // < 1, then x is negative.
- x.Add(x, n)
- }
-
- return x, true
-}
-
-// Precompute performs some calculations that speed up private key operations
-// in the future.
-func (priv *PrivateKey) Precompute() {
- if priv.Precomputed.Dp != nil {
- return
- }
-
- priv.Precomputed.Dp = new(big.Int).Sub(priv.Primes[0], bigOne)
- priv.Precomputed.Dp.Mod(priv.D, priv.Precomputed.Dp)
-
- priv.Precomputed.Dq = new(big.Int).Sub(priv.Primes[1], bigOne)
- priv.Precomputed.Dq.Mod(priv.D, priv.Precomputed.Dq)
-
- priv.Precomputed.Qinv = new(big.Int).ModInverse(priv.Primes[1], priv.Primes[0])
-
- r := new(big.Int).Mul(priv.Primes[0], priv.Primes[1])
- priv.Precomputed.CRTValues = make([]CRTValue, len(priv.Primes)-2)
- for i := 2; i < len(priv.Primes); i++ {
- prime := priv.Primes[i]
- values := &priv.Precomputed.CRTValues[i-2]
-
- values.Exp = new(big.Int).Sub(prime, bigOne)
- values.Exp.Mod(priv.D, values.Exp)
-
- values.R = new(big.Int).Set(r)
- values.Coeff = new(big.Int).ModInverse(r, prime)
-
- r.Mul(r, prime)
- }
-}
-
-// decrypt performs an RSA decryption, resulting in a plaintext integer. If a
-// random source is given, RSA blinding is used.
-func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
- // TODO(agl): can we get away with reusing blinds?
- if c.Cmp(priv.N) > 0 {
- err = ErrDecryption
- return
- }
-
- var ir *big.Int
- if random != nil {
- // Blinding enabled. Blinding involves multiplying c by r^e.
- // Then the decryption operation performs (m^e * r^e)^d mod n
- // which equals mr mod n. The factor of r can then be removed
- // by multiplying by the multiplicative inverse of r.
-
- var r *big.Int
-
- for {
- r, err = rand.Int(random, priv.N)
- if err != nil {
- return
- }
- if r.Cmp(bigZero) == 0 {
- r = bigOne
- }
- var ok bool
- ir, ok = modInverse(r, priv.N)
- if ok {
- break
- }
- }
- bigE := big.NewInt(int64(priv.E))
- rpowe := new(big.Int).Exp(r, bigE, priv.N)
- cCopy := new(big.Int).Set(c)
- cCopy.Mul(cCopy, rpowe)
- cCopy.Mod(cCopy, priv.N)
- c = cCopy
- }
-
- if priv.Precomputed.Dp == nil {
- m = new(big.Int).Exp(c, priv.D, priv.N)
- } else {
- // We have the precalculated values needed for the CRT.
- m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0])
- m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1])
- m.Sub(m, m2)
- if m.Sign() < 0 {
- m.Add(m, priv.Primes[0])
- }
- m.Mul(m, priv.Precomputed.Qinv)
- m.Mod(m, priv.Primes[0])
- m.Mul(m, priv.Primes[1])
- m.Add(m, m2)
-
- for i, values := range priv.Precomputed.CRTValues {
- prime := priv.Primes[2+i]
- m2.Exp(c, values.Exp, prime)
- m2.Sub(m2, m)
- m2.Mul(m2, values.Coeff)
- m2.Mod(m2, prime)
- if m2.Sign() < 0 {
- m2.Add(m2, prime)
- }
- m2.Mul(m2, values.R)
- m.Add(m, m2)
- }
- }
-
- if ir != nil {
- // Unblind.
- m.Mul(m, ir)
- m.Mod(m, priv.N)
- }
-
- return
-}
-
-func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) {
- m, err = decrypt(random, priv, c)
- if err != nil {
- return nil, err
- }
-
- // In order to defend against errors in the CRT computation, m^e is
- // calculated, which should match the original ciphertext.
- check := encrypt(new(big.Int), &priv.PublicKey, m)
- if c.Cmp(check) != 0 {
- return nil, errors.New("rsa: internal error")
- }
- return m, nil
-}
-
-// DecryptOAEP decrypts ciphertext using RSA-OAEP.
-//
-// OAEP is parameterised by a hash function that is used as a random oracle.
-// Encryption and decryption of a given message must use the same hash
-// function, and sha256.New() is a reasonable choice.
-//
-// The random parameter, if not nil, is used to blind the private-key operation
-// and avoid timing side-channel attacks. Blinding is purely internal to this
-// function – the random data need not match that used when encrypting.
-//
-// The label parameter must match the value given when encrypting. See
-// EncryptOAEP for details.
-func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err error) {
- if err := checkPub(&priv.PublicKey); err != nil {
- return nil, err
- }
- k := (priv.N.BitLen() + 7) / 8
- if len(ciphertext) > k ||
- k < hash.Size()*2+2 {
- err = ErrDecryption
- return
- }
-
- c := new(big.Int).SetBytes(ciphertext)
-
- m, err := decrypt(random, priv, c)
- if err != nil {
- return
- }
-
- hash.Write(label)
- lHash := hash.Sum(nil)
- hash.Reset()
-
- // Converting the plaintext number to bytes will strip any
- // leading zeros so we may have to left pad. We do this unconditionally
- // to avoid leaking timing information. (Although we still probably
- // leak the number of leading zeros. It's not clear that we can do
- // anything about this.)
- em := leftPad(m.Bytes(), k)
-
- firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0)
-
- seed := em[1 : hash.Size()+1]
- db := em[hash.Size()+1:]
-
- mgf1XOR(seed, hash, db)
- mgf1XOR(db, hash, seed)
-
- lHash2 := db[0:hash.Size()]
-
- // We have to validate the plaintext in constant time in order to avoid
- // attacks like: J. Manger. A Chosen Ciphertext Attack on RSA Optimal
- // Asymmetric Encryption Padding (OAEP) as Standardized in PKCS #1
- // v2.0. In J. Kilian, editor, Advances in Cryptology.
- lHash2Good := subtle.ConstantTimeCompare(lHash, lHash2)
-
- // The remainder of the plaintext must be zero or more 0x00, followed
- // by 0x01, followed by the message.
- // lookingForIndex: 1 iff we are still looking for the 0x01
- // index: the offset of the first 0x01 byte
- // invalid: 1 iff we saw a non-zero byte before the 0x01.
- var lookingForIndex, index, invalid int
- lookingForIndex = 1
- rest := db[hash.Size():]
-
- for i := 0; i < len(rest); i++ {
- equals0 := subtle.ConstantTimeByteEq(rest[i], 0)
- equals1 := subtle.ConstantTimeByteEq(rest[i], 1)
- index = subtle.ConstantTimeSelect(lookingForIndex&equals1, i, index)
- lookingForIndex = subtle.ConstantTimeSelect(equals1, 0, lookingForIndex)
- invalid = subtle.ConstantTimeSelect(lookingForIndex&^equals0, 1, invalid)
- }
-
- if firstByteIsZero&lHash2Good&^invalid&^lookingForIndex != 1 {
- err = ErrDecryption
- return
- }
-
- msg = rest[index+1:]
- return
-}
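
A label-bound OAEP round trip, sketched with the standard library's identical EncryptOAEP/DecryptOAEP pair; the "orders" label is an arbitrary example value:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	label := []byte("orders") // binds the ciphertext to one context

	ct, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, []byte("secret"), label)
	if err != nil {
		panic(err)
	}

	// Decryption fails with ErrDecryption unless the same label is supplied.
	pt, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, key, ct, label)
	fmt.Println(string(pt), err)
}
```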
-
-// leftPad returns a new slice of length size. The contents of input are right
-// aligned in the new slice.
-func leftPad(input []byte, size int) (out []byte) {
- n := len(input)
- if n > size {
- n = size
- }
- out = make([]byte, size)
- copy(out[len(out)-n:], input)
- return
-}
diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore
deleted file mode 100644
index 0f1d00e1..00000000
--- a/vendor/github.com/lib/pq/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.db
-*.test
-*~
-*.swp
diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh
deleted file mode 100644
index a297dc45..00000000
--- a/vendor/github.com/lib/pq/.travis.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-client_configure() {
- sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
-}
-
-pgdg_repository() {
- local sourcelist='sources.list.d/postgresql.list'
-
- curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add -
- echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist"
- sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update
-}
-
-postgresql_configure() {
- sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config
- local all all trust
- hostnossl all pqgossltest 127.0.0.1/32 reject
- hostnossl all pqgosslcert 127.0.0.1/32 reject
- hostssl all pqgossltest 127.0.0.1/32 trust
- hostssl all pqgosslcert 127.0.0.1/32 cert
- host all all 127.0.0.1/32 trust
- hostnossl all pqgossltest ::1/128 reject
- hostnossl all pqgosslcert ::1/128 reject
- hostssl all pqgossltest ::1/128 trust
- hostssl all pqgosslcert ::1/128 cert
- host all all ::1/128 trust
- config
-
- xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates
- certs/root.crt
- certs/server.crt
- certs/server.key
- certificates
-
- sort -VCu <<-versions ||
- $PGVERSION
- 9.2
- versions
- sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config
- ssl_ca_file = 'root.crt'
- ssl_cert_file = 'server.crt'
- ssl_key_file = 'server.key'
- config
-
- echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null
-
- sudo service postgresql restart
-}
-
-postgresql_install() {
- xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
- postgresql-$PGVERSION
- postgresql-server-dev-$PGVERSION
- postgresql-contrib-$PGVERSION
- packages
-}
-
-postgresql_uninstall() {
- sudo service postgresql stop
- xargs sudo apt-get -y --purge remove <<-packages
- libpq-dev
- libpq5
- postgresql
- postgresql-client-common
- postgresql-common
- packages
- sudo rm -rf /var/lib/postgresql
-}
-
-megacheck_install() {
- # Lock megacheck version at $MEGACHECK_VERSION to prevent spontaneous
- # new error messages in old code.
- go get -d honnef.co/go/tools/...
- git -C $GOPATH/src/honnef.co/go/tools/ checkout $MEGACHECK_VERSION
- go install honnef.co/go/tools/cmd/megacheck
- megacheck --version
-}
-
-golint_install() {
- go get github.com/golang/lint/golint
-}
-
-$1
diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml
deleted file mode 100644
index 18556e08..00000000
--- a/vendor/github.com/lib/pq/.travis.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-language: go
-
-go:
- - 1.8.x
- - 1.9.x
- - 1.10.x
- - master
-
-sudo: true
-
-env:
- global:
- - PGUSER=postgres
- - PQGOSSLTESTS=1
- - PQSSLCERTTEST_PATH=$PWD/certs
- - PGHOST=127.0.0.1
- - MEGACHECK_VERSION=2017.2.2
- matrix:
- - PGVERSION=10
- - PGVERSION=9.6
- - PGVERSION=9.5
- - PGVERSION=9.4
- - PGVERSION=9.3
- - PGVERSION=9.2
- - PGVERSION=9.1
- - PGVERSION=9.0
-
-before_install:
- - ./.travis.sh postgresql_uninstall
- - ./.travis.sh pgdg_repository
- - ./.travis.sh postgresql_install
- - ./.travis.sh postgresql_configure
- - ./.travis.sh client_configure
- - ./.travis.sh megacheck_install
- - ./.travis.sh golint_install
- - go get golang.org/x/tools/cmd/goimports
-
-before_script:
- - createdb pqgotest
- - createuser -DRS pqgossltest
- - createuser -DRS pqgosslcert
-
-script:
- - >
- goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
- - go vet ./...
- - megacheck -go 1.8 ./...
- - golint ./...
- - PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
- - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...
diff --git a/vendor/github.com/lib/pq/CONTRIBUTING.md b/vendor/github.com/lib/pq/CONTRIBUTING.md
deleted file mode 100644
index 84c937f1..00000000
--- a/vendor/github.com/lib/pq/CONTRIBUTING.md
+++ /dev/null
@@ -1,29 +0,0 @@
-## Contributing to pq
-
-`pq` has a backlog of pull requests, but contributions are still very
-much welcome. You can help with patch review, submitting bug reports,
-or adding new functionality. There is no formal style guide, but
-please conform to the style of existing code and general Go formatting
-conventions when submitting patches.
-
-### Patch review
-
-Help review existing open pull requests by commenting on the code or
-proposed functionality.
-
-### Bug reports
-
-We appreciate any bug reports, but especially ones with self-contained
-(doesn't depend on code outside of pq), minimal (can't be simplified
-further) test cases. It's especially helpful if you can submit a pull
-request with just the failing test case (you'll probably want to
-pattern it after the tests in
-[conn_test.go](https://github.com/lib/pq/blob/master/conn_test.go)).
-
-### New functionality
-
-There are a number of pending patches for new functionality, so
-additional feature patches will take a while to merge. Still, patches
-are generally reviewed based on usefulness and complexity in addition
-to time-in-queue, so if you have a knockout idea, take a shot. Feel
-free to open an issue discussing your proposed patch beforehand.
diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md
deleted file mode 100644
index 5773904a..00000000
--- a/vendor/github.com/lib/pq/LICENSE.md
+++ /dev/null
@@ -1,8 +0,0 @@
-Copyright (c) 2011-2013, 'pq' Contributors
-Portions Copyright (C) 2011 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md
deleted file mode 100644
index d71f3c2c..00000000
--- a/vendor/github.com/lib/pq/README.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# pq - A pure Go postgres driver for Go's database/sql package
-
-[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq)
-[![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq)
-
-## Install
-
- go get github.com/lib/pq
-
-## Docs
-
-For detailed documentation and basic usage examples, please see the package
-documentation at <https://godoc.org/github.com/lib/pq>.
-
-## Tests
-
-`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
-
-## Features
-
-* SSL
-* Handles bad connections for `database/sql`
-* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
-* Scan binary blobs correctly (i.e. `bytea`)
-* Package for `hstore` support
-* COPY FROM support
-* pq.ParseURL for converting URLs to connection strings for sql.Open.
-* Many libpq compatible environment variables
-* Unix socket support
-* Notifications: `LISTEN`/`NOTIFY`
-* pgpass support
-
-## Future / Things you can help with
-
-* Better COPY FROM / COPY TO (see discussion in #181)
-
-## Thank you (alphabetical)
-
-Some of these contributors are from the original library `bmizerany/pq.go` whose
-code still exists here.
-
-* Andy Balholm (andybalholm)
-* Ben Berkert (benburkert)
-* Benjamin Heatwole (bheatwole)
-* Bill Mill (llimllib)
-* Bjørn Madsen (aeons)
-* Blake Gentry (bgentry)
-* Brad Fitzpatrick (bradfitz)
-* Charlie Melbye (cmelbye)
-* Chris Bandy (cbandy)
-* Chris Gilling (cgilling)
-* Chris Walsh (cwds)
-* Dan Sosedoff (sosedoff)
-* Daniel Farina (fdr)
-* Eric Chlebek (echlebek)
-* Eric Garrido (minusnine)
-* Eric Urban (hydrogen18)
-* Everyone at The Go Team
-* Evan Shaw (edsrzf)
-* Ewan Chou (coocood)
-* Fazal Majid (fazalmajid)
-* Federico Romero (federomero)
-* Fumin (fumin)
-* Gary Burd (garyburd)
-* Heroku (heroku)
-* James Pozdena (jpoz)
-* Jason McVetta (jmcvetta)
-* Jeremy Jay (pbnjay)
-* Joakim Sernbrant (serbaut)
-* John Gallagher (jgallagher)
-* Jonathan Rudenberg (titanous)
-* Joël Stemmer (jstemmer)
-* Kamil Kisiel (kisielk)
-* Kelly Dunn (kellydunn)
-* Keith Rarick (kr)
-* Kir Shatrov (kirs)
-* Lann Martin (lann)
-* Maciek Sakrejda (uhoh-itsmaciek)
-* Marc Brinkmann (mbr)
-* Marko Tiikkaja (johto)
-* Matt Newberry (MattNewberry)
-* Matt Robenolt (mattrobenolt)
-* Martin Olsen (martinolsen)
-* Mike Lewis (mikelikespie)
-* Nicolas Patry (Narsil)
-* Oliver Tonnhofer (olt)
-* Patrick Hayes (phayes)
-* Paul Hammond (paulhammond)
-* Ryan Smith (ryandotsmith)
-* Samuel Stauffer (samuel)
-* Timothée Peignier (cyberdelia)
-* Travis Cline (tmc)
-* TruongSinh Tran-Nguyen (truongsinh)
-* Yaismel Miranda (ympons)
-* notedit (notedit)
diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md
deleted file mode 100644
index f0502111..00000000
--- a/vendor/github.com/lib/pq/TESTS.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Tests
-
-## Running Tests
-
-`go test` is used for testing. A running PostgreSQL
-server is required, with the ability to log in. The
-database to connect to test with is "pqgotest" on
-"localhost", but these can be overridden using [environment
-variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
-
-Example:
-
- PGHOST=/run/postgresql go test
-
-## Benchmarks
-
-A benchmark suite can be run as part of the tests:
-
- go test -bench .
-
-## Example setup (Docker)
-
-Run a postgres container:
-
-```
-docker run -p 5432:5432 postgres
-```
-
-Run tests:
-
-```
-PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
-```
diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go
deleted file mode 100644
index e4933e22..00000000
--- a/vendor/github.com/lib/pq/array.go
+++ /dev/null
@@ -1,756 +0,0 @@
-package pq
-
-import (
- "bytes"
- "database/sql"
- "database/sql/driver"
- "encoding/hex"
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-var typeByteSlice = reflect.TypeOf([]byte{})
-var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
-
-// Array returns the optimal driver.Valuer and sql.Scanner for an array or
-// slice of any dimension.
-//
-// For example:
-// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
-//
-// var x []sql.NullInt64
-// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x))
-//
-// Scanning multi-dimensional arrays is not supported. Arrays where the lower
-// bound is not one (such as `[0:0]={1}`) are not supported.
-func Array(a interface{}) interface {
- driver.Valuer
- sql.Scanner
-} {
- switch a := a.(type) {
- case []bool:
- return (*BoolArray)(&a)
- case []float64:
- return (*Float64Array)(&a)
- case []int64:
- return (*Int64Array)(&a)
- case []string:
- return (*StringArray)(&a)
-
- case *[]bool:
- return (*BoolArray)(a)
- case *[]float64:
- return (*Float64Array)(a)
- case *[]int64:
- return (*Int64Array)(a)
- case *[]string:
- return (*StringArray)(a)
- }
-
- return GenericArray{a}
-}
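
The doc comment above in practice; a sketch that assumes a reachable Postgres with a table t(id int) and a suitable connection string:

```go
package main

import (
	"database/sql"
	"fmt"

	"github.com/lib/pq"
)

func main() {
	// The DSN is a placeholder; adjust host, database and sslmode.
	db, err := sql.Open("postgres", "dbname=pqgotest sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Pass a Go slice as a Postgres array parameter.
	rows, err := db.Query(`SELECT id FROM t WHERE id = ANY($1)`, pq.Array([]int64{235, 401}))
	if err != nil {
		panic(err)
	}
	rows.Close()

	// Scan a Postgres array back into a Go slice.
	var ids pq.Int64Array
	err = db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(&ids)
	fmt.Println(ids, err)
}
```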
-
-// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
-// to override the array delimiter used by GenericArray.
-type ArrayDelimiter interface {
- // ArrayDelimiter returns the delimiter character(s) for this element's type.
- ArrayDelimiter() string
-}
-
-// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
-type BoolArray []bool
-
-// Scan implements the sql.Scanner interface.
-func (a *BoolArray) Scan(src interface{}) error {
- switch src := src.(type) {
- case []byte:
- return a.scanBytes(src)
- case string:
- return a.scanBytes([]byte(src))
- case nil:
- *a = nil
- return nil
- }
-
- return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
-}
-
-func (a *BoolArray) scanBytes(src []byte) error {
- elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
- if err != nil {
- return err
- }
- if *a != nil && len(elems) == 0 {
- *a = (*a)[:0]
- } else {
- b := make(BoolArray, len(elems))
- for i, v := range elems {
- if len(v) != 1 {
- return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
- }
- switch v[0] {
- case 't':
- b[i] = true
- case 'f':
- b[i] = false
- default:
- return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
- }
- }
- *a = b
- }
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (a BoolArray) Value() (driver.Value, error) {
- if a == nil {
- return nil, nil
- }
-
- if n := len(a); n > 0 {
- // There will be exactly two curly brackets, N bytes of values,
- // and N-1 bytes of delimiters.
- b := make([]byte, 1+2*n)
-
- for i := 0; i < n; i++ {
- b[2*i] = ','
- if a[i] {
- b[1+2*i] = 't'
- } else {
- b[1+2*i] = 'f'
- }
- }
-
- b[0] = '{'
- b[2*n] = '}'
-
- return string(b), nil
- }
-
- return "{}", nil
-}
-
-// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
-type ByteaArray [][]byte
-
-// Scan implements the sql.Scanner interface.
-func (a *ByteaArray) Scan(src interface{}) error {
- switch src := src.(type) {
- case []byte:
- return a.scanBytes(src)
- case string:
- return a.scanBytes([]byte(src))
- case nil:
- *a = nil
- return nil
- }
-
- return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
-}
-
-func (a *ByteaArray) scanBytes(src []byte) error {
- elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
- if err != nil {
- return err
- }
- if *a != nil && len(elems) == 0 {
- *a = (*a)[:0]
- } else {
- b := make(ByteaArray, len(elems))
- for i, v := range elems {
- b[i], err = parseBytea(v)
- if err != nil {
- return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
- }
- }
- *a = b
- }
- return nil
-}
-
-// Value implements the driver.Valuer interface. It uses the "hex" format which
-// is only supported on PostgreSQL 9.0 or newer.
-func (a ByteaArray) Value() (driver.Value, error) {
- if a == nil {
- return nil, nil
- }
-
- if n := len(a); n > 0 {
- // There will be at least two curly brackets, 2*N bytes of quotes,
- // 3*N bytes of hex formatting, and N-1 bytes of delimiters.
- size := 1 + 6*n
- for _, x := range a {
- size += hex.EncodedLen(len(x))
- }
-
- b := make([]byte, size)
-
- for i, s := 0, b; i < n; i++ {
- o := copy(s, `,"\\x`)
- o += hex.Encode(s[o:], a[i])
- s[o] = '"'
- s = s[o+1:]
- }
-
- b[0] = '{'
- b[size-1] = '}'
-
- return string(b), nil
- }
-
- return "{}", nil
-}
-
-// Float64Array represents a one-dimensional array of the PostgreSQL double
-// precision type.
-type Float64Array []float64
-
-// Scan implements the sql.Scanner interface.
-func (a *Float64Array) Scan(src interface{}) error {
- switch src := src.(type) {
- case []byte:
- return a.scanBytes(src)
- case string:
- return a.scanBytes([]byte(src))
- case nil:
- *a = nil
- return nil
- }
-
- return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
-}
-
-func (a *Float64Array) scanBytes(src []byte) error {
- elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
- if err != nil {
- return err
- }
- if *a != nil && len(elems) == 0 {
- *a = (*a)[:0]
- } else {
- b := make(Float64Array, len(elems))
- for i, v := range elems {
- if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
- return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
- }
- }
- *a = b
- }
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (a Float64Array) Value() (driver.Value, error) {
- if a == nil {
- return nil, nil
- }
-
- if n := len(a); n > 0 {
- // There will be at least two curly brackets, N bytes of values,
- // and N-1 bytes of delimiters.
- b := make([]byte, 1, 1+2*n)
- b[0] = '{'
-
- b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
- for i := 1; i < n; i++ {
- b = append(b, ',')
- b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
- }
-
- return string(append(b, '}')), nil
- }
-
- return "{}", nil
-}
-
-// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
-// an array or slice of any dimension.
-type GenericArray struct{ A interface{} }
-
-func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
- var assign func([]byte, reflect.Value) error
- var del = ","
-
- // TODO calculate the assign function for other types
- // TODO repeat this section on the element type of arrays or slices (multidimensional)
- {
- if reflect.PtrTo(rt).Implements(typeSQLScanner) {
- // dest is always addressable because it is an element of a slice.
- assign = func(src []byte, dest reflect.Value) (err error) {
- ss := dest.Addr().Interface().(sql.Scanner)
- if src == nil {
- err = ss.Scan(nil)
- } else {
- err = ss.Scan(src)
- }
- return
- }
- goto FoundType
- }
-
- assign = func([]byte, reflect.Value) error {
- return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
- }
- }
-
-FoundType:
-
- if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
- del = ad.ArrayDelimiter()
- }
-
- return rt, assign, del
-}
-
-// Scan implements the sql.Scanner interface.
-func (a GenericArray) Scan(src interface{}) error {
- dpv := reflect.ValueOf(a.A)
- switch {
- case dpv.Kind() != reflect.Ptr:
- return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
- case dpv.IsNil():
- return fmt.Errorf("pq: destination %T is nil", a.A)
- }
-
- dv := dpv.Elem()
- switch dv.Kind() {
- case reflect.Slice:
- case reflect.Array:
- default:
- return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
- }
-
- switch src := src.(type) {
- case []byte:
- return a.scanBytes(src, dv)
- case string:
- return a.scanBytes([]byte(src), dv)
- case nil:
- if dv.Kind() == reflect.Slice {
- dv.Set(reflect.Zero(dv.Type()))
- return nil
- }
- }
-
- return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
-}
-
-func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
- dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
- dims, elems, err := parseArray(src, []byte(del))
- if err != nil {
- return err
- }
-
- // TODO allow multidimensional
-
- if len(dims) > 1 {
- return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
- strings.Replace(fmt.Sprint(dims), " ", "][", -1))
- }
-
- // Treat a zero-dimensional array like an array with a single dimension of zero.
- if len(dims) == 0 {
- dims = append(dims, 0)
- }
-
- for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
- switch rt.Kind() {
- case reflect.Slice:
- case reflect.Array:
- if rt.Len() != dims[i] {
- return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
- strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
- }
- default:
- // TODO handle multidimensional
- }
- }
-
- values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
- for i, e := range elems {
- if err := assign(e, values.Index(i)); err != nil {
- return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
- }
- }
-
- // TODO handle multidimensional
-
- switch dv.Kind() {
- case reflect.Slice:
- dv.Set(values.Slice(0, dims[0]))
- case reflect.Array:
- for i := 0; i < dims[0]; i++ {
- dv.Index(i).Set(values.Index(i))
- }
- }
-
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (a GenericArray) Value() (driver.Value, error) {
- if a.A == nil {
- return nil, nil
- }
-
- rv := reflect.ValueOf(a.A)
-
- switch rv.Kind() {
- case reflect.Slice:
- if rv.IsNil() {
- return nil, nil
- }
- case reflect.Array:
- default:
- return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
- }
-
- if n := rv.Len(); n > 0 {
- // There will be at least two curly brackets, N bytes of values,
- // and N-1 bytes of delimiters.
- b := make([]byte, 0, 1+2*n)
-
- b, _, err := appendArray(b, rv, n)
- return string(b), err
- }
-
- return "{}", nil
-}
-
-// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
-type Int64Array []int64
-
-// Scan implements the sql.Scanner interface.
-func (a *Int64Array) Scan(src interface{}) error {
- switch src := src.(type) {
- case []byte:
- return a.scanBytes(src)
- case string:
- return a.scanBytes([]byte(src))
- case nil:
- *a = nil
- return nil
- }
-
- return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
-}
-
-func (a *Int64Array) scanBytes(src []byte) error {
- elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
- if err != nil {
- return err
- }
- if *a != nil && len(elems) == 0 {
- *a = (*a)[:0]
- } else {
- b := make(Int64Array, len(elems))
- for i, v := range elems {
- if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
- return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
- }
- }
- *a = b
- }
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (a Int64Array) Value() (driver.Value, error) {
- if a == nil {
- return nil, nil
- }
-
- if n := len(a); n > 0 {
- // There will be at least two curly brackets, N bytes of values,
- // and N-1 bytes of delimiters.
- b := make([]byte, 1, 1+2*n)
- b[0] = '{'
-
- b = strconv.AppendInt(b, a[0], 10)
- for i := 1; i < n; i++ {
- b = append(b, ',')
- b = strconv.AppendInt(b, a[i], 10)
- }
-
- return string(append(b, '}')), nil
- }
-
- return "{}", nil
-}
-
-// StringArray represents a one-dimensional array of the PostgreSQL character types.
-type StringArray []string
-
-// Scan implements the sql.Scanner interface.
-func (a *StringArray) Scan(src interface{}) error {
- switch src := src.(type) {
- case []byte:
- return a.scanBytes(src)
- case string:
- return a.scanBytes([]byte(src))
- case nil:
- *a = nil
- return nil
- }
-
- return fmt.Errorf("pq: cannot convert %T to StringArray", src)
-}
-
-func (a *StringArray) scanBytes(src []byte) error {
- elems, err := scanLinearArray(src, []byte{','}, "StringArray")
- if err != nil {
- return err
- }
- if *a != nil && len(elems) == 0 {
- *a = (*a)[:0]
- } else {
- b := make(StringArray, len(elems))
- for i, v := range elems {
- if b[i] = string(v); v == nil {
- return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
- }
- }
- *a = b
- }
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (a StringArray) Value() (driver.Value, error) {
- if a == nil {
- return nil, nil
- }
-
- if n := len(a); n > 0 {
- // There will be at least two curly brackets, 2*N bytes of quotes,
- // and N-1 bytes of delimiters.
- b := make([]byte, 1, 1+3*n)
- b[0] = '{'
-
- b = appendArrayQuotedBytes(b, []byte(a[0]))
- for i := 1; i < n; i++ {
- b = append(b, ',')
- b = appendArrayQuotedBytes(b, []byte(a[i]))
- }
-
- return string(append(b, '}')), nil
- }
-
- return "{}", nil
-}
-
-// appendArray appends rv to the buffer, returning the extended buffer and
-// the delimiter used between elements.
-//
-// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
-func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
- var del string
- var err error
-
- b = append(b, '{')
-
- if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
- return b, del, err
- }
-
- for i := 1; i < n; i++ {
- b = append(b, del...)
- if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
- return b, del, err
- }
- }
-
- return append(b, '}'), del, nil
-}
-
-// appendArrayElement appends rv to the buffer, returning the extended buffer
-// and the delimiter to use before the next element.
-//
-// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
-// using driver.DefaultParameterConverter and the resulting []byte or string
-// is double-quoted.
-//
-// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
-func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
- if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
- if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
- if n := rv.Len(); n > 0 {
- return appendArray(b, rv, n)
- }
-
- return b, "", nil
- }
- }
-
- var del = ","
- var err error
- var iv interface{} = rv.Interface()
-
- if ad, ok := iv.(ArrayDelimiter); ok {
- del = ad.ArrayDelimiter()
- }
-
- if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
- return b, del, err
- }
-
- switch v := iv.(type) {
- case nil:
- return append(b, "NULL"...), del, nil
- case []byte:
- return appendArrayQuotedBytes(b, v), del, nil
- case string:
- return appendArrayQuotedBytes(b, []byte(v)), del, nil
- }
-
- b, err = appendValue(b, iv)
- return b, del, err
-}
-
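-// appendArrayQuotedBytes appends v to b as a double-quoted array element,
-// backslash-escaping any embedded '"' or '\' characters.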
-func appendArrayQuotedBytes(b, v []byte) []byte {
- b = append(b, '"')
- for {
- i := bytes.IndexAny(v, `"\`)
- if i < 0 {
- b = append(b, v...)
- break
- }
- if i > 0 {
- b = append(b, v[:i]...)
- }
- b = append(b, '\\', v[i])
- v = v[i+1:]
- }
- return append(b, '"')
-}
-
-func appendValue(b []byte, v driver.Value) ([]byte, error) {
- return append(b, encode(nil, v, 0)...), nil
-}
-
-// parseArray extracts the dimensions and elements of an array represented in
-// text format. Only representations emitted by the backend are supported.
-// Notably, whitespace around brackets and delimiters is significant, and NULL
-// is case-sensitive.
-//
-// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
-func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
- var depth, i int
-
- if len(src) < 1 || src[0] != '{' {
- return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
- }
-
-Open:
- for i < len(src) {
- switch src[i] {
- case '{':
- depth++
- i++
- case '}':
- elems = make([][]byte, 0)
- goto Close
- default:
- break Open
- }
- }
- dims = make([]int, i)
-
-Element:
- for i < len(src) {
- switch src[i] {
- case '{':
- if depth == len(dims) {
- break Element
- }
- depth++
- dims[depth-1] = 0
- i++
- case '"':
- var elem = []byte{}
- var escape bool
- for i++; i < len(src); i++ {
- if escape {
- elem = append(elem, src[i])
- escape = false
- } else {
- switch src[i] {
- default:
- elem = append(elem, src[i])
- case '\\':
- escape = true
- case '"':
- elems = append(elems, elem)
- i++
- break Element
- }
- }
- }
- default:
- for start := i; i < len(src); i++ {
- if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
- elem := src[start:i]
- if len(elem) == 0 {
- return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
- }
- if bytes.Equal(elem, []byte("NULL")) {
- elem = nil
- }
- elems = append(elems, elem)
- break Element
- }
- }
- }
- }
-
- for i < len(src) {
- if bytes.HasPrefix(src[i:], del) && depth > 0 {
- dims[depth-1]++
- i += len(del)
- goto Element
- } else if src[i] == '}' && depth > 0 {
- dims[depth-1]++
- depth--
- i++
- } else {
- return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
- }
- }
-
-Close:
- for i < len(src) {
- if src[i] == '}' && depth > 0 {
- depth--
- i++
- } else {
- return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
- }
- }
- if depth > 0 {
- err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
- }
- if err == nil {
- for _, d := range dims {
- if (len(elems) % d) != 0 {
- err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
- }
- }
- }
- return
-}
-
-func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
- dims, elems, err := parseArray(src, del)
- if err != nil {
- return nil, err
- }
- if len(dims) > 1 {
- return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
- }
- return elems, err
-}
diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go
deleted file mode 100644
index 666b0012..00000000
--- a/vendor/github.com/lib/pq/buf.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package pq
-
-import (
- "bytes"
- "encoding/binary"
-
- "github.com/lib/pq/oid"
-)
-
-type readBuf []byte
-
-func (b *readBuf) int32() (n int) {
- n = int(int32(binary.BigEndian.Uint32(*b)))
- *b = (*b)[4:]
- return
-}
-
-func (b *readBuf) oid() (n oid.Oid) {
- n = oid.Oid(binary.BigEndian.Uint32(*b))
- *b = (*b)[4:]
- return
-}
-
-// N.B.: this is actually an unsigned 16-bit integer, unlike int32.
-func (b *readBuf) int16() (n int) {
- n = int(binary.BigEndian.Uint16(*b))
- *b = (*b)[2:]
- return
-}
-
-func (b *readBuf) string() string {
- i := bytes.IndexByte(*b, 0)
- if i < 0 {
- errorf("invalid message format; expected string terminator")
- }
- s := (*b)[:i]
- *b = (*b)[i+1:]
- return string(s)
-}
-
-func (b *readBuf) next(n int) (v []byte) {
- v = (*b)[:n]
- *b = (*b)[n:]
- return
-}
-
-func (b *readBuf) byte() byte {
- return b.next(1)[0]
-}
-
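-// writeBuf accumulates one or more frontend protocol messages. pos marks the
-// offset of the 4-byte length placeholder for the message currently being
-// built; wrap and next backfill it once the message is complete.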
-type writeBuf struct {
- buf []byte
- pos int
-}
-
-func (b *writeBuf) int32(n int) {
- x := make([]byte, 4)
- binary.BigEndian.PutUint32(x, uint32(n))
- b.buf = append(b.buf, x...)
-}
-
-func (b *writeBuf) int16(n int) {
- x := make([]byte, 2)
- binary.BigEndian.PutUint16(x, uint16(n))
- b.buf = append(b.buf, x...)
-}
-
-func (b *writeBuf) string(s string) {
- b.buf = append(b.buf, (s + "\000")...)
-}
-
-func (b *writeBuf) byte(c byte) {
- b.buf = append(b.buf, c)
-}
-
-func (b *writeBuf) bytes(v []byte) {
- b.buf = append(b.buf, v...)
-}
-
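-// wrap backfills the length of the in-progress message into the placeholder
-// at pos and returns the whole buffer, ready to be written to the socket.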
-func (b *writeBuf) wrap() []byte {
- p := b.buf[b.pos:]
- binary.BigEndian.PutUint32(p, uint32(len(p)))
- return b.buf
-}
-
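-// next finalizes the length of the in-progress message, then starts a new
-// message of type c with a fresh 4-byte length placeholder.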
-func (b *writeBuf) next(c byte) {
- p := b.buf[b.pos:]
- binary.BigEndian.PutUint32(p, uint32(len(p)))
- b.pos = len(b.buf) + 1
- b.buf = append(b.buf, c, 0, 0, 0, 0)
-}
diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go
deleted file mode 100644
index 43c8df29..00000000
--- a/vendor/github.com/lib/pq/conn.go
+++ /dev/null
@@ -1,1854 +0,0 @@
-package pq
-
-import (
- "bufio"
- "crypto/md5"
- "database/sql"
- "database/sql/driver"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "net"
- "os"
- "os/user"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "time"
- "unicode"
-
- "github.com/lib/pq/oid"
-)
-
-// Common error types
-var (
- ErrNotSupported = errors.New("pq: Unsupported command")
- ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction")
- ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server")
- ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less")
- ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly")
-
- errUnexpectedReady = errors.New("unexpected ReadyForQuery")
- errNoRowsAffected = errors.New("no RowsAffected available after the empty statement")
- errNoLastInsertID = errors.New("no LastInsertId available after the empty statement")
-)
-
-// Driver is the Postgres database driver.
-type Driver struct{}
-
-// Open opens a new connection to the database. name is a connection string.
-// Most users should only use it through the database/sql package from the
-// standard library.
-func (d *Driver) Open(name string) (driver.Conn, error) {
- return Open(name)
-}
-
-func init() {
- sql.Register("postgres", &Driver{})
-}
-
-type parameterStatus struct {
- // server version in the same format as server_version_num, or 0 if
- // unavailable
- serverVersion int
-
- // the current location based on the TimeZone value of the session, if
- // available
- currentLocation *time.Location
-}
-
-type transactionStatus byte
-
-const (
- txnStatusIdle transactionStatus = 'I'
- txnStatusIdleInTransaction transactionStatus = 'T'
- txnStatusInFailedTransaction transactionStatus = 'E'
-)
-
-func (s transactionStatus) String() string {
- switch s {
- case txnStatusIdle:
- return "idle"
- case txnStatusIdleInTransaction:
- return "idle in transaction"
- case txnStatusInFailedTransaction:
- return "in a failed transaction"
- default:
- errorf("unknown transactionStatus %d", s)
- }
-
- panic("not reached")
-}
-
-// Dialer is the dialer interface. It can be used to obtain more control over
-// how pq creates network connections.
-type Dialer interface {
- Dial(network, address string) (net.Conn, error)
- DialTimeout(network, address string, timeout time.Duration) (net.Conn, error)
-}
-
-type defaultDialer struct{}
-
-func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) {
- return net.Dial(ntw, addr)
-}
-func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) {
- return net.DialTimeout(ntw, addr, timeout)
-}
-
-type conn struct {
- c net.Conn
- buf *bufio.Reader
- namei int
- scratch [512]byte
- txnStatus transactionStatus
- txnFinish func()
-
- // Save connection arguments to use during CancelRequest.
- dialer Dialer
- opts values
-
- // Cancellation key data for use with CancelRequest messages.
- processID int
- secretKey int
-
- parameterStatus parameterStatus
-
- saveMessageType byte
- saveMessageBuffer []byte
-
- // If true, this connection is bad and all public-facing functions should
- // return ErrBadConn.
- bad bool
-
- // If set, this connection should never use the binary format when
- // receiving query results from prepared statements. Only provided for
- // debugging.
- disablePreparedBinaryResult bool
-
- // Whether to always send []byte parameters over as binary. Enables single
- // round-trip mode for non-prepared Query calls.
- binaryParameters bool
-
-	// If true, this connection is in the middle of a COPY.
- inCopy bool
-}
-
-// Handle driver-side settings in parsed connection string.
-func (cn *conn) handleDriverSettings(o values) (err error) {
- boolSetting := func(key string, val *bool) error {
- if value, ok := o[key]; ok {
- if value == "yes" {
- *val = true
- } else if value == "no" {
- *val = false
- } else {
- return fmt.Errorf("unrecognized value %q for %s", value, key)
- }
- }
- return nil
- }
-
- err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult)
- if err != nil {
- return err
- }
- return boolSetting("binary_parameters", &cn.binaryParameters)
-}
-
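-// handlePgpass fills in o["password"] from the .pgpass file (or $PGPASSFILE)
-// when no password was supplied, following libpq's matching rules.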
-func (cn *conn) handlePgpass(o values) {
- // if a password was supplied, do not process .pgpass
- if _, ok := o["password"]; ok {
- return
- }
- filename := os.Getenv("PGPASSFILE")
- if filename == "" {
- // XXX this code doesn't work on Windows where the default filename is
- // XXX %APPDATA%\postgresql\pgpass.conf
- // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
- userHome := os.Getenv("HOME")
- if userHome == "" {
- user, err := user.Current()
- if err != nil {
- return
- }
- userHome = user.HomeDir
- }
- filename = filepath.Join(userHome, ".pgpass")
- }
- fileinfo, err := os.Stat(filename)
- if err != nil {
- return
- }
- mode := fileinfo.Mode()
- if mode&(0x77) != 0 {
- // XXX should warn about incorrect .pgpass permissions as psql does
- return
- }
- file, err := os.Open(filename)
- if err != nil {
- return
- }
- defer file.Close()
- scanner := bufio.NewScanner(io.Reader(file))
- hostname := o["host"]
- ntw, _ := network(o)
- port := o["port"]
- db := o["dbname"]
- username := o["user"]
- // From: https://github.com/tg/pgpass/blob/master/reader.go
- getFields := func(s string) []string {
- fs := make([]string, 0, 5)
- f := make([]rune, 0, len(s))
-
- var esc bool
- for _, c := range s {
- switch {
- case esc:
- f = append(f, c)
- esc = false
- case c == '\\':
- esc = true
- case c == ':':
- fs = append(fs, string(f))
- f = f[:0]
- default:
- f = append(f, c)
- }
- }
- return append(fs, string(f))
- }
- for scanner.Scan() {
- line := scanner.Text()
- if len(line) == 0 || line[0] == '#' {
- continue
- }
- split := getFields(line)
- if len(split) != 5 {
- continue
- }
- if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
- o["password"] = split[4]
- return
- }
- }
-}
-
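-// writeBuf starts a new frontend message of type b in the connection's
-// scratch buffer, reserving 4 bytes for the message length.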
-func (cn *conn) writeBuf(b byte) *writeBuf {
- cn.scratch[0] = b
- return &writeBuf{
- buf: cn.scratch[:5],
- pos: 1,
- }
-}
-
-// Open opens a new connection to the database. name is a connection string.
-// Most users should only use it through the database/sql package from the
-// standard library.
-func Open(name string) (_ driver.Conn, err error) {
- return DialOpen(defaultDialer{}, name)
-}
-
-// DialOpen opens a new connection to the database using a dialer.
-func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
- // Handle any panics during connection initialization. Note that we
- // specifically do *not* want to use errRecover(), as that would turn any
- // connection errors into ErrBadConns, hiding the real error message from
- // the user.
- defer errRecoverNoErrBadConn(&err)
-
- o := make(values)
-
- // A number of defaults are applied here, in this order:
- //
- // * Very low precedence defaults applied in every situation
- // * Environment variables
- // * Explicitly passed connection information
- o["host"] = "localhost"
- o["port"] = "5432"
- // N.B.: Extra float digits should be set to 3, but that breaks
- // Postgres 8.4 and older, where the max is 2.
- o["extra_float_digits"] = "2"
- for k, v := range parseEnviron(os.Environ()) {
- o[k] = v
- }
-
- if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") {
- name, err = ParseURL(name)
- if err != nil {
- return nil, err
- }
- }
-
- if err := parseOpts(name, o); err != nil {
- return nil, err
- }
-
- // Use the "fallback" application name if necessary
- if fallback, ok := o["fallback_application_name"]; ok {
- if _, ok := o["application_name"]; !ok {
- o["application_name"] = fallback
- }
- }
-
- // We can't work with any client_encoding other than UTF-8 currently.
- // However, we have historically allowed the user to set it to UTF-8
- // explicitly, and there's no reason to break such programs, so allow that.
- // Note that the "options" setting could also set client_encoding, but
- // parsing its value is not worth it. Instead, we always explicitly send
- // client_encoding as a separate run-time parameter, which should override
- // anything set in options.
- if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
- return nil, errors.New("client_encoding must be absent or 'UTF8'")
- }
- o["client_encoding"] = "UTF8"
- // DateStyle needs a similar treatment.
- if datestyle, ok := o["datestyle"]; ok {
- if datestyle != "ISO, MDY" {
- panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v",
- "ISO, MDY", datestyle))
- }
- } else {
- o["datestyle"] = "ISO, MDY"
- }
-
- // If a user is not provided by any other means, the last
- // resort is to use the current operating system provided user
- // name.
- if _, ok := o["user"]; !ok {
- u, err := userCurrent()
- if err != nil {
- return nil, err
- }
- o["user"] = u
- }
-
- cn := &conn{
- opts: o,
- dialer: d,
- }
- err = cn.handleDriverSettings(o)
- if err != nil {
- return nil, err
- }
- cn.handlePgpass(o)
-
- cn.c, err = dial(d, o)
- if err != nil {
- return nil, err
- }
-
- err = cn.ssl(o)
- if err != nil {
- return nil, err
- }
-
- // cn.startup panics on error. Make sure we don't leak cn.c.
- panicking := true
- defer func() {
- if panicking {
- cn.c.Close()
- }
- }()
-
- cn.buf = bufio.NewReader(cn.c)
- cn.startup(o)
-
- // reset the deadline, in case one was set (see dial)
- if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
- err = cn.c.SetDeadline(time.Time{})
- }
- panicking = false
- return cn, err
-}
-
-func dial(d Dialer, o values) (net.Conn, error) {
- ntw, addr := network(o)
- // SSL is not necessary or supported over UNIX domain sockets
- if ntw == "unix" {
- o["sslmode"] = "disable"
- }
-
- // Zero or not specified means wait indefinitely.
- if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
- seconds, err := strconv.ParseInt(timeout, 10, 0)
- if err != nil {
- return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err)
- }
- duration := time.Duration(seconds) * time.Second
- // connect_timeout should apply to the entire connection establishment
- // procedure, so we both use a timeout for the TCP connection
- // establishment and set a deadline for doing the initial handshake.
- // The deadline is then reset after startup() is done.
- deadline := time.Now().Add(duration)
- conn, err := d.DialTimeout(ntw, addr, duration)
- if err != nil {
- return nil, err
- }
- err = conn.SetDeadline(deadline)
- return conn, err
- }
- return d.Dial(ntw, addr)
-}
-
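-// network derives the network type and address to dial from the "host" and
-// "port" options; a host beginning with "/" selects a Unix-domain socket.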
-func network(o values) (string, string) {
- host := o["host"]
-
- if strings.HasPrefix(host, "/") {
- sockPath := path.Join(host, ".s.PGSQL."+o["port"])
- return "unix", sockPath
- }
-
- return "tcp", net.JoinHostPort(host, o["port"])
-}
-
-type values map[string]string
-
-// scanner implements a tokenizer for libpq-style option strings.
-type scanner struct {
- s []rune
- i int
-}
-
-// newScanner returns a new scanner initialized with the option string s.
-func newScanner(s string) *scanner {
- return &scanner{[]rune(s), 0}
-}
-
-// Next returns the next rune.
-// It returns 0, false if the end of the text has been reached.
-func (s *scanner) Next() (rune, bool) {
- if s.i >= len(s.s) {
- return 0, false
- }
- r := s.s[s.i]
- s.i++
- return r, true
-}
-
-// SkipSpaces returns the next non-whitespace rune.
-// It returns 0, false if the end of the text has been reached.
-func (s *scanner) SkipSpaces() (rune, bool) {
- r, ok := s.Next()
- for unicode.IsSpace(r) && ok {
- r, ok = s.Next()
- }
- return r, ok
-}
-
-// parseOpts parses the options from name and adds them to the values.
-//
-// The parsing code is based on conninfo_parse from libpq's fe-connect.c
-func parseOpts(name string, o values) error {
- s := newScanner(name)
-
- for {
- var (
- keyRunes, valRunes []rune
- r rune
- ok bool
- )
-
- if r, ok = s.SkipSpaces(); !ok {
- break
- }
-
- // Scan the key
- for !unicode.IsSpace(r) && r != '=' {
- keyRunes = append(keyRunes, r)
- if r, ok = s.Next(); !ok {
- break
- }
- }
-
- // Skip any whitespace if we're not at the = yet
- if r != '=' {
- r, ok = s.SkipSpaces()
- }
-
- // The current character should be =
- if r != '=' || !ok {
-			return fmt.Errorf(`missing "=" after %q in connection info string`, string(keyRunes))
- }
-
- // Skip any whitespace after the =
- if r, ok = s.SkipSpaces(); !ok {
- // If we reach the end here, the last value is just an empty string as per libpq.
- o[string(keyRunes)] = ""
- break
- }
-
- if r != '\'' {
- for !unicode.IsSpace(r) {
- if r == '\\' {
- if r, ok = s.Next(); !ok {
- return fmt.Errorf(`missing character after backslash`)
- }
- }
- valRunes = append(valRunes, r)
-
- if r, ok = s.Next(); !ok {
- break
- }
- }
- } else {
- quote:
- for {
- if r, ok = s.Next(); !ok {
- return fmt.Errorf(`unterminated quoted string literal in connection string`)
- }
- switch r {
- case '\'':
- break quote
- case '\\':
- r, _ = s.Next()
- fallthrough
- default:
- valRunes = append(valRunes, r)
- }
- }
- }
-
- o[string(keyRunes)] = string(valRunes)
- }
-
- return nil
-}
-
-func (cn *conn) isInTransaction() bool {
- return cn.txnStatus == txnStatusIdleInTransaction ||
- cn.txnStatus == txnStatusInFailedTransaction
-}
-
-func (cn *conn) checkIsInTransaction(intxn bool) {
- if cn.isInTransaction() != intxn {
- cn.bad = true
- errorf("unexpected transaction status %v", cn.txnStatus)
- }
-}
-
-func (cn *conn) Begin() (_ driver.Tx, err error) {
- return cn.begin("")
-}
-
-func (cn *conn) begin(mode string) (_ driver.Tx, err error) {
- if cn.bad {
- return nil, driver.ErrBadConn
- }
- defer cn.errRecover(&err)
-
- cn.checkIsInTransaction(false)
- _, commandTag, err := cn.simpleExec("BEGIN" + mode)
- if err != nil {
- return nil, err
- }
- if commandTag != "BEGIN" {
- cn.bad = true
- return nil, fmt.Errorf("unexpected command tag %s", commandTag)
- }
- if cn.txnStatus != txnStatusIdleInTransaction {
- cn.bad = true
- return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus)
- }
- return cn, nil
-}
-
-func (cn *conn) closeTxn() {
- if finish := cn.txnFinish; finish != nil {
- finish()
- }
-}
-
-func (cn *conn) Commit() (err error) {
- defer cn.closeTxn()
- if cn.bad {
- return driver.ErrBadConn
- }
- defer cn.errRecover(&err)
-
- cn.checkIsInTransaction(true)
- // We don't want the client to think that everything is okay if it tries
- // to commit a failed transaction. However, no matter what we return,
- // database/sql will release this connection back into the free connection
- // pool so we have to abort the current transaction here. Note that you
- // would get the same behaviour if you issued a COMMIT in a failed
- // transaction, so it's also the least surprising thing to do here.
- if cn.txnStatus == txnStatusInFailedTransaction {
- if err := cn.Rollback(); err != nil {
- return err
- }
- return ErrInFailedTransaction
- }
-
- _, commandTag, err := cn.simpleExec("COMMIT")
- if err != nil {
- if cn.isInTransaction() {
- cn.bad = true
- }
- return err
- }
- if commandTag != "COMMIT" {
- cn.bad = true
- return fmt.Errorf("unexpected command tag %s", commandTag)
- }
- cn.checkIsInTransaction(false)
- return nil
-}
-
-func (cn *conn) Rollback() (err error) {
- defer cn.closeTxn()
- if cn.bad {
- return driver.ErrBadConn
- }
- defer cn.errRecover(&err)
-
- cn.checkIsInTransaction(true)
- _, commandTag, err := cn.simpleExec("ROLLBACK")
- if err != nil {
- if cn.isInTransaction() {
- cn.bad = true
- }
- return err
- }
- if commandTag != "ROLLBACK" {
- return fmt.Errorf("unexpected command tag %s", commandTag)
- }
- cn.checkIsInTransaction(false)
- return nil
-}
-
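-// gname returns a statement name that is unique within this connection.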
-func (cn *conn) gname() string {
- cn.namei++
- return strconv.FormatInt(int64(cn.namei), 10)
-}
-
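-// simpleExec runs q over the simple-query protocol, returning the driver
-// result and command tag once the backend reports ReadyForQuery.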
-func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) {
- b := cn.writeBuf('Q')
- b.string(q)
- cn.send(b)
-
- for {
- t, r := cn.recv1()
- switch t {
- case 'C':
- res, commandTag = cn.parseComplete(r.string())
- case 'Z':
- cn.processReadyForQuery(r)
- if res == nil && err == nil {
- err = errUnexpectedReady
- }
- // done
- return
- case 'E':
- err = parseError(r)
- case 'I':
- res = emptyRows
- case 'T', 'D':
- // ignore any results
- default:
- cn.bad = true
- errorf("unknown response for simple query: %q", t)
- }
- }
-}
-
-func (cn *conn) simpleQuery(q string) (res *rows, err error) {
- defer cn.errRecover(&err)
-
- b := cn.writeBuf('Q')
- b.string(q)
- cn.send(b)
-
- for {
- t, r := cn.recv1()
- switch t {
- case 'C', 'I':
- // We allow queries which don't return any results through Query as
- // well as Exec. We still have to give database/sql a rows object
- // the user can close, though, to avoid connections from being
- // leaked. A "rows" with done=true works fine for that purpose.
- if err != nil {
- cn.bad = true
- errorf("unexpected message %q in simple query execution", t)
- }
- if res == nil {
- res = &rows{
- cn: cn,
- }
- }
- // Set the result and tag to the last command complete if there wasn't a
- // query already run. Although queries usually return from here and cede
- // control to Next, a query with zero results does not.
- if t == 'C' && res.colNames == nil {
- res.result, res.tag = cn.parseComplete(r.string())
- }
- res.done = true
- case 'Z':
- cn.processReadyForQuery(r)
- // done
- return
- case 'E':
- res = nil
- err = parseError(r)
- case 'D':
- if res == nil {
- cn.bad = true
- errorf("unexpected DataRow in simple query execution")
- }
- // the query didn't fail; kick off to Next
- cn.saveMessage(t, r)
- return
- case 'T':
- // res might be non-nil here if we received a previous
- // CommandComplete, but that's fine; just overwrite it
- res = &rows{cn: cn}
- res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r)
-
- // To work around a bug in QueryRow in Go 1.2 and earlier, wait
- // until the first DataRow has been received.
- default:
- cn.bad = true
- errorf("unknown response for simple query: %q", t)
- }
- }
-}
-
-type noRows struct{}
-
-var emptyRows noRows
-
-var _ driver.Result = noRows{}
-
-func (noRows) LastInsertId() (int64, error) {
- return 0, errNoLastInsertID
-}
-
-func (noRows) RowsAffected() (int64, error) {
- return 0, errNoRowsAffected
-}
-
-// Decides which column formats to use for a prepared statement. The input is
-// an array of type oids, one element per result column.
-func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) {
- if len(colTyps) == 0 {
- return nil, colFmtDataAllText
- }
-
- colFmts = make([]format, len(colTyps))
- if forceText {
- return colFmts, colFmtDataAllText
- }
-
- allBinary := true
- allText := true
- for i, t := range colTyps {
- switch t.OID {
- // This is the list of types to use binary mode for when receiving them
- // through a prepared statement. If a type appears in this list, it
- // must also be implemented in binaryDecode in encode.go.
- case oid.T_bytea:
- fallthrough
- case oid.T_int8:
- fallthrough
- case oid.T_int4:
- fallthrough
- case oid.T_int2:
- fallthrough
- case oid.T_uuid:
- colFmts[i] = formatBinary
- allText = false
-
- default:
- allBinary = false
- }
- }
-
- if allBinary {
- return colFmts, colFmtDataAllBinary
- } else if allText {
- return colFmts, colFmtDataAllText
- } else {
- colFmtData = make([]byte, 2+len(colFmts)*2)
- binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts)))
- for i, v := range colFmts {
- binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v))
- }
- return colFmts, colFmtData
- }
-}
-
-func (cn *conn) prepareTo(q, stmtName string) *stmt {
- st := &stmt{cn: cn, name: stmtName}
-
- b := cn.writeBuf('P')
- b.string(st.name)
- b.string(q)
- b.int16(0)
-
- b.next('D')
- b.byte('S')
- b.string(st.name)
-
- b.next('S')
- cn.send(b)
-
- cn.readParseResponse()
- st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse()
- st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult)
- cn.readReadyForQuery()
- return st
-}
-
-func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) {
- if cn.bad {
- return nil, driver.ErrBadConn
- }
- defer cn.errRecover(&err)
-
- if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") {
- s, err := cn.prepareCopyIn(q)
- if err == nil {
- cn.inCopy = true
- }
- return s, err
- }
- return cn.prepareTo(q, cn.gname()), nil
-}
-
-func (cn *conn) Close() (err error) {
- // Skip cn.bad return here because we always want to close a connection.
- defer cn.errRecover(&err)
-
- // Ensure that cn.c.Close is always run. Since error handling is done with
- // panics and cn.errRecover, the Close must be in a defer.
- defer func() {
- cerr := cn.c.Close()
- if err == nil {
- err = cerr
- }
- }()
-
- // Don't go through send(); ListenerConn relies on us not scribbling on the
- // scratch buffer of this connection.
- return cn.sendSimpleMessage('X')
-}
-
-// Implement the "Queryer" interface
-func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) {
- return cn.query(query, args)
-}
-
-func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) {
- if cn.bad {
- return nil, driver.ErrBadConn
- }
- if cn.inCopy {
- return nil, errCopyInProgress
- }
- defer cn.errRecover(&err)
-
- // Check to see if we can use the "simpleQuery" interface, which is
- // *much* faster than going through prepare/exec
- if len(args) == 0 {
- return cn.simpleQuery(query)
- }
-
- if cn.binaryParameters {
- cn.sendBinaryModeQuery(query, args)
-
- cn.readParseResponse()
- cn.readBindResponse()
- rows := &rows{cn: cn}
- rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse()
- cn.postExecuteWorkaround()
- return rows, nil
- }
- st := cn.prepareTo(query, "")
- st.exec(args)
- return &rows{
- cn: cn,
- colNames: st.colNames,
- colTyps: st.colTyps,
- colFmts: st.colFmts,
- }, nil
-}
-
-// Implement the optional "Execer" interface for one-shot queries
-func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) {
- if cn.bad {
- return nil, driver.ErrBadConn
- }
- defer cn.errRecover(&err)
-
- // Check to see if we can use the "simpleExec" interface, which is
- // *much* faster than going through prepare/exec
- if len(args) == 0 {
- // ignore commandTag, our caller doesn't care
- r, _, err := cn.simpleExec(query)
- return r, err
- }
-
- if cn.binaryParameters {
- cn.sendBinaryModeQuery(query, args)
-
- cn.readParseResponse()
- cn.readBindResponse()
- cn.readPortalDescribeResponse()
- cn.postExecuteWorkaround()
- res, _, err = cn.readExecuteResponse("Execute")
- return res, err
- }
- // Use the unnamed statement to defer planning until bind
- // time, or else value-based selectivity estimates cannot be
- // used.
- st := cn.prepareTo(query, "")
- r, err := st.Exec(args)
- if err != nil {
- panic(err)
- }
- return r, err
-}
-
-func (cn *conn) send(m *writeBuf) {
- _, err := cn.c.Write(m.wrap())
- if err != nil {
- panic(err)
- }
-}
-
-func (cn *conn) sendStartupPacket(m *writeBuf) error {
- _, err := cn.c.Write((m.wrap())[1:])
- return err
-}
-
-// Send a message of type typ to the server on the other end of cn. The
-// message should have no payload. This method does not use the scratch
-// buffer.
-func (cn *conn) sendSimpleMessage(typ byte) (err error) {
- _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'})
- return err
-}
-
-// saveMessage memorizes a message and its buffer in the conn struct.
-// recvMessage will then return these values on the next call to it. This
-// method is useful in cases where you have to see what the next message is
-// going to be (e.g. to see whether it's an error or not) but you can't handle
-// the message yourself.
-func (cn *conn) saveMessage(typ byte, buf *readBuf) {
- if cn.saveMessageType != 0 {
- cn.bad = true
- errorf("unexpected saveMessageType %d", cn.saveMessageType)
- }
- cn.saveMessageType = typ
- cn.saveMessageBuffer = *buf
-}
-
-// recvMessage receives any message from the backend, or returns an error if
-// a problem occurred while reading the message.
-func (cn *conn) recvMessage(r *readBuf) (byte, error) {
- // workaround for a QueryRow bug, see exec
- if cn.saveMessageType != 0 {
- t := cn.saveMessageType
- *r = cn.saveMessageBuffer
- cn.saveMessageType = 0
- cn.saveMessageBuffer = nil
- return t, nil
- }
-
- x := cn.scratch[:5]
- _, err := io.ReadFull(cn.buf, x)
- if err != nil {
- return 0, err
- }
-
- // read the type and length of the message that follows
- t := x[0]
- n := int(binary.BigEndian.Uint32(x[1:])) - 4
- var y []byte
- if n <= len(cn.scratch) {
- y = cn.scratch[:n]
- } else {
- y = make([]byte, n)
- }
- _, err = io.ReadFull(cn.buf, y)
- if err != nil {
- return 0, err
- }
- *r = y
- return t, nil
-}
-
-// recv receives a message from the backend, but if an error happened while
-// reading the message or the received message was an ErrorResponse, it panics.
-// NoticeResponses are ignored. This function should generally be used only
-// during the startup sequence.
-func (cn *conn) recv() (t byte, r *readBuf) {
- for {
- var err error
- r = &readBuf{}
- t, err = cn.recvMessage(r)
- if err != nil {
- panic(err)
- }
-
- switch t {
- case 'E':
- panic(parseError(r))
- case 'N':
- // ignore
- default:
- return
- }
- }
-}
-
-// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by
-// the caller to avoid an allocation.
-func (cn *conn) recv1Buf(r *readBuf) byte {
- for {
- t, err := cn.recvMessage(r)
- if err != nil {
- panic(err)
- }
-
- switch t {
- case 'A', 'N':
- // ignore
- case 'S':
- cn.processParameterStatus(r)
- default:
- return t
- }
- }
-}
-
-// recv1 receives a message from the backend, panicking if an error occurs
-// while attempting to read it. All asynchronous messages are ignored, with
-// the exception of ErrorResponse.
-func (cn *conn) recv1() (t byte, r *readBuf) {
- r = &readBuf{}
- t = cn.recv1Buf(r)
- return t, r
-}
-
-func (cn *conn) ssl(o values) error {
- upgrade, err := ssl(o)
- if err != nil {
- return err
- }
-
- if upgrade == nil {
- // Nothing to do
- return nil
- }
-
- w := cn.writeBuf(0)
- w.int32(80877103)
- if err = cn.sendStartupPacket(w); err != nil {
- return err
- }
-
- b := cn.scratch[:1]
- _, err = io.ReadFull(cn.c, b)
- if err != nil {
- return err
- }
-
- if b[0] != 'S' {
- return ErrSSLNotSupported
- }
-
- cn.c, err = upgrade(cn.c)
- return err
-}
-
-// isDriverSetting returns true iff a setting is purely for configuring the
-// driver's options and should not be sent to the server in the connection
-// startup packet.
-func isDriverSetting(key string) bool {
- switch key {
- case "host", "port":
- return true
- case "password":
- return true
- case "sslmode", "sslcert", "sslkey", "sslrootcert":
- return true
- case "fallback_application_name":
- return true
- case "connect_timeout":
- return true
- case "disable_prepared_binary_result":
- return true
- case "binary_parameters":
- return true
-
- default:
- return false
- }
-}
-
-func (cn *conn) startup(o values) {
- w := cn.writeBuf(0)
- w.int32(196608)
- // Send the backend the name of the database we want to connect to, and the
- // user we want to connect as. Additionally, we send over any run-time
- // parameters potentially included in the connection string. If the server
- // doesn't recognize any of them, it will reply with an error.
- for k, v := range o {
- if isDriverSetting(k) {
- // skip options which can't be run-time parameters
- continue
- }
- // The protocol requires us to supply the database name as "database"
- // instead of "dbname".
- if k == "dbname" {
- k = "database"
- }
- w.string(k)
- w.string(v)
- }
- w.string("")
- if err := cn.sendStartupPacket(w); err != nil {
- panic(err)
- }
-
- for {
- t, r := cn.recv()
- switch t {
- case 'K':
- cn.processBackendKeyData(r)
- case 'S':
- cn.processParameterStatus(r)
- case 'R':
- cn.auth(r, o)
- case 'Z':
- cn.processReadyForQuery(r)
- return
- default:
- errorf("unknown response for startup: %q", t)
- }
- }
-}
-
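-// auth answers the backend's authentication request r. Supported methods are
-// AuthenticationOk (0), cleartext password (3) and MD5 password (5).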
-func (cn *conn) auth(r *readBuf, o values) {
- switch code := r.int32(); code {
- case 0:
- // OK
- case 3:
- w := cn.writeBuf('p')
- w.string(o["password"])
- cn.send(w)
-
- t, r := cn.recv()
- if t != 'R' {
- errorf("unexpected password response: %q", t)
- }
-
- if r.int32() != 0 {
- errorf("unexpected authentication response: %q", t)
- }
- case 5:
- s := string(r.next(4))
- w := cn.writeBuf('p')
- w.string("md5" + md5s(md5s(o["password"]+o["user"])+s))
- cn.send(w)
-
- t, r := cn.recv()
- if t != 'R' {
- errorf("unexpected password response: %q", t)
- }
-
- if r.int32() != 0 {
- errorf("unexpected authentication response: %q", t)
- }
- default:
- errorf("unknown authentication response: %d", code)
- }
-}
-
-type format int
-
-const formatText format = 0
-const formatBinary format = 1
-
-// One result-column format code with the value 1 (i.e. all binary).
-var colFmtDataAllBinary = []byte{0, 1, 0, 1}
-
-// No result-column format codes (i.e. all text).
-var colFmtDataAllText = []byte{0, 0}
-
-type stmt struct {
- cn *conn
- name string
- colNames []string
- colFmts []format
- colFmtData []byte
- colTyps []fieldDesc
- paramTyps []oid.Oid
- closed bool
-}
-
-func (st *stmt) Close() (err error) {
- if st.closed {
- return nil
- }
- if st.cn.bad {
- return driver.ErrBadConn
- }
- defer st.cn.errRecover(&err)
-
- w := st.cn.writeBuf('C')
- w.byte('S')
- w.string(st.name)
- st.cn.send(w)
-
- st.cn.send(st.cn.writeBuf('S'))
-
- t, _ := st.cn.recv1()
- if t != '3' {
- st.cn.bad = true
- errorf("unexpected close response: %q", t)
- }
- st.closed = true
-
- t, r := st.cn.recv1()
- if t != 'Z' {
- st.cn.bad = true
- errorf("expected ready for query, but got: %q", t)
- }
- st.cn.processReadyForQuery(r)
-
- return nil
-}
-
-func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) {
- if st.cn.bad {
- return nil, driver.ErrBadConn
- }
- defer st.cn.errRecover(&err)
-
- st.exec(v)
- return &rows{
- cn: st.cn,
- colNames: st.colNames,
- colTyps: st.colTyps,
- colFmts: st.colFmts,
- }, nil
-}
-
-func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) {
- if st.cn.bad {
- return nil, driver.ErrBadConn
- }
- defer st.cn.errRecover(&err)
-
- st.exec(v)
- res, _, err = st.cn.readExecuteResponse("simple query")
- return res, err
-}
-
-func (st *stmt) exec(v []driver.Value) {
- if len(v) >= 65536 {
- errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v))
- }
- if len(v) != len(st.paramTyps) {
- errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps))
- }
-
- cn := st.cn
- w := cn.writeBuf('B')
- w.byte(0) // unnamed portal
- w.string(st.name)
-
- if cn.binaryParameters {
- cn.sendBinaryParameters(w, v)
- } else {
- w.int16(0)
- w.int16(len(v))
- for i, x := range v {
- if x == nil {
- w.int32(-1)
- } else {
- b := encode(&cn.parameterStatus, x, st.paramTyps[i])
- w.int32(len(b))
- w.bytes(b)
- }
- }
- }
- w.bytes(st.colFmtData)
-
- w.next('E')
- w.byte(0)
- w.int32(0)
-
- w.next('S')
- cn.send(w)
-
- cn.readBindResponse()
- cn.postExecuteWorkaround()
-
-}
-
-func (st *stmt) NumInput() int {
- return len(st.paramTyps)
-}
-
-// parseComplete parses the "command tag" from a CommandComplete message, and
-// returns the number of rows affected (if applicable) and a string
-// identifying only the command that was executed, e.g. "ALTER TABLE". If the
-// command tag could not be parsed, parseComplete panics.
-func (cn *conn) parseComplete(commandTag string) (driver.Result, string) {
- commandsWithAffectedRows := []string{
- "SELECT ",
- // INSERT is handled below
- "UPDATE ",
- "DELETE ",
- "FETCH ",
- "MOVE ",
- "COPY ",
- }
-
- var affectedRows *string
- for _, tag := range commandsWithAffectedRows {
- if strings.HasPrefix(commandTag, tag) {
- t := commandTag[len(tag):]
- affectedRows = &t
- commandTag = tag[:len(tag)-1]
- break
- }
- }
- // INSERT also includes the oid of the inserted row in its command tag.
- // Oids in user tables are deprecated, and the oid is only returned when
- // exactly one row is inserted, so it's unlikely to be of value to any
- // real-world application and we can ignore it.
- if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") {
- parts := strings.Split(commandTag, " ")
- if len(parts) != 3 {
- cn.bad = true
- errorf("unexpected INSERT command tag %s", commandTag)
- }
- affectedRows = &parts[len(parts)-1]
- commandTag = "INSERT"
- }
- // There should be no affected rows attached to the tag, just return it
- if affectedRows == nil {
- return driver.RowsAffected(0), commandTag
- }
- n, err := strconv.ParseInt(*affectedRows, 10, 64)
- if err != nil {
- cn.bad = true
- errorf("could not parse commandTag: %s", err)
- }
- return driver.RowsAffected(n), commandTag
-}
-
-type rows struct {
- cn *conn
- finish func()
- colNames []string
- colTyps []fieldDesc
- colFmts []format
- done bool
- rb readBuf
- result driver.Result
- tag string
-}
-
-func (rs *rows) Close() error {
- if finish := rs.finish; finish != nil {
- defer finish()
- }
- // no need to look at cn.bad as Next() will
- for {
- err := rs.Next(nil)
- switch err {
- case nil:
- case io.EOF:
- // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row
- // description, used with HasNextResultSet). We need to fetch messages until
- // we hit a 'Z', which is done by waiting for done to be set.
- if rs.done {
- return nil
- }
- default:
- return err
- }
- }
-}
-
-func (rs *rows) Columns() []string {
- return rs.colNames
-}
-
-func (rs *rows) Result() driver.Result {
- if rs.result == nil {
- return emptyRows
- }
- return rs.result
-}
-
-func (rs *rows) Tag() string {
- return rs.tag
-}
-
-func (rs *rows) Next(dest []driver.Value) (err error) {
- if rs.done {
- return io.EOF
- }
-
- conn := rs.cn
- if conn.bad {
- return driver.ErrBadConn
- }
- defer conn.errRecover(&err)
-
- for {
- t := conn.recv1Buf(&rs.rb)
- switch t {
- case 'E':
- err = parseError(&rs.rb)
- case 'C', 'I':
- if t == 'C' {
- rs.result, rs.tag = conn.parseComplete(rs.rb.string())
- }
- continue
- case 'Z':
- conn.processReadyForQuery(&rs.rb)
- rs.done = true
- if err != nil {
- return err
- }
- return io.EOF
- case 'D':
- n := rs.rb.int16()
- if err != nil {
- conn.bad = true
- errorf("unexpected DataRow after error %s", err)
- }
- if n < len(dest) {
- dest = dest[:n]
- }
- for i := range dest {
- l := rs.rb.int32()
- if l == -1 {
- dest[i] = nil
- continue
- }
- dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
- }
- return
- case 'T':
- rs.colNames, rs.colFmts, rs.colTyps = parsePortalRowDescribe(&rs.rb)
- return io.EOF
- default:
- errorf("unexpected message after execute: %q", t)
- }
- }
-}
-
-func (rs *rows) HasNextResultSet() bool {
- return !rs.done
-}
-
-func (rs *rows) NextResultSet() error {
- return nil
-}
-
-// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
-// used as part of an SQL statement. For example:
-//
-// tblname := "my_table"
-// data := "my_data"
-// quoted := pq.QuoteIdentifier(tblname)
-// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
-//
-// Any double quotes in name will be escaped. The quoted identifier will be
-// case sensitive when used in a query. If the input string contains a zero
-// byte, the result will be truncated immediately before it.
-func QuoteIdentifier(name string) string {
- end := strings.IndexRune(name, 0)
- if end > -1 {
- name = name[:end]
- }
- return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
-}
-
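-// md5s returns the hex-encoded MD5 digest of s, as used by the MD5 password
-// authentication scheme.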
-func md5s(s string) string {
- h := md5.New()
- h.Write([]byte(s))
- return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) {
- // Do one pass over the parameters to see if we're going to send any of
- // them over in binary. If we are, create a paramFormats array at the
- // same time.
- var paramFormats []int
- for i, x := range args {
- _, ok := x.([]byte)
- if ok {
- if paramFormats == nil {
- paramFormats = make([]int, len(args))
- }
- paramFormats[i] = 1
- }
- }
- if paramFormats == nil {
- b.int16(0)
- } else {
- b.int16(len(paramFormats))
- for _, x := range paramFormats {
- b.int16(x)
- }
- }
-
- b.int16(len(args))
- for _, x := range args {
- if x == nil {
- b.int32(-1)
- } else {
- datum := binaryEncode(&cn.parameterStatus, x)
- b.int32(len(datum))
- b.bytes(datum)
- }
- }
-}
-
-func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) {
- if len(args) >= 65536 {
- errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args))
- }
-
- b := cn.writeBuf('P')
- b.byte(0) // unnamed statement
- b.string(query)
- b.int16(0)
-
- b.next('B')
- b.int16(0) // unnamed portal and statement
- cn.sendBinaryParameters(b, args)
- b.bytes(colFmtDataAllText)
-
- b.next('D')
- b.byte('P')
- b.byte(0) // unnamed portal
-
- b.next('E')
- b.byte(0)
- b.int32(0)
-
- b.next('S')
- cn.send(b)
-}
-
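-// processParameterStatus records the server_version and TimeZone values
-// reported by the backend; all other run-time parameters are ignored.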
-func (cn *conn) processParameterStatus(r *readBuf) {
- var err error
-
- param := r.string()
- switch param {
- case "server_version":
- var major1 int
- var major2 int
- var minor int
- _, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor)
- if err == nil {
- cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor
- }
-
- case "TimeZone":
- cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string())
- if err != nil {
- cn.parameterStatus.currentLocation = nil
- }
-
- default:
- // ignore
- }
-}
-
-func (cn *conn) processReadyForQuery(r *readBuf) {
- cn.txnStatus = transactionStatus(r.byte())
-}
-
-func (cn *conn) readReadyForQuery() {
- t, r := cn.recv1()
- switch t {
- case 'Z':
- cn.processReadyForQuery(r)
- return
- default:
- cn.bad = true
- errorf("unexpected message %q; expected ReadyForQuery", t)
- }
-}
-
-func (cn *conn) processBackendKeyData(r *readBuf) {
- cn.processID = r.int32()
- cn.secretKey = r.int32()
-}
-
-func (cn *conn) readParseResponse() {
- t, r := cn.recv1()
- switch t {
- case '1':
- return
- case 'E':
- err := parseError(r)
- cn.readReadyForQuery()
- panic(err)
- default:
- cn.bad = true
- errorf("unexpected Parse response %q", t)
- }
-}
-
-func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) {
- for {
- t, r := cn.recv1()
- switch t {
- case 't':
- nparams := r.int16()
- paramTyps = make([]oid.Oid, nparams)
- for i := range paramTyps {
- paramTyps[i] = r.oid()
- }
- case 'n':
- return paramTyps, nil, nil
- case 'T':
- colNames, colTyps = parseStatementRowDescribe(r)
- return paramTyps, colNames, colTyps
- case 'E':
- err := parseError(r)
- cn.readReadyForQuery()
- panic(err)
- default:
- cn.bad = true
- errorf("unexpected Describe statement response %q", t)
- }
- }
-}
-
-func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []fieldDesc) {
- t, r := cn.recv1()
- switch t {
- case 'T':
- return parsePortalRowDescribe(r)
- case 'n':
- return nil, nil, nil
- case 'E':
- err := parseError(r)
- cn.readReadyForQuery()
- panic(err)
- default:
- cn.bad = true
- errorf("unexpected Describe response %q", t)
- }
- panic("not reached")
-}
-
-func (cn *conn) readBindResponse() {
- t, r := cn.recv1()
- switch t {
- case '2':
- return
- case 'E':
- err := parseError(r)
- cn.readReadyForQuery()
- panic(err)
- default:
- cn.bad = true
- errorf("unexpected Bind response %q", t)
- }
-}
-
-func (cn *conn) postExecuteWorkaround() {
- // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores
- // any errors from rows.Next, which masks errors that happened during the
- // execution of the query. To avoid the problem in common cases, we wait
- // here for one more message from the database. If it's not an error the
- // query will likely succeed (or perhaps has already, if it's a
- // CommandComplete), so we push the message into the conn struct; recv1
- // will return it as the next message for rows.Next or rows.Close.
- // However, if it's an error, we wait until ReadyForQuery and then return
- // the error to our caller.
- for {
- t, r := cn.recv1()
- switch t {
- case 'E':
- err := parseError(r)
- cn.readReadyForQuery()
- panic(err)
- case 'C', 'D', 'I':
- // the query didn't fail, but we can't process this message
- cn.saveMessage(t, r)
- return
- default:
- cn.bad = true
- errorf("unexpected message during extended query execution: %q", t)
- }
- }
-}
-
-// Only for Exec(), since we ignore the returned data
-func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) {
- for {
- t, r := cn.recv1()
- switch t {
- case 'C':
- if err != nil {
- cn.bad = true
- errorf("unexpected CommandComplete after error %s", err)
- }
- res, commandTag = cn.parseComplete(r.string())
- case 'Z':
- cn.processReadyForQuery(r)
- if res == nil && err == nil {
- err = errUnexpectedReady
- }
- return res, commandTag, err
- case 'E':
- err = parseError(r)
- case 'T', 'D', 'I':
- if err != nil {
- cn.bad = true
- errorf("unexpected %q after error %s", t, err)
- }
- if t == 'I' {
- res = emptyRows
- }
- // ignore any results
- default:
- cn.bad = true
- errorf("unknown %s response: %q", protocolState, t)
- }
- }
-}
-
-func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) {
- n := r.int16()
- colNames = make([]string, n)
- colTyps = make([]fieldDesc, n)
- for i := range colNames {
- colNames[i] = r.string()
- r.next(6)
- colTyps[i].OID = r.oid()
- colTyps[i].Len = r.int16()
- colTyps[i].Mod = r.int32()
- // format code not known when describing a statement; always 0
- r.next(2)
- }
- return
-}
-
-func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []fieldDesc) {
- n := r.int16()
- colNames = make([]string, n)
- colFmts = make([]format, n)
- colTyps = make([]fieldDesc, n)
- for i := range colNames {
- colNames[i] = r.string()
- r.next(6)
- colTyps[i].OID = r.oid()
- colTyps[i].Len = r.int16()
- colTyps[i].Mod = r.int32()
- colFmts[i] = format(r.int16())
- }
- return
-}
-
-// parseEnviron tries to mimic some of libpq's environment handling
-//
-// To ease testing, it does not directly reference os.Environ, but is
-// designed to accept its output.
-//
-// Environment-set connection information is intended to have a higher
-// precedence than a library default but lower than any explicitly
-// passed information (such as in the URL or connection string).
-func parseEnviron(env []string) (out map[string]string) {
- out = make(map[string]string)
-
- for _, v := range env {
- parts := strings.SplitN(v, "=", 2)
-
- accrue := func(keyname string) {
- out[keyname] = parts[1]
- }
- unsupported := func() {
- panic(fmt.Sprintf("setting %v not supported", parts[0]))
- }
-
- // The order of these is the same as is seen in the
- // PostgreSQL 9.1 manual. Unsupported but well-defined
- // keys cause a panic; these should be unset prior to
- // execution. Options which pq expects to be set to a
- // certain value are allowed, but must be set to that
- // value if present (they can, of course, be absent).
- switch parts[0] {
- case "PGHOST":
- accrue("host")
- case "PGHOSTADDR":
- unsupported()
- case "PGPORT":
- accrue("port")
- case "PGDATABASE":
- accrue("dbname")
- case "PGUSER":
- accrue("user")
- case "PGPASSWORD":
- accrue("password")
- case "PGSERVICE", "PGSERVICEFILE", "PGREALM":
- unsupported()
- case "PGOPTIONS":
- accrue("options")
- case "PGAPPNAME":
- accrue("application_name")
- case "PGSSLMODE":
- accrue("sslmode")
- case "PGSSLCERT":
- accrue("sslcert")
- case "PGSSLKEY":
- accrue("sslkey")
- case "PGSSLROOTCERT":
- accrue("sslrootcert")
- case "PGREQUIRESSL", "PGSSLCRL":
- unsupported()
- case "PGREQUIREPEER":
- unsupported()
- case "PGKRBSRVNAME", "PGGSSLIB":
- unsupported()
- case "PGCONNECT_TIMEOUT":
- accrue("connect_timeout")
- case "PGCLIENTENCODING":
- accrue("client_encoding")
- case "PGDATESTYLE":
- accrue("datestyle")
- case "PGTZ":
- accrue("timezone")
- case "PGGEQO":
- accrue("geqo")
- case "PGSYSCONFDIR", "PGLOCALEDIR":
- unsupported()
- }
- }
-
- return out
-}
-
-// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8".
-func isUTF8(name string) bool {
- // Recognize all sorts of silly things as "UTF-8", like Postgres does
- s := strings.Map(alnumLowerASCII, name)
- return s == "utf8" || s == "unicode"
-}
-
-func alnumLowerASCII(ch rune) rune {
- if 'A' <= ch && ch <= 'Z' {
- return ch + ('a' - 'A')
- }
- if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' {
- return ch
- }
- return -1 // discard
-}
diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go
deleted file mode 100644
index a5254f2b..00000000
--- a/vendor/github.com/lib/pq/conn_go18.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// +build go1.8
-
-package pq
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "fmt"
- "io"
- "io/ioutil"
-)
-
-// Implement the "QueryerContext" interface
-func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
- list := make([]driver.Value, len(args))
- for i, nv := range args {
- list[i] = nv.Value
- }
- finish := cn.watchCancel(ctx)
- r, err := cn.query(query, list)
- if err != nil {
- if finish != nil {
- finish()
- }
- return nil, err
- }
- r.finish = finish
- return r, nil
-}
-
-// Implement the "ExecerContext" interface
-func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
- list := make([]driver.Value, len(args))
- for i, nv := range args {
- list[i] = nv.Value
- }
-
- if finish := cn.watchCancel(ctx); finish != nil {
- defer finish()
- }
-
- return cn.Exec(query, list)
-}
-
-// Implement the "ConnBeginTx" interface
-func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
- var mode string
-
- switch sql.IsolationLevel(opts.Isolation) {
- case sql.LevelDefault:
- // Don't touch mode: use the server's default
- case sql.LevelReadUncommitted:
- mode = " ISOLATION LEVEL READ UNCOMMITTED"
- case sql.LevelReadCommitted:
- mode = " ISOLATION LEVEL READ COMMITTED"
- case sql.LevelRepeatableRead:
- mode = " ISOLATION LEVEL REPEATABLE READ"
- case sql.LevelSerializable:
- mode = " ISOLATION LEVEL SERIALIZABLE"
- default:
- return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
- }
-
- if opts.ReadOnly {
- mode += " READ ONLY"
- } else {
- mode += " READ WRITE"
- }
-
- tx, err := cn.begin(mode)
- if err != nil {
- return nil, err
- }
- cn.txnFinish = cn.watchCancel(ctx)
- return tx, nil
-}
-
-func (cn *conn) watchCancel(ctx context.Context) func() {
- if done := ctx.Done(); done != nil {
- finished := make(chan struct{})
- go func() {
- select {
- case <-done:
- _ = cn.cancel()
- finished <- struct{}{}
- case <-finished:
- }
- }()
- return func() {
- select {
- case <-finished:
- case finished <- struct{}{}:
- }
- }
- }
- return nil
-}
-
-func (cn *conn) cancel() error {
- c, err := dial(cn.dialer, cn.opts)
- if err != nil {
- return err
- }
- defer c.Close()
-
- {
- can := conn{
- c: c,
- }
- err = can.ssl(cn.opts)
- if err != nil {
- return err
- }
-
- w := can.writeBuf(0)
- w.int32(80877102) // cancel request code
- w.int32(cn.processID)
- w.int32(cn.secretKey)
-
- if err := can.sendStartupPacket(w); err != nil {
- return err
- }
- }
-
- // Read until EOF to ensure that the server received the cancel.
- {
- _, err := io.Copy(ioutil.Discard, c)
- return err
- }
-}
diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go
deleted file mode 100644
index 9e66eb5d..00000000
--- a/vendor/github.com/lib/pq/connector.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// +build go1.10
-
-package pq
-
-import (
- "context"
- "database/sql/driver"
-)
-
-// Connector represents a fixed configuration for the pq driver with a given
-// name. Connector satisfies the database/sql/driver Connector interface and
-// can be used to create any number of DB Conn's via the database/sql OpenDB
-// function.
-//
-// See https://golang.org/pkg/database/sql/driver/#Connector.
-// See https://golang.org/pkg/database/sql/#OpenDB.
-type connector struct {
- name string
-}
-
-// Connect returns a connection to the database using the fixed configuration
-// of this Connector. Context is not used.
-func (c *connector) Connect(_ context.Context) (driver.Conn, error) {
- return (&Driver{}).Open(c.name)
-}
-
-// Driver returns the underlying driver of this Connector.
-func (c *connector) Driver() driver.Driver {
- return &Driver{}
-}
-
-var _ driver.Connector = &connector{}
-
-// NewConnector returns a connector for the pq driver in a fixed configuration
-// with the given name. The returned connector can be used to create any number
-// of equivalent Conn's. The returned connector is intended to be used with
-// database/sql.OpenDB.
-//
-// See https://golang.org/pkg/database/sql/driver/#Connector.
-// See https://golang.org/pkg/database/sql/#OpenDB.
-func NewConnector(name string) (driver.Connector, error) {
- return &connector{name: name}, nil
-}
diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go
deleted file mode 100644
index 345c2398..00000000
--- a/vendor/github.com/lib/pq/copy.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package pq
-
-import (
- "database/sql/driver"
- "encoding/binary"
- "errors"
- "fmt"
- "sync"
-)
-
-var (
- errCopyInClosed = errors.New("pq: copyin statement has already been closed")
- errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
- errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
- errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
- errCopyInProgress = errors.New("pq: COPY in progress")
-)
-
-// CopyIn creates a COPY FROM statement which can be prepared with
-// Tx.Prepare(). The target table should be visible in search_path.
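-//
-// A minimal sketch, with hypothetical table and column names:
-//
-//	stmt, err := tx.Prepare(pq.CopyIn("users", "name", "email"))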
-func CopyIn(table string, columns ...string) string {
- stmt := "COPY " + QuoteIdentifier(table) + " ("
- for i, col := range columns {
- if i != 0 {
- stmt += ", "
- }
- stmt += QuoteIdentifier(col)
- }
- stmt += ") FROM STDIN"
- return stmt
-}
-
-// CopyInSchema creates a COPY FROM statement which can be prepared with
-// Tx.Prepare().
-func CopyInSchema(schema, table string, columns ...string) string {
- stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
- for i, col := range columns {
- if i != 0 {
- stmt += ", "
- }
- stmt += QuoteIdentifier(col)
- }
- stmt += ") FROM STDIN"
- return stmt
-}
-
-type copyin struct {
- cn *conn
- buffer []byte
- rowData chan []byte
- done chan bool
-
- closed bool
-
- sync.Mutex // guards err
- err error
-}
-
-const ciBufferSize = 64 * 1024
-
-// flush buffer before the buffer is filled up and needs reallocation
-const ciBufferFlushSize = 63 * 1024
-
-func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
- if !cn.isInTransaction() {
- return nil, errCopyNotSupportedOutsideTxn
- }
-
-	ci := &copyin{
- cn: cn,
- buffer: make([]byte, 0, ciBufferSize),
- rowData: make(chan []byte),
- done: make(chan bool, 1),
- }
- // add CopyData identifier + 4 bytes for message length
- ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
-
- b := cn.writeBuf('Q')
- b.string(q)
- cn.send(b)
-
-awaitCopyInResponse:
- for {
- t, r := cn.recv1()
- switch t {
- case 'G':
- if r.byte() != 0 {
- err = errBinaryCopyNotSupported
- break awaitCopyInResponse
- }
- go ci.resploop()
- return ci, nil
- case 'H':
- err = errCopyToNotSupported
- break awaitCopyInResponse
- case 'E':
- err = parseError(r)
- case 'Z':
- if err == nil {
- ci.setBad()
- errorf("unexpected ReadyForQuery in response to COPY")
- }
- cn.processReadyForQuery(r)
- return nil, err
- default:
- ci.setBad()
- errorf("unknown response for copy query: %q", t)
- }
- }
-
- // something went wrong, abort COPY before we return
- b = cn.writeBuf('f')
- b.string(err.Error())
- cn.send(b)
-
- for {
- t, r := cn.recv1()
- switch t {
- case 'c', 'C', 'E':
- case 'Z':
- // correctly aborted, we're done
- cn.processReadyForQuery(r)
- return nil, err
- default:
- ci.setBad()
- errorf("unknown response for CopyFail: %q", t)
- }
- }
-}
-
-func (ci *copyin) flush(buf []byte) {
- // set message length (without message identifier)
- binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
-
- _, err := ci.cn.c.Write(buf)
- if err != nil {
- panic(err)
- }
-}
-
-func (ci *copyin) resploop() {
- for {
- var r readBuf
- t, err := ci.cn.recvMessage(&r)
- if err != nil {
- ci.setBad()
- ci.setError(err)
- ci.done <- true
- return
- }
- switch t {
- case 'C':
- // complete
- case 'N':
- // NoticeResponse
- case 'Z':
- ci.cn.processReadyForQuery(&r)
- ci.done <- true
- return
- case 'E':
- err := parseError(&r)
- ci.setError(err)
- default:
- ci.setBad()
- ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
- ci.done <- true
- return
- }
- }
-}
-
-func (ci *copyin) setBad() {
- ci.Lock()
- ci.cn.bad = true
- ci.Unlock()
-}
-
-func (ci *copyin) isBad() bool {
- ci.Lock()
- b := ci.cn.bad
- ci.Unlock()
- return b
-}
-
-func (ci *copyin) isErrorSet() bool {
- ci.Lock()
- isSet := (ci.err != nil)
- ci.Unlock()
- return isSet
-}
-
-// setError() sets ci.err if one has not been set already. Caller must not be
-// holding ci.Mutex.
-func (ci *copyin) setError(err error) {
- ci.Lock()
- if ci.err == nil {
- ci.err = err
- }
- ci.Unlock()
-}
-
-func (ci *copyin) NumInput() int {
- return -1
-}
-
-func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
- return nil, ErrNotSupported
-}
-
-// Exec inserts values into the COPY stream. The insert is asynchronous
-// and Exec can return errors from previous Exec calls to the same
-// COPY stmt.
-//
-// You need to call Exec(nil) to sync the COPY stream and to get any
-// errors from pending data, since Stmt.Close() doesn't return errors
-// to the user.
-func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
- if ci.closed {
- return nil, errCopyInClosed
- }
-
- if ci.isBad() {
- return nil, driver.ErrBadConn
- }
- defer ci.cn.errRecover(&err)
-
- if ci.isErrorSet() {
- return nil, ci.err
- }
-
- if len(v) == 0 {
- return nil, ci.Close()
- }
-
- numValues := len(v)
- for i, value := range v {
- ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
- if i < numValues-1 {
- ci.buffer = append(ci.buffer, '\t')
- }
- }
-
- ci.buffer = append(ci.buffer, '\n')
-
- if len(ci.buffer) > ciBufferFlushSize {
- ci.flush(ci.buffer)
- // reset buffer, keep bytes for message identifier and length
- ci.buffer = ci.buffer[:5]
- }
-
- return driver.RowsAffected(0), nil
-}
-
-func (ci *copyin) Close() (err error) {
- if ci.closed { // Don't do anything, we're already closed
- return nil
- }
- ci.closed = true
-
- if ci.isBad() {
- return driver.ErrBadConn
- }
- defer ci.cn.errRecover(&err)
-
- if len(ci.buffer) > 0 {
- ci.flush(ci.buffer)
- }
- // Avoid touching the scratch buffer as resploop could be using it.
- err = ci.cn.sendSimpleMessage('c')
- if err != nil {
- return err
- }
-
- <-ci.done
- ci.cn.inCopy = false
-
- if ci.isErrorSet() {
- err = ci.err
- return err
- }
- return nil
-}
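
CopyIn and CopyInSchema above only build statement text; all COPY protocol handling happens when the prepared statement is executed. A quick sketch of the strings they produce (identifiers are double-quoted by QuoteIdentifier; the table and column names here are hypothetical):

	// pq.CopyIn("users", "name", "age") returns:
	//   COPY "users" ("name", "age") FROM STDIN
	// pq.CopyInSchema("public", "users", "name", "age") returns:
	//   COPY "public"."users" ("name", "age") FROM STDIN
	stmt := pq.CopyIn("users", "name", "age")
	_ = stmt // prepare this inside a transaction, per prepareCopyIn above
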
diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go
deleted file mode 100644
index a1b02971..00000000
--- a/vendor/github.com/lib/pq/doc.go
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
-Package pq is a pure Go Postgres driver for the database/sql package.
-
-In most cases clients will use the database/sql package instead of
-using this package directly. For example:
-
- import (
- "database/sql"
-
- _ "github.com/lib/pq"
- )
-
- func main() {
- connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full"
- db, err := sql.Open("postgres", connStr)
- if err != nil {
- log.Fatal(err)
- }
-
- age := 21
- rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
- …
- }
-
-You can also connect to a database using a URL. For example:
-
- connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full"
- db, err := sql.Open("postgres", connStr)
-
-
-Connection String Parameters
-
-
-Similarly to libpq, when establishing a connection using pq you are expected to
-supply a connection string containing zero or more parameters.
-A subset of the connection parameters supported by libpq are also supported by pq.
-Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
-directly in the connection string. This is different from libpq, which does not allow
-run-time parameters in the connection string, instead requiring you to supply
-them in the options parameter.
-
-For compatibility with libpq, the following special connection parameters are
-supported:
-
- * dbname - The name of the database to connect to
- * user - The user to sign in as
- * password - The user's password
- * host - The host to connect to. Values that start with / are for unix
- domain sockets. (default is localhost)
- * port - The port to connect to. (default is 5432)
- * sslmode - Whether or not to use SSL (default is require, this is not
- the default for libpq)
- * fallback_application_name - An application_name to fall back to if one isn't provided.
- * connect_timeout - Maximum wait for connection, in seconds. Zero or
- not specified means wait indefinitely.
- * sslcert - Cert file location. The file must contain PEM encoded data.
- * sslkey - Key file location. The file must contain PEM encoded data.
- * sslrootcert - The location of the root certificate file. The file
- must contain PEM encoded data.
-
-Valid values for sslmode are:
-
- * disable - No SSL
- * require - Always SSL (skip verification)
- * verify-ca - Always SSL (verify that the certificate presented by the
- server was signed by a trusted CA)
- * verify-full - Always SSL (verify that the certificate presented by
- the server was signed by a trusted CA and the server host name
- matches the one in the certificate)
-
-See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
-for more information about connection string parameters.
-
-Use single quotes for values that contain whitespace:
-
- "user=pqgotest password='with spaces'"
-
-A backslash will escape the next character in values:
-
- "user=space\ man password='it\'s valid'"
-
-Note that the connection parameter client_encoding (which sets the
-text encoding for the connection) may be set but must be "UTF8",
-following the same rules as Postgres. It is an error to provide
-any other value.
-
-In addition to the parameters listed above, any run-time parameter that can be
-set at backend start time can be set in the connection string. For more
-information, see
-http://www.postgresql.org/docs/current/static/runtime-config.html.
-
-Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
-supported by libpq are also supported by pq. If any of the environment
-variables not supported by pq are set, pq will panic during connection
-establishment. Environment variables have a lower precedence than explicitly
-provided connection parameters.
-
-The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
-is supported, but on Windows PGPASSFILE must be specified explicitly.
-
-
-Queries
-
-
-database/sql does not dictate any specific format for parameter
-markers in query strings, and pq uses the Postgres-native ordinal markers,
-as shown above. The same marker can be reused for the same parameter:
-
- rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
- OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
-
-pq does not support the LastInsertId() method of the Result type in database/sql.
-To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
-RETURNING clause with a standard Query or QueryRow call:
-
- var userid int
- err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
- VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
-
-For more details on RETURNING, see the Postgres documentation:
-
- http://www.postgresql.org/docs/current/static/sql-insert.html
- http://www.postgresql.org/docs/current/static/sql-update.html
- http://www.postgresql.org/docs/current/static/sql-delete.html
-
-For additional instructions on querying see the documentation for the database/sql package.
-
-
-Data Types
-
-
-Parameters pass through driver.DefaultParameterConverter before they are handled
-by this package. When the binary_parameters connection option is enabled,
-[]byte values are sent directly to the backend as data in binary format.
-
-This package returns the following types for values from the PostgreSQL backend:
-
- - integer types smallint, integer, and bigint are returned as int64
- - floating-point types real and double precision are returned as float64
- - character types char, varchar, and text are returned as string
- - temporal types date, time, timetz, timestamp, and timestamptz are
- returned as time.Time
- - the boolean type is returned as bool
- - the bytea type is returned as []byte
-
-All other types are returned directly from the backend as []byte values in text format.
-
-
-Errors
-
-
-pq may return errors of type *pq.Error which can be interrogated for error details:
-
- if err, ok := err.(*pq.Error); ok {
- fmt.Println("pq error:", err.Code.Name())
- }
-
-See the pq.Error type for details.
-
-
-Bulk imports
-
-You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
-pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
-handle can then be repeatedly "executed" to copy data into the target table.
-After all data has been processed you should call Exec() once with no arguments
-to flush all buffered data. Any call to Exec() might return an error which
-should be handled appropriately, but because of the internal buffering an error
-returned by Exec() might not be related to the data passed in the call that
-failed.
-
-CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
-explicit transaction in pq.
-
-Usage example:
-
- txn, err := db.Begin()
- if err != nil {
- log.Fatal(err)
- }
-
- stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
- if err != nil {
- log.Fatal(err)
- }
-
- for _, user := range users {
- _, err = stmt.Exec(user.Name, int64(user.Age))
- if err != nil {
- log.Fatal(err)
- }
- }
-
- _, err = stmt.Exec()
- if err != nil {
- log.Fatal(err)
- }
-
- err = stmt.Close()
- if err != nil {
- log.Fatal(err)
- }
-
- err = txn.Commit()
- if err != nil {
- log.Fatal(err)
- }
-
-
-Notifications
-
-
-PostgreSQL supports a simple publish/subscribe model over database
-connections. See http://www.postgresql.org/docs/current/static/sql-notify.html
-for more information about the general mechanism.
-
-To start listening for notifications, you first have to open a new connection
-to the database by calling NewListener. This connection cannot be used for
-anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
-channel"; once a notification channel is open, a notification generated on that
-channel will effect a send on the Listener.Notify channel. A notification
-channel will remain open until Unlisten is called, though connection loss might
-result in some notifications being lost. To solve this problem, Listener sends
-a nil pointer over the Notify channel any time the connection is re-established
-following a connection loss. The application can get information about the
-state of the underlying connection by setting an event callback in the call to
-NewListener.
-
-A single Listener can safely be used from concurrent goroutines, which means
-that there is often no need to create more than one Listener in your
-application. However, a Listener is always connected to a single database, so
-you will need to create a new Listener instance for every database you want to
-receive notifications in.
-
-The channel name in both Listen and Unlisten is case sensitive, and can contain
-any characters legal in an identifier (see
-http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
-for more information). Note that the channel name will be truncated to 63
-bytes by the PostgreSQL server.
-
-You can find a complete, working example of Listener usage at
-http://godoc.org/github.com/lib/pq/example/listen.
-
-*/
-package pq
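
The Notifications section above describes the Listener API without an inline example. A minimal sketch of the flow it outlines, assuming a channel named "events" and treating the connection string as a placeholder:

	import (
		"fmt"
		"log"
		"time"

		"github.com/lib/pq"
	)

	func waitForEvents(connStr string) {
		// A nil event callback is accepted; pass one to observe reconnects.
		l := pq.NewListener(connStr, 10*time.Second, time.Minute, nil)
		if err := l.Listen("events"); err != nil {
			log.Fatal(err)
		}
		for n := range l.Notify {
			if n == nil {
				// A nil notification is sent after the connection is
				// re-established; see the Notifications section above.
				continue
			}
			fmt.Printf("notification on %q: %s\n", n.Channel, n.Extra)
		}
	}
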
diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go
deleted file mode 100644
index 3b0d365f..00000000
--- a/vendor/github.com/lib/pq/encode.go
+++ /dev/null
@@ -1,603 +0,0 @@
-package pq
-
-import (
- "bytes"
- "database/sql/driver"
- "encoding/binary"
- "encoding/hex"
- "errors"
- "fmt"
- "math"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/lib/pq/oid"
-)
-
-func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
- switch v := x.(type) {
- case []byte:
- return v
- default:
- return encode(parameterStatus, x, oid.T_unknown)
- }
-}
-
-func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
- switch v := x.(type) {
- case int64:
- return strconv.AppendInt(nil, v, 10)
- case float64:
- return strconv.AppendFloat(nil, v, 'f', -1, 64)
- case []byte:
- if pgtypOid == oid.T_bytea {
- return encodeBytea(parameterStatus.serverVersion, v)
- }
-
- return v
- case string:
- if pgtypOid == oid.T_bytea {
- return encodeBytea(parameterStatus.serverVersion, []byte(v))
- }
-
- return []byte(v)
- case bool:
- return strconv.AppendBool(nil, v)
- case time.Time:
- return formatTs(v)
-
- default:
- errorf("encode: unknown type for %T", v)
- }
-
- panic("not reached")
-}
-
-func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
- switch f {
- case formatBinary:
- return binaryDecode(parameterStatus, s, typ)
- case formatText:
- return textDecode(parameterStatus, s, typ)
- default:
- panic("not reached")
- }
-}
-
-func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
- switch typ {
- case oid.T_bytea:
- return s
- case oid.T_int8:
- return int64(binary.BigEndian.Uint64(s))
- case oid.T_int4:
- return int64(int32(binary.BigEndian.Uint32(s)))
- case oid.T_int2:
- return int64(int16(binary.BigEndian.Uint16(s)))
- case oid.T_uuid:
- b, err := decodeUUIDBinary(s)
- if err != nil {
- panic(err)
- }
- return b
-
- default:
- errorf("don't know how to decode binary parameter of type %d", uint32(typ))
- }
-
- panic("not reached")
-}
-
-func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
- switch typ {
- case oid.T_char, oid.T_varchar, oid.T_text:
- return string(s)
- case oid.T_bytea:
- b, err := parseBytea(s)
- if err != nil {
- errorf("%s", err)
- }
- return b
- case oid.T_timestamptz:
- return parseTs(parameterStatus.currentLocation, string(s))
- case oid.T_timestamp, oid.T_date:
- return parseTs(nil, string(s))
- case oid.T_time:
- return mustParse("15:04:05", typ, s)
- case oid.T_timetz:
- return mustParse("15:04:05-07", typ, s)
- case oid.T_bool:
- return s[0] == 't'
- case oid.T_int8, oid.T_int4, oid.T_int2:
- i, err := strconv.ParseInt(string(s), 10, 64)
- if err != nil {
- errorf("%s", err)
- }
- return i
- case oid.T_float4, oid.T_float8:
- bits := 64
- if typ == oid.T_float4 {
- bits = 32
- }
- f, err := strconv.ParseFloat(string(s), bits)
- if err != nil {
- errorf("%s", err)
- }
- return f
- }
-
- return s
-}
-
-// appendEncodedText encodes item in text format as required by COPY
-// and appends to buf
-func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
- switch v := x.(type) {
- case int64:
- return strconv.AppendInt(buf, v, 10)
- case float64:
- return strconv.AppendFloat(buf, v, 'f', -1, 64)
- case []byte:
- encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
- return appendEscapedText(buf, string(encodedBytea))
- case string:
- return appendEscapedText(buf, v)
- case bool:
- return strconv.AppendBool(buf, v)
- case time.Time:
- return append(buf, formatTs(v)...)
- case nil:
- return append(buf, "\\N"...)
- default:
- errorf("encode: unknown type for %T", v)
- }
-
- panic("not reached")
-}
-
-func appendEscapedText(buf []byte, text string) []byte {
- escapeNeeded := false
- startPos := 0
- var c byte
-
- // check if we need to escape
- for i := 0; i < len(text); i++ {
- c = text[i]
- if c == '\\' || c == '\n' || c == '\r' || c == '\t' {
- escapeNeeded = true
- startPos = i
- break
- }
- }
- if !escapeNeeded {
- return append(buf, text...)
- }
-
- // copy till first char to escape, iterate the rest
- result := append(buf, text[:startPos]...)
- for i := startPos; i < len(text); i++ {
- c = text[i]
- switch c {
- case '\\':
- result = append(result, '\\', '\\')
- case '\n':
- result = append(result, '\\', 'n')
- case '\r':
- result = append(result, '\\', 'r')
- case '\t':
- result = append(result, '\\', 't')
- default:
- result = append(result, c)
- }
- }
- return result
-}
-
-func mustParse(f string, typ oid.Oid, s []byte) time.Time {
- str := string(s)
-
- // check for a 30-minute-offset timezone
- if (typ == oid.T_timestamptz || typ == oid.T_timetz) &&
- str[len(str)-3] == ':' {
- f += ":00"
- }
- t, err := time.Parse(f, str)
- if err != nil {
- errorf("decode: %s", err)
- }
- return t
-}
-
-var errInvalidTimestamp = errors.New("invalid timestamp")
-
-type timestampParser struct {
- err error
-}
-
-func (p *timestampParser) expect(str string, char byte, pos int) {
- if p.err != nil {
- return
- }
- if pos+1 > len(str) {
- p.err = errInvalidTimestamp
- return
- }
- if c := str[pos]; c != char && p.err == nil {
- p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
- }
-}
-
-func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
- if p.err != nil {
- return 0
- }
- if begin < 0 || end < 0 || begin > end || end > len(str) {
- p.err = errInvalidTimestamp
- return 0
- }
- result, err := strconv.Atoi(str[begin:end])
- if err != nil {
- if p.err == nil {
- p.err = fmt.Errorf("expected number; got '%v'", str)
- }
- return 0
- }
- return result
-}
-
-// The location cache caches the time zones typically used by the client.
-type locationCache struct {
- cache map[int]*time.Location
- lock sync.Mutex
-}
-
-// All connections share the same list of timezones. Benchmarking shows that
-// about 5% speed could be gained by putting the cache in the connection and
-// losing the mutex, at the cost of a small amount of memory and a somewhat
-// significant increase in code complexity.
-var globalLocationCache = newLocationCache()
-
-func newLocationCache() *locationCache {
- return &locationCache{cache: make(map[int]*time.Location)}
-}
-
-// Returns the cached timezone for the specified offset, creating and caching
-// it if necessary.
-func (c *locationCache) getLocation(offset int) *time.Location {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- location, ok := c.cache[offset]
- if !ok {
- location = time.FixedZone("", offset)
- c.cache[offset] = location
- }
-
- return location
-}
-
-var infinityTsEnabled = false
-var infinityTsNegative time.Time
-var infinityTsPositive time.Time
-
-const (
- infinityTsEnabledAlready = "pq: infinity timestamp enabled already"
- infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
-)
-
-// EnableInfinityTs controls the handling of Postgres' "-infinity" and
-// "infinity" "timestamp"s.
-//
-// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
-// []byte("-infinity") and []byte("infinity") respectively, and potentially
-// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
-// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
-//
-// Once EnableInfinityTs has been called, all connections created using this
-// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
-// "timestamp with time zone" and "date" types to the predefined minimum and
-// maximum times, respectively. When encoding time.Time values, any time which
-// equals or precedes the predefined minimum time will be encoded to
-// "-infinity". Any values at or past the maximum time will similarly be
-// encoded to "infinity".
-//
-// If EnableInfinityTs is called with negative >= positive, it will panic.
-// Calling EnableInfinityTs after a connection has been established results in
-// undefined behavior. If EnableInfinityTs is called more than once, it will
-// panic.
-func EnableInfinityTs(negative time.Time, positive time.Time) {
- if infinityTsEnabled {
- panic(infinityTsEnabledAlready)
- }
- if !negative.Before(positive) {
- panic(infinityTsNegativeMustBeSmaller)
- }
- infinityTsEnabled = true
- infinityTsNegative = negative
- infinityTsPositive = positive
-}
-
-/*
- * Testing might want to toggle infinityTsEnabled
- */
-func disableInfinityTs() {
- infinityTsEnabled = false
-}
-
-// This is a time function specific to the Postgres default DateStyle
-// setting ("ISO, MDY"), the only one we currently support. This
-// accounts for the discrepancies between the parsing available with
-// time.Parse and the Postgres date formatting quirks.
-func parseTs(currentLocation *time.Location, str string) interface{} {
- switch str {
- case "-infinity":
- if infinityTsEnabled {
- return infinityTsNegative
- }
- return []byte(str)
- case "infinity":
- if infinityTsEnabled {
- return infinityTsPositive
- }
- return []byte(str)
- }
- t, err := ParseTimestamp(currentLocation, str)
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// ParseTimestamp parses Postgres' text format. It returns a time.Time in
-// currentLocation iff that time's offset agrees with the offset sent from the
-// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
-// fixed offset provided by the Postgres server.
-func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
- p := timestampParser{}
-
- monSep := strings.IndexRune(str, '-')
- // this is Gregorian year, not ISO Year
- // In Gregorian system, the year 1 BC is followed by AD 1
- year := p.mustAtoi(str, 0, monSep)
- daySep := monSep + 3
- month := p.mustAtoi(str, monSep+1, daySep)
- p.expect(str, '-', daySep)
- timeSep := daySep + 3
- day := p.mustAtoi(str, daySep+1, timeSep)
-
- minLen := monSep + len("01-01") + 1
-
- isBC := strings.HasSuffix(str, " BC")
- if isBC {
- minLen += 3
- }
-
- var hour, minute, second int
- if len(str) > minLen {
- p.expect(str, ' ', timeSep)
- minSep := timeSep + 3
- p.expect(str, ':', minSep)
- hour = p.mustAtoi(str, timeSep+1, minSep)
- secSep := minSep + 3
- p.expect(str, ':', secSep)
- minute = p.mustAtoi(str, minSep+1, secSep)
- secEnd := secSep + 3
- second = p.mustAtoi(str, secSep+1, secEnd)
- }
- remainderIdx := monSep + len("01-01 00:00:00") + 1
- // Three optional (but ordered) sections follow: the
- // fractional seconds, the time zone offset, and the BC
- // designation. We set them up here and adjust the other
- // offsets if the preceding sections exist.
-
- nanoSec := 0
- tzOff := 0
-
- if remainderIdx < len(str) && str[remainderIdx] == '.' {
- fracStart := remainderIdx + 1
- fracOff := strings.IndexAny(str[fracStart:], "-+ ")
- if fracOff < 0 {
- fracOff = len(str) - fracStart
- }
- fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
- nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
-
- remainderIdx += fracOff + 1
- }
- if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
- // time zone separator is always '-' or '+' (UTC is +00)
- var tzSign int
- switch c := str[tzStart]; c {
- case '-':
- tzSign = -1
- case '+':
- tzSign = +1
- default:
- return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
- }
- tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
- remainderIdx += 3
- var tzMin, tzSec int
- if remainderIdx < len(str) && str[remainderIdx] == ':' {
- tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
- remainderIdx += 3
- }
- if remainderIdx < len(str) && str[remainderIdx] == ':' {
- tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
- remainderIdx += 3
- }
- tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
- }
- var isoYear int
-
- if isBC {
- isoYear = 1 - year
- remainderIdx += 3
- } else {
- isoYear = year
- }
- if remainderIdx < len(str) {
- return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
- }
- t := time.Date(isoYear, time.Month(month), day,
- hour, minute, second, nanoSec,
- globalLocationCache.getLocation(tzOff))
-
- if currentLocation != nil {
- // Set the location of the returned Time based on the session's
- // TimeZone value, but only if the local time zone database agrees with
- // the remote database on the offset.
- lt := t.In(currentLocation)
- _, newOff := lt.Zone()
- if newOff == tzOff {
- t = lt
- }
- }
-
- return t, p.err
-}
-
-// formatTs formats t into a format postgres understands.
-func formatTs(t time.Time) []byte {
- if infinityTsEnabled {
- // t <= -infinity : ! (t > -infinity)
- if !t.After(infinityTsNegative) {
- return []byte("-infinity")
- }
- // t >= infinity : ! (!t < infinity)
- if !t.Before(infinityTsPositive) {
- return []byte("infinity")
- }
- }
- return FormatTimestamp(t)
-}
-
-// FormatTimestamp formats t into Postgres' text format for timestamps.
-func FormatTimestamp(t time.Time) []byte {
- // Need to send dates before 0001 A.D. with " BC" suffix, instead of the
- // minus sign preferred by Go.
- // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
- bc := false
- if t.Year() <= 0 {
- // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
- t = t.AddDate((-t.Year())*2+1, 0, 0)
- bc = true
- }
- b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
-
- _, offset := t.Zone()
- offset = offset % 60
- if offset != 0 {
- // RFC3339Nano already printed the minus sign
- if offset < 0 {
- offset = -offset
- }
-
- b = append(b, ':')
- if offset < 10 {
- b = append(b, '0')
- }
- b = strconv.AppendInt(b, int64(offset), 10)
- }
-
- if bc {
- b = append(b, " BC"...)
- }
- return b
-}
-
-// Parse a bytea value received from the server. Both "hex" and the legacy
-// "escape" format are supported.
-func parseBytea(s []byte) (result []byte, err error) {
- if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
- // bytea_output = hex
- s = s[2:] // trim off leading "\\x"
- result = make([]byte, hex.DecodedLen(len(s)))
- _, err := hex.Decode(result, s)
- if err != nil {
- return nil, err
- }
- } else {
- // bytea_output = escape
- for len(s) > 0 {
- if s[0] == '\\' {
- // escaped '\\'
- if len(s) >= 2 && s[1] == '\\' {
- result = append(result, '\\')
- s = s[2:]
- continue
- }
-
- // '\\' followed by an octal number
- if len(s) < 4 {
- return nil, fmt.Errorf("invalid bytea sequence %v", s)
- }
- r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
- if err != nil {
- return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
- }
- result = append(result, byte(r))
- s = s[4:]
- } else {
- // We hit an unescaped, raw byte. Try to read in as many as
- // possible in one go.
- i := bytes.IndexByte(s, '\\')
- if i == -1 {
- result = append(result, s...)
- break
- }
- result = append(result, s[:i]...)
- s = s[i:]
- }
- }
- }
-
- return result, nil
-}
-
-func encodeBytea(serverVersion int, v []byte) (result []byte) {
- if serverVersion >= 90000 {
- // Use the hex format if we know that the server supports it
- result = make([]byte, 2+hex.EncodedLen(len(v)))
- result[0] = '\\'
- result[1] = 'x'
- hex.Encode(result[2:], v)
- } else {
- // .. or resort to "escape"
- for _, b := range v {
- if b == '\\' {
- result = append(result, '\\', '\\')
- } else if b < 0x20 || b > 0x7e {
- result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
- } else {
- result = append(result, b)
- }
- }
- }
-
- return result
-}
-
-// NullTime represents a time.Time that may be null. NullTime implements the
-// sql.Scanner interface so it can be used as a scan destination, similar to
-// sql.NullString.
-type NullTime struct {
- Time time.Time
- Valid bool // Valid is true if Time is not NULL
-}
-
-// Scan implements the Scanner interface.
-func (nt *NullTime) Scan(value interface{}) error {
- nt.Time, nt.Valid = value.(time.Time)
- return nil
-}
-
-// Value implements the driver Valuer interface.
-func (nt NullTime) Value() (driver.Value, error) {
- if !nt.Valid {
- return nil, nil
- }
- return nt.Time, nil
-}
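
NullTime above mirrors sql.NullString for nullable timestamp columns. A small scan sketch (the table and column names are hypothetical):

	var deletedAt pq.NullTime
	err := db.QueryRow(`SELECT deleted_at FROM users WHERE id = $1`, 1).Scan(&deletedAt)
	if err != nil {
		log.Fatal(err)
	}
	if deletedAt.Valid {
		fmt.Println("deleted at:", deletedAt.Time)
	} else {
		// Scan leaves Valid false for NULL (or any non-time.Time value).
		fmt.Println("deleted_at is NULL")
	}
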
diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go
deleted file mode 100644
index 96aae29c..00000000
--- a/vendor/github.com/lib/pq/error.go
+++ /dev/null
@@ -1,515 +0,0 @@
-package pq
-
-import (
- "database/sql/driver"
- "fmt"
- "io"
- "net"
- "runtime"
-)
-
-// Error severities
-const (
- Efatal = "FATAL"
- Epanic = "PANIC"
- Ewarning = "WARNING"
- Enotice = "NOTICE"
- Edebug = "DEBUG"
- Einfo = "INFO"
- Elog = "LOG"
-)
-
-// Error represents an error communicating with the server.
-//
-// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
-type Error struct {
- Severity string
- Code ErrorCode
- Message string
- Detail string
- Hint string
- Position string
- InternalPosition string
- InternalQuery string
- Where string
- Schema string
- Table string
- Column string
- DataTypeName string
- Constraint string
- File string
- Line string
- Routine string
-}
-
-// ErrorCode is a five-character error code.
-type ErrorCode string
-
-// Name returns a more human friendly rendering of the error code, namely the
-// "condition name".
-//
-// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
-// details.
-func (ec ErrorCode) Name() string {
- return errorCodeNames[ec]
-}
-
-// ErrorClass is only the class part of an error code.
-type ErrorClass string
-
-// Name returns the condition name of an error class. It is equivalent to the
-// condition name of the "standard" error code (i.e. the one having the last
-// three characters "000").
-func (ec ErrorClass) Name() string {
- return errorCodeNames[ErrorCode(ec+"000")]
-}
-
-// Class returns the error class, e.g. "28".
-//
-// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
-// details.
-func (ec ErrorCode) Class() ErrorClass {
- return ErrorClass(ec[0:2])
-}
-
-// errorCodeNames is a mapping between the five-character error codes and the
-// human readable "condition names". It is derived from the list at
-// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
-var errorCodeNames = map[ErrorCode]string{
- // Class 00 - Successful Completion
- "00000": "successful_completion",
- // Class 01 - Warning
- "01000": "warning",
- "0100C": "dynamic_result_sets_returned",
- "01008": "implicit_zero_bit_padding",
- "01003": "null_value_eliminated_in_set_function",
- "01007": "privilege_not_granted",
- "01006": "privilege_not_revoked",
- "01004": "string_data_right_truncation",
- "01P01": "deprecated_feature",
- // Class 02 - No Data (this is also a warning class per the SQL standard)
- "02000": "no_data",
- "02001": "no_additional_dynamic_result_sets_returned",
- // Class 03 - SQL Statement Not Yet Complete
- "03000": "sql_statement_not_yet_complete",
- // Class 08 - Connection Exception
- "08000": "connection_exception",
- "08003": "connection_does_not_exist",
- "08006": "connection_failure",
- "08001": "sqlclient_unable_to_establish_sqlconnection",
- "08004": "sqlserver_rejected_establishment_of_sqlconnection",
- "08007": "transaction_resolution_unknown",
- "08P01": "protocol_violation",
- // Class 09 - Triggered Action Exception
- "09000": "triggered_action_exception",
- // Class 0A - Feature Not Supported
- "0A000": "feature_not_supported",
- // Class 0B - Invalid Transaction Initiation
- "0B000": "invalid_transaction_initiation",
- // Class 0F - Locator Exception
- "0F000": "locator_exception",
- "0F001": "invalid_locator_specification",
- // Class 0L - Invalid Grantor
- "0L000": "invalid_grantor",
- "0LP01": "invalid_grant_operation",
- // Class 0P - Invalid Role Specification
- "0P000": "invalid_role_specification",
- // Class 0Z - Diagnostics Exception
- "0Z000": "diagnostics_exception",
- "0Z002": "stacked_diagnostics_accessed_without_active_handler",
- // Class 20 - Case Not Found
- "20000": "case_not_found",
- // Class 21 - Cardinality Violation
- "21000": "cardinality_violation",
- // Class 22 - Data Exception
- "22000": "data_exception",
- "2202E": "array_subscript_error",
- "22021": "character_not_in_repertoire",
- "22008": "datetime_field_overflow",
- "22012": "division_by_zero",
- "22005": "error_in_assignment",
- "2200B": "escape_character_conflict",
- "22022": "indicator_overflow",
- "22015": "interval_field_overflow",
- "2201E": "invalid_argument_for_logarithm",
- "22014": "invalid_argument_for_ntile_function",
- "22016": "invalid_argument_for_nth_value_function",
- "2201F": "invalid_argument_for_power_function",
- "2201G": "invalid_argument_for_width_bucket_function",
- "22018": "invalid_character_value_for_cast",
- "22007": "invalid_datetime_format",
- "22019": "invalid_escape_character",
- "2200D": "invalid_escape_octet",
- "22025": "invalid_escape_sequence",
- "22P06": "nonstandard_use_of_escape_character",
- "22010": "invalid_indicator_parameter_value",
- "22023": "invalid_parameter_value",
- "2201B": "invalid_regular_expression",
- "2201W": "invalid_row_count_in_limit_clause",
- "2201X": "invalid_row_count_in_result_offset_clause",
- "22009": "invalid_time_zone_displacement_value",
- "2200C": "invalid_use_of_escape_character",
- "2200G": "most_specific_type_mismatch",
- "22004": "null_value_not_allowed",
- "22002": "null_value_no_indicator_parameter",
- "22003": "numeric_value_out_of_range",
- "2200H": "sequence_generator_limit_exceeded",
- "22026": "string_data_length_mismatch",
- "22001": "string_data_right_truncation",
- "22011": "substring_error",
- "22027": "trim_error",
- "22024": "unterminated_c_string",
- "2200F": "zero_length_character_string",
- "22P01": "floating_point_exception",
- "22P02": "invalid_text_representation",
- "22P03": "invalid_binary_representation",
- "22P04": "bad_copy_file_format",
- "22P05": "untranslatable_character",
- "2200L": "not_an_xml_document",
- "2200M": "invalid_xml_document",
- "2200N": "invalid_xml_content",
- "2200S": "invalid_xml_comment",
- "2200T": "invalid_xml_processing_instruction",
- // Class 23 - Integrity Constraint Violation
- "23000": "integrity_constraint_violation",
- "23001": "restrict_violation",
- "23502": "not_null_violation",
- "23503": "foreign_key_violation",
- "23505": "unique_violation",
- "23514": "check_violation",
- "23P01": "exclusion_violation",
- // Class 24 - Invalid Cursor State
- "24000": "invalid_cursor_state",
- // Class 25 - Invalid Transaction State
- "25000": "invalid_transaction_state",
- "25001": "active_sql_transaction",
- "25002": "branch_transaction_already_active",
- "25008": "held_cursor_requires_same_isolation_level",
- "25003": "inappropriate_access_mode_for_branch_transaction",
- "25004": "inappropriate_isolation_level_for_branch_transaction",
- "25005": "no_active_sql_transaction_for_branch_transaction",
- "25006": "read_only_sql_transaction",
- "25007": "schema_and_data_statement_mixing_not_supported",
- "25P01": "no_active_sql_transaction",
- "25P02": "in_failed_sql_transaction",
- // Class 26 - Invalid SQL Statement Name
- "26000": "invalid_sql_statement_name",
- // Class 27 - Triggered Data Change Violation
- "27000": "triggered_data_change_violation",
- // Class 28 - Invalid Authorization Specification
- "28000": "invalid_authorization_specification",
- "28P01": "invalid_password",
- // Class 2B - Dependent Privilege Descriptors Still Exist
- "2B000": "dependent_privilege_descriptors_still_exist",
- "2BP01": "dependent_objects_still_exist",
- // Class 2D - Invalid Transaction Termination
- "2D000": "invalid_transaction_termination",
- // Class 2F - SQL Routine Exception
- "2F000": "sql_routine_exception",
- "2F005": "function_executed_no_return_statement",
- "2F002": "modifying_sql_data_not_permitted",
- "2F003": "prohibited_sql_statement_attempted",
- "2F004": "reading_sql_data_not_permitted",
- // Class 34 - Invalid Cursor Name
- "34000": "invalid_cursor_name",
- // Class 38 - External Routine Exception
- "38000": "external_routine_exception",
- "38001": "containing_sql_not_permitted",
- "38002": "modifying_sql_data_not_permitted",
- "38003": "prohibited_sql_statement_attempted",
- "38004": "reading_sql_data_not_permitted",
- // Class 39 - External Routine Invocation Exception
- "39000": "external_routine_invocation_exception",
- "39001": "invalid_sqlstate_returned",
- "39004": "null_value_not_allowed",
- "39P01": "trigger_protocol_violated",
- "39P02": "srf_protocol_violated",
- // Class 3B - Savepoint Exception
- "3B000": "savepoint_exception",
- "3B001": "invalid_savepoint_specification",
- // Class 3D - Invalid Catalog Name
- "3D000": "invalid_catalog_name",
- // Class 3F - Invalid Schema Name
- "3F000": "invalid_schema_name",
- // Class 40 - Transaction Rollback
- "40000": "transaction_rollback",
- "40002": "transaction_integrity_constraint_violation",
- "40001": "serialization_failure",
- "40003": "statement_completion_unknown",
- "40P01": "deadlock_detected",
- // Class 42 - Syntax Error or Access Rule Violation
- "42000": "syntax_error_or_access_rule_violation",
- "42601": "syntax_error",
- "42501": "insufficient_privilege",
- "42846": "cannot_coerce",
- "42803": "grouping_error",
- "42P20": "windowing_error",
- "42P19": "invalid_recursion",
- "42830": "invalid_foreign_key",
- "42602": "invalid_name",
- "42622": "name_too_long",
- "42939": "reserved_name",
- "42804": "datatype_mismatch",
- "42P18": "indeterminate_datatype",
- "42P21": "collation_mismatch",
- "42P22": "indeterminate_collation",
- "42809": "wrong_object_type",
- "42703": "undefined_column",
- "42883": "undefined_function",
- "42P01": "undefined_table",
- "42P02": "undefined_parameter",
- "42704": "undefined_object",
- "42701": "duplicate_column",
- "42P03": "duplicate_cursor",
- "42P04": "duplicate_database",
- "42723": "duplicate_function",
- "42P05": "duplicate_prepared_statement",
- "42P06": "duplicate_schema",
- "42P07": "duplicate_table",
- "42712": "duplicate_alias",
- "42710": "duplicate_object",
- "42702": "ambiguous_column",
- "42725": "ambiguous_function",
- "42P08": "ambiguous_parameter",
- "42P09": "ambiguous_alias",
- "42P10": "invalid_column_reference",
- "42611": "invalid_column_definition",
- "42P11": "invalid_cursor_definition",
- "42P12": "invalid_database_definition",
- "42P13": "invalid_function_definition",
- "42P14": "invalid_prepared_statement_definition",
- "42P15": "invalid_schema_definition",
- "42P16": "invalid_table_definition",
- "42P17": "invalid_object_definition",
- // Class 44 - WITH CHECK OPTION Violation
- "44000": "with_check_option_violation",
- // Class 53 - Insufficient Resources
- "53000": "insufficient_resources",
- "53100": "disk_full",
- "53200": "out_of_memory",
- "53300": "too_many_connections",
- "53400": "configuration_limit_exceeded",
- // Class 54 - Program Limit Exceeded
- "54000": "program_limit_exceeded",
- "54001": "statement_too_complex",
- "54011": "too_many_columns",
- "54023": "too_many_arguments",
- // Class 55 - Object Not In Prerequisite State
- "55000": "object_not_in_prerequisite_state",
- "55006": "object_in_use",
- "55P02": "cant_change_runtime_param",
- "55P03": "lock_not_available",
- // Class 57 - Operator Intervention
- "57000": "operator_intervention",
- "57014": "query_canceled",
- "57P01": "admin_shutdown",
- "57P02": "crash_shutdown",
- "57P03": "cannot_connect_now",
- "57P04": "database_dropped",
- // Class 58 - System Error (errors external to PostgreSQL itself)
- "58000": "system_error",
- "58030": "io_error",
- "58P01": "undefined_file",
- "58P02": "duplicate_file",
- // Class F0 - Configuration File Error
- "F0000": "config_file_error",
- "F0001": "lock_file_exists",
- // Class HV - Foreign Data Wrapper Error (SQL/MED)
- "HV000": "fdw_error",
- "HV005": "fdw_column_name_not_found",
- "HV002": "fdw_dynamic_parameter_value_needed",
- "HV010": "fdw_function_sequence_error",
- "HV021": "fdw_inconsistent_descriptor_information",
- "HV024": "fdw_invalid_attribute_value",
- "HV007": "fdw_invalid_column_name",
- "HV008": "fdw_invalid_column_number",
- "HV004": "fdw_invalid_data_type",
- "HV006": "fdw_invalid_data_type_descriptors",
- "HV091": "fdw_invalid_descriptor_field_identifier",
- "HV00B": "fdw_invalid_handle",
- "HV00C": "fdw_invalid_option_index",
- "HV00D": "fdw_invalid_option_name",
- "HV090": "fdw_invalid_string_length_or_buffer_length",
- "HV00A": "fdw_invalid_string_format",
- "HV009": "fdw_invalid_use_of_null_pointer",
- "HV014": "fdw_too_many_handles",
- "HV001": "fdw_out_of_memory",
- "HV00P": "fdw_no_schemas",
- "HV00J": "fdw_option_name_not_found",
- "HV00K": "fdw_reply_handle",
- "HV00Q": "fdw_schema_not_found",
- "HV00R": "fdw_table_not_found",
- "HV00L": "fdw_unable_to_create_execution",
- "HV00M": "fdw_unable_to_create_reply",
- "HV00N": "fdw_unable_to_establish_connection",
- // Class P0 - PL/pgSQL Error
- "P0000": "plpgsql_error",
- "P0001": "raise_exception",
- "P0002": "no_data_found",
- "P0003": "too_many_rows",
- // Class XX - Internal Error
- "XX000": "internal_error",
- "XX001": "data_corrupted",
- "XX002": "index_corrupted",
-}
-
-func parseError(r *readBuf) *Error {
- err := new(Error)
- for t := r.byte(); t != 0; t = r.byte() {
- msg := r.string()
- switch t {
- case 'S':
- err.Severity = msg
- case 'C':
- err.Code = ErrorCode(msg)
- case 'M':
- err.Message = msg
- case 'D':
- err.Detail = msg
- case 'H':
- err.Hint = msg
- case 'P':
- err.Position = msg
- case 'p':
- err.InternalPosition = msg
- case 'q':
- err.InternalQuery = msg
- case 'W':
- err.Where = msg
- case 's':
- err.Schema = msg
- case 't':
- err.Table = msg
- case 'c':
- err.Column = msg
- case 'd':
- err.DataTypeName = msg
- case 'n':
- err.Constraint = msg
- case 'F':
- err.File = msg
- case 'L':
- err.Line = msg
- case 'R':
- err.Routine = msg
- }
- }
- return err
-}
-
-// Fatal returns true if the Error Severity is fatal.
-func (err *Error) Fatal() bool {
- return err.Severity == Efatal
-}
-
-// Get implements the legacy PGError interface. New code should use the fields
-// of the Error struct directly.
-func (err *Error) Get(k byte) (v string) {
- switch k {
- case 'S':
- return err.Severity
- case 'C':
- return string(err.Code)
- case 'M':
- return err.Message
- case 'D':
- return err.Detail
- case 'H':
- return err.Hint
- case 'P':
- return err.Position
- case 'p':
- return err.InternalPosition
- case 'q':
- return err.InternalQuery
- case 'W':
- return err.Where
- case 's':
- return err.Schema
- case 't':
- return err.Table
- case 'c':
- return err.Column
- case 'd':
- return err.DataTypeName
- case 'n':
- return err.Constraint
- case 'F':
- return err.File
- case 'L':
- return err.Line
- case 'R':
- return err.Routine
- }
- return ""
-}
-
-func (err Error) Error() string {
- return "pq: " + err.Message
-}
-
-// PGError is an interface used by previous versions of pq. It is provided
-// only to support legacy code. New code should use the Error type.
-type PGError interface {
- Error() string
- Fatal() bool
- Get(k byte) (v string)
-}
-
-func errorf(s string, args ...interface{}) {
- panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
-}
-
-// TODO(ainar-g) Rename to errorf after removing panics.
-func fmterrorf(s string, args ...interface{}) error {
- return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))
-}
-
-func errRecoverNoErrBadConn(err *error) {
- e := recover()
- if e == nil {
- // Do nothing
- return
- }
- var ok bool
- *err, ok = e.(error)
- if !ok {
- *err = fmt.Errorf("pq: unexpected error: %#v", e)
- }
-}
-
-func (c *conn) errRecover(err *error) {
- e := recover()
- switch v := e.(type) {
- case nil:
- // Do nothing
- case runtime.Error:
- c.bad = true
- panic(v)
- case *Error:
- if v.Fatal() {
- *err = driver.ErrBadConn
- } else {
- *err = v
- }
- case *net.OpError:
- c.bad = true
- *err = v
- case error:
-		if v == io.EOF || v.Error() == "remote error: handshake failure" {
- *err = driver.ErrBadConn
- } else {
- *err = v
- }
-
- default:
- c.bad = true
- panic(fmt.Sprintf("unknown error: %#v", e))
- }
-
- // Any time we return ErrBadConn, we need to remember it since *Tx doesn't
- // mark the connection bad in database/sql.
- if *err == driver.ErrBadConn {
- c.bad = true
- }
-}
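
Since *pq.Error carries the five-character SQLSTATE, callers can branch on either the full code or its class. A sketch, assuming a table with a unique constraint (names are illustrative):

	_, err := db.Exec(`INSERT INTO users(email) VALUES ($1)`, "a@example.com")
	if pqErr, ok := err.(*pq.Error); ok {
		switch {
		case pqErr.Code.Name() == "unique_violation": // SQLSTATE 23505
			// duplicate key: handle gracefully
		case pqErr.Code.Class() == "23": // integrity_constraint_violation class
			// some other constraint failure
		}
	}
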
diff --git a/vendor/github.com/lib/pq/go.mod b/vendor/github.com/lib/pq/go.mod
deleted file mode 100644
index edf0b343..00000000
--- a/vendor/github.com/lib/pq/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/lib/pq
diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go
deleted file mode 100644
index 850bb904..00000000
--- a/vendor/github.com/lib/pq/notify.go
+++ /dev/null
@@ -1,797 +0,0 @@
-package pq
-
-// Package pq is a pure Go Postgres driver for the database/sql package.
-// This module contains support for Postgres LISTEN/NOTIFY.
-
-import (
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Notification represents a single notification from the database.
-type Notification struct {
- // Process ID (PID) of the notifying postgres backend.
- BePid int
- // Name of the channel the notification was sent on.
- Channel string
- // Payload, or the empty string if unspecified.
- Extra string
-}
-
-func recvNotification(r *readBuf) *Notification {
- bePid := r.int32()
- channel := r.string()
- extra := r.string()
-
- return &Notification{bePid, channel, extra}
-}
-
-const (
- connStateIdle int32 = iota
- connStateExpectResponse
- connStateExpectReadyForQuery
-)
-
-type message struct {
- typ byte
- err error
-}
-
-var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")
-
-// ListenerConn is a low-level interface for waiting for notifications. You
-// should use Listener instead.
-type ListenerConn struct {
- // guards cn and err
- connectionLock sync.Mutex
- cn *conn
- err error
-
- connState int32
-
- // the sending goroutine will be holding this lock
- senderLock sync.Mutex
-
- notificationChan chan<- *Notification
-
- replyChan chan message
-}
-
-// NewListenerConn creates a new ListenerConn. Use NewListener instead.
-func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
- return newDialListenerConn(defaultDialer{}, name, notificationChan)
-}
-
-func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
- cn, err := DialOpen(d, name)
- if err != nil {
- return nil, err
- }
-
- l := &ListenerConn{
- cn: cn.(*conn),
- notificationChan: c,
- connState: connStateIdle,
- replyChan: make(chan message, 2),
- }
-
- go l.listenerConnMain()
-
- return l, nil
-}
-
-// We can only allow one goroutine at a time to be running a query on the
-// connection for various reasons, so the goroutine sending on the connection
-// must be holding senderLock.
-//
-// Returns an error if an unrecoverable error has occurred and the ListenerConn
-// should be abandoned.
-func (l *ListenerConn) acquireSenderLock() error {
- // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
- l.senderLock.Lock()
-
- l.connectionLock.Lock()
- err := l.err
- l.connectionLock.Unlock()
- if err != nil {
- l.senderLock.Unlock()
- return err
- }
- return nil
-}
-
-func (l *ListenerConn) releaseSenderLock() {
- l.senderLock.Unlock()
-}
-
-// setState advances the protocol state to newState. Returns false if moving
-// to that state from the current state is not allowed.
-func (l *ListenerConn) setState(newState int32) bool {
- var expectedState int32
-
- switch newState {
- case connStateIdle:
- expectedState = connStateExpectReadyForQuery
- case connStateExpectResponse:
- expectedState = connStateIdle
- case connStateExpectReadyForQuery:
- expectedState = connStateExpectResponse
- default:
- panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
- }
-
- return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState)
-}
-
-// Main logic is here: receive messages from the postgres backend, forward
-// notifications and query replies and keep the internal state in sync with the
-// protocol state. Returns when the connection has been lost, is about to go
-// away or should be discarded because we couldn't agree on the state with the
-// server backend.
-func (l *ListenerConn) listenerConnLoop() (err error) {
- defer errRecoverNoErrBadConn(&err)
-
- r := &readBuf{}
- for {
- t, err := l.cn.recvMessage(r)
- if err != nil {
- return err
- }
-
- switch t {
- case 'A':
- // recvNotification copies all the data so we don't need to worry
- // about the scratch buffer being overwritten.
- l.notificationChan <- recvNotification(r)
-
- case 'T', 'D':
- // only used by tests; ignore
-
- case 'E':
- // We might receive an ErrorResponse even when not in a query; it
- // is expected that the server will close the connection after
- // that, but we should make sure that the error we display is the
- // one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
- if !l.setState(connStateExpectReadyForQuery) {
- return parseError(r)
- }
- l.replyChan <- message{t, parseError(r)}
-
- case 'C', 'I':
- if !l.setState(connStateExpectReadyForQuery) {
- // protocol out of sync
- return fmt.Errorf("unexpected CommandComplete")
- }
- // ExecSimpleQuery doesn't need to know about this message
-
- case 'Z':
- if !l.setState(connStateIdle) {
- // protocol out of sync
- return fmt.Errorf("unexpected ReadyForQuery")
- }
- l.replyChan <- message{t, nil}
-
- case 'N', 'S':
- // ignore
- default:
- return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
- }
- }
-}
-
-// This is the main routine for the goroutine receiving on the database
-// connection. Most of the main logic is in listenerConnLoop.
-func (l *ListenerConn) listenerConnMain() {
- err := l.listenerConnLoop()
-
- // listenerConnLoop terminated; we're done, but we still have to clean up.
- // Make sure nobody tries to start any new queries by making sure the err
- // pointer is set. It is important that we do not overwrite its value; a
- // connection could be closed by either this goroutine or one sending on
- // the connection -- whoever closes the connection is assumed to have the
- // more meaningful error message (as the other one will probably get
- // net.errClosed), so that goroutine sets the error we expose while the
- // other error is discarded. If the connection is lost while two
- // goroutines are operating on the socket, it probably doesn't matter which
- // error we expose so we don't try to do anything more complex.
- l.connectionLock.Lock()
- if l.err == nil {
- l.err = err
- }
- l.cn.Close()
- l.connectionLock.Unlock()
-
- // There might be a query in-flight; make sure nobody's waiting for a
- // response to it, since there's not going to be one.
- close(l.replyChan)
-
- // let the listener know we're done
- close(l.notificationChan)
-
- // this ListenerConn is done
-}
-
-// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
-func (l *ListenerConn) Listen(channel string) (bool, error) {
- return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
-}
-
-// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
-func (l *ListenerConn) Unlisten(channel string) (bool, error) {
- return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
-}
-
-// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
-func (l *ListenerConn) UnlistenAll() (bool, error) {
- return l.ExecSimpleQuery("UNLISTEN *")
-}
-
-// Ping the remote server to make sure it's alive. Non-nil error means the
-// connection has failed and should be abandoned.
-func (l *ListenerConn) Ping() error {
- sent, err := l.ExecSimpleQuery("")
- if !sent {
- return err
- }
- if err != nil {
- // shouldn't happen
- panic(err)
- }
- return nil
-}
-
-// Attempt to send a query on the connection. Returns an error if sending the
-// query failed, and the caller should initiate closure of this connection.
-// The caller must be holding senderLock (see acquireSenderLock and
-// releaseSenderLock).
-func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
- defer errRecoverNoErrBadConn(&err)
-
- // must set connection state before sending the query
- if !l.setState(connStateExpectResponse) {
- panic("two queries running at the same time")
- }
-
- // Can't use l.cn.writeBuf here because it uses the scratch buffer which
- // might get overwritten by listenerConnLoop.
- b := &writeBuf{
- buf: []byte("Q\x00\x00\x00\x00"),
- pos: 1,
- }
- b.string(q)
- l.cn.send(b)
-
- return nil
-}
-
-// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
-// parameters) on the connection. The possible return values are:
-// 1) "executed" is true; the query was executed to completion on the
-// database server. If the query failed, err will be set to the error
-// returned by the database, otherwise err will be nil.
-// 2) If "executed" is false, the query could not be executed on the remote
-// server. err will be non-nil.
-//
-// After a call to ExecSimpleQuery has returned an executed=false value, the
-// connection has either been closed or will be closed shortly thereafter, and
-// all subsequently executed queries will return an error.
-func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
- if err = l.acquireSenderLock(); err != nil {
- return false, err
- }
- defer l.releaseSenderLock()
-
- err = l.sendSimpleQuery(q)
- if err != nil {
- // We can't know what state the protocol is in, so we need to abandon
- // this connection.
- l.connectionLock.Lock()
- // Set the error pointer if it hasn't been set already; see
- // listenerConnMain.
- if l.err == nil {
- l.err = err
- }
- l.connectionLock.Unlock()
- l.cn.c.Close()
- return false, err
- }
-
- // now we just wait for a reply..
- for {
- m, ok := <-l.replyChan
- if !ok {
-			// We lost the connection to the server; don't bother waiting
-			// for a response. err should have been set already.
- l.connectionLock.Lock()
- err := l.err
- l.connectionLock.Unlock()
- return false, err
- }
- switch m.typ {
- case 'Z':
- // sanity check
- if m.err != nil {
- panic("m.err != nil")
- }
- // done; err might or might not be set
- return true, err
-
- case 'E':
- // sanity check
- if m.err == nil {
- panic("m.err == nil")
- }
- // server responded with an error; ReadyForQuery to follow
- err = m.err
-
- default:
- return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
- }
- }
-}
-
-// Close closes the connection.
-func (l *ListenerConn) Close() error {
- l.connectionLock.Lock()
- if l.err != nil {
- l.connectionLock.Unlock()
- return errListenerConnClosed
- }
- l.err = errListenerConnClosed
- l.connectionLock.Unlock()
- // We can't send anything on the connection without holding senderLock.
- // Simply close the net.Conn to wake up everyone operating on it.
- return l.cn.c.Close()
-}
-
-// Err returns the reason the connection was closed. It is not safe to call
-// this function until l.Notify has been closed.
-func (l *ListenerConn) Err() error {
- return l.err
-}
-
-var errListenerClosed = errors.New("pq: Listener has been closed")
-
-// ErrChannelAlreadyOpen is returned from Listen when a channel is already
-// open.
-var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
-
-// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
-var ErrChannelNotOpen = errors.New("pq: channel is not open")
-
-// ListenerEventType is an enumeration of listener event types.
-type ListenerEventType int
-
-const (
-	// ListenerEventConnected is emitted only when the database connection
-	// has been established for the first time. The err argument of the
-	// callback will always be nil.
- ListenerEventConnected ListenerEventType = iota
-
- // ListenerEventDisconnected is emitted after a database connection has
- // been lost, either because of an error or because Close has been
- // called. The err argument will be set to the reason the database
- // connection was lost.
- ListenerEventDisconnected
-
- // ListenerEventReconnected is emitted after a database connection has
- // been re-established after connection loss. The err argument of the
- // callback will always be nil. After this event has been emitted, a
- // nil pq.Notification is sent on the Listener.Notify channel.
- ListenerEventReconnected
-
- // ListenerEventConnectionAttemptFailed is emitted after a connection
- // to the database was attempted, but failed. The err argument will be
- // set to an error describing why the connection attempt did not
- // succeed.
- ListenerEventConnectionAttemptFailed
-)
-
-// EventCallbackType is the event callback type. See also ListenerEventType
-// constants' documentation.
-type EventCallbackType func(event ListenerEventType, err error)
-
-// Listener provides an interface for listening to notifications from a
-// PostgreSQL database. For general usage information, see section
-// "Notifications".
-//
-// Listener can safely be used from concurrently running goroutines.
-type Listener struct {
- // Channel for receiving notifications from the database. In some cases a
- // nil value will be sent. See section "Notifications" above.
- Notify chan *Notification
-
- name string
- minReconnectInterval time.Duration
- maxReconnectInterval time.Duration
- dialer Dialer
- eventCallback EventCallbackType
-
- lock sync.Mutex
- isClosed bool
- reconnectCond *sync.Cond
- cn *ListenerConn
- connNotificationChan <-chan *Notification
- channels map[string]struct{}
-}
-
-// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
-//
-// name should be set to a connection string to be used to establish the
-// database connection (see section "Connection String Parameters" above).
-//
-// minReconnectInterval controls the duration to wait before trying to
-// re-establish the database connection after connection loss. After each
-// consecutive failure this interval is doubled, until maxReconnectInterval is
-// reached. Successfully completing the connection establishment procedure
-// resets the interval back to minReconnectInterval.
-//
-// The last parameter eventCallback can be set to a function which will be
-// called by the Listener when the state of the underlying database connection
-// changes. This callback will be called by the goroutine which dispatches the
-// notifications over the Notify channel, so you should try to avoid doing
-// potentially time-consuming operations from the callback.
-func NewListener(name string,
- minReconnectInterval time.Duration,
- maxReconnectInterval time.Duration,
- eventCallback EventCallbackType) *Listener {
- return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
-}
-
-// NewDialListener is like NewListener but it takes a Dialer.
-func NewDialListener(d Dialer,
- name string,
- minReconnectInterval time.Duration,
- maxReconnectInterval time.Duration,
- eventCallback EventCallbackType) *Listener {
-
- l := &Listener{
- name: name,
- minReconnectInterval: minReconnectInterval,
- maxReconnectInterval: maxReconnectInterval,
- dialer: d,
- eventCallback: eventCallback,
-
- channels: make(map[string]struct{}),
-
- Notify: make(chan *Notification, 32),
- }
- l.reconnectCond = sync.NewCond(&l.lock)
-
- go l.listenerMain()
-
- return l
-}
-
-// NotificationChannel returns the notification channel for this listener.
-// This is the same channel as Notify, and will not be recreated during the
-// lifetime of the Listener.
-func (l *Listener) NotificationChannel() <-chan *Notification {
- return l.Notify
-}
-
-// Listen starts listening for notifications on a channel. Calls to this
-// function will block until an acknowledgement has been received from the
-// server. Note that Listener automatically re-establishes the connection
-// after connection loss, so this function may block indefinitely if the
-// connection can not be re-established.
-//
-// Listen will only fail in three conditions:
-// 1) The channel is already open. The returned error will be
-// ErrChannelAlreadyOpen.
-// 2) The query was executed on the remote server, but PostgreSQL returned an
-// error message in response to the query. The returned error will be a
-// pq.Error containing the information the server supplied.
-// 3) Close is called on the Listener before the request could be completed.
-//
-// The channel name is case-sensitive.
-func (l *Listener) Listen(channel string) error {
- l.lock.Lock()
- defer l.lock.Unlock()
-
- if l.isClosed {
- return errListenerClosed
- }
-
-	// The server allows you to issue a LISTEN on a channel which is already
-	// open, but it seems useful to be able to detect this case and spot
-	// mistakes in application logic. If the application genuinely doesn't
-	// care, it can check the exported error and ignore it.
- _, exists := l.channels[channel]
- if exists {
- return ErrChannelAlreadyOpen
- }
-
- if l.cn != nil {
- // If gotResponse is true but error is set, the query was executed on
- // the remote server, but resulted in an error. This should be
- // relatively rare, so it's fine if we just pass the error to our
- // caller. However, if gotResponse is false, we could not complete the
- // query on the remote server and our underlying connection is about
- // to go away, so we only add relname to l.channels, and wait for
- // resync() to take care of the rest.
- gotResponse, err := l.cn.Listen(channel)
- if gotResponse && err != nil {
- return err
- }
- }
-
- l.channels[channel] = struct{}{}
- for l.cn == nil {
- l.reconnectCond.Wait()
- // we let go of the mutex for a while
- if l.isClosed {
- return errListenerClosed
- }
- }
-
- return nil
-}
-
-// Unlisten removes a channel from the Listener's channel list. Returns
-// ErrChannelNotOpen if the Listener is not listening on the specified channel.
-// Returns immediately with no error if there is no connection. Note that you
-// might still get notifications for this channel even after Unlisten has
-// returned.
-//
-// The channel name is case-sensitive.
-func (l *Listener) Unlisten(channel string) error {
- l.lock.Lock()
- defer l.lock.Unlock()
-
- if l.isClosed {
- return errListenerClosed
- }
-
- // Similarly to LISTEN, this is not an error in Postgres, but it seems
- // useful to distinguish from the normal conditions.
- _, exists := l.channels[channel]
- if !exists {
- return ErrChannelNotOpen
- }
-
- if l.cn != nil {
- // Similarly to Listen (see comment in that function), the caller
- // should only be bothered with an error if it came from the backend as
- // a response to our query.
- gotResponse, err := l.cn.Unlisten(channel)
- if gotResponse && err != nil {
- return err
- }
- }
-
- // Don't bother waiting for resync if there's no connection.
- delete(l.channels, channel)
- return nil
-}
-
-// UnlistenAll removes all channels from the Listener's channel list. Returns
-// immediately with no error if there is no connection. Note that you might
-// still get notifications for any of the deleted channels even after
-// UnlistenAll has returned.
-func (l *Listener) UnlistenAll() error {
- l.lock.Lock()
- defer l.lock.Unlock()
-
- if l.isClosed {
- return errListenerClosed
- }
-
- if l.cn != nil {
- // Similarly to Listen (see comment in that function), the caller
- // should only be bothered with an error if it came from the backend as
- // a response to our query.
- gotResponse, err := l.cn.UnlistenAll()
- if gotResponse && err != nil {
- return err
- }
- }
-
- // Don't bother waiting for resync if there's no connection.
- l.channels = make(map[string]struct{})
- return nil
-}
-
-// Ping the remote server to make sure it's alive. A non-nil return value
-// means that there is no active connection.
-func (l *Listener) Ping() error {
- l.lock.Lock()
- defer l.lock.Unlock()
-
- if l.isClosed {
- return errListenerClosed
- }
- if l.cn == nil {
- return errors.New("no connection")
- }
-
- return l.cn.Ping()
-}
-
-// Clean up after losing the server connection. Returns l.cn.Err(), which
-// should have the reason the connection was lost.
-func (l *Listener) disconnectCleanup() error {
- l.lock.Lock()
- defer l.lock.Unlock()
-
- // sanity check; can't look at Err() until the channel has been closed
- select {
- case _, ok := <-l.connNotificationChan:
- if ok {
- panic("connNotificationChan not closed")
- }
- default:
- panic("connNotificationChan not closed")
- }
-
- err := l.cn.Err()
- l.cn.Close()
- l.cn = nil
- return err
-}
-
-// Synchronize the list of channels we want to be listening on with the server
-// after the connection has been established.
-func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
- doneChan := make(chan error)
- go func(notificationChan <-chan *Notification) {
- for channel := range l.channels {
- // If we got a response, return that error to our caller as it's
- // going to be more descriptive than cn.Err().
- gotResponse, err := cn.Listen(channel)
- if gotResponse && err != nil {
- doneChan <- err
- return
- }
-
- // If we couldn't reach the server, wait for notificationChan to
- // close and then return the error message from the connection, as
- // per ListenerConn's interface.
- if err != nil {
- for range notificationChan {
- }
- doneChan <- cn.Err()
- return
- }
- }
- doneChan <- nil
- }(notificationChan)
-
- // Ignore notifications while synchronization is going on to avoid
- // deadlocks. We have to send a nil notification over Notify anyway as
- // we can't possibly know which notifications (if any) were lost while
- // the connection was down, so there's no reason to try and process
- // these messages at all.
- for {
- select {
- case _, ok := <-notificationChan:
- if !ok {
- notificationChan = nil
- }
-
- case err := <-doneChan:
- return err
- }
- }
-}
-
-// caller should NOT be holding l.lock
-func (l *Listener) closed() bool {
- l.lock.Lock()
- defer l.lock.Unlock()
-
- return l.isClosed
-}
-
-func (l *Listener) connect() error {
- notificationChan := make(chan *Notification, 32)
- cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
- if err != nil {
- return err
- }
-
- l.lock.Lock()
- defer l.lock.Unlock()
-
- err = l.resync(cn, notificationChan)
- if err != nil {
- cn.Close()
- return err
- }
-
- l.cn = cn
- l.connNotificationChan = notificationChan
- l.reconnectCond.Broadcast()
-
- return nil
-}
-
-// Close disconnects the Listener from the database and shuts it down.
-// Subsequent calls to its methods will return an error. Close returns an
-// error if the connection has already been closed.
-func (l *Listener) Close() error {
- l.lock.Lock()
- defer l.lock.Unlock()
-
- if l.isClosed {
- return errListenerClosed
- }
-
- if l.cn != nil {
- l.cn.Close()
- }
- l.isClosed = true
-
- // Unblock calls to Listen()
- l.reconnectCond.Broadcast()
-
- return nil
-}
-
-func (l *Listener) emitEvent(event ListenerEventType, err error) {
- if l.eventCallback != nil {
- l.eventCallback(event, err)
- }
-}
-
-// Main logic here: maintain a connection to the server when possible, wait
-// for notifications and emit events.
-func (l *Listener) listenerConnLoop() {
- var nextReconnect time.Time
-
- reconnectInterval := l.minReconnectInterval
- for {
- for {
- err := l.connect()
- if err == nil {
- break
- }
-
- if l.closed() {
- return
- }
- l.emitEvent(ListenerEventConnectionAttemptFailed, err)
-
- time.Sleep(reconnectInterval)
- reconnectInterval *= 2
- if reconnectInterval > l.maxReconnectInterval {
- reconnectInterval = l.maxReconnectInterval
- }
- }
-
- if nextReconnect.IsZero() {
- l.emitEvent(ListenerEventConnected, nil)
- } else {
- l.emitEvent(ListenerEventReconnected, nil)
- l.Notify <- nil
- }
-
- reconnectInterval = l.minReconnectInterval
- nextReconnect = time.Now().Add(reconnectInterval)
-
- for {
- notification, ok := <-l.connNotificationChan
- if !ok {
- // lost connection, loop again
- break
- }
- l.Notify <- notification
- }
-
- err := l.disconnectCleanup()
- if l.closed() {
- return
- }
- l.emitEvent(ListenerEventDisconnected, err)
-
- time.Sleep(time.Until(nextReconnect))
- }
-}
-
-func (l *Listener) listenerMain() {
- l.listenerConnLoop()
- close(l.Notify)
-}
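For reference, a minimal sketch of how the Listener API above is typically driven. The connection string, channel name, and payload handling are illustrative assumptions, not part of the vendored code:

```go
package main

import (
	"fmt"
	"time"

	"github.com/lib/pq"
)

func main() {
	// Hypothetical connection string; adjust for your environment.
	listener := pq.NewListener("dbname=exampledb sslmode=disable",
		10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				fmt.Println("listener event error:", err)
			}
		})

	if err := listener.Listen("events"); err != nil {
		panic(err)
	}

	for n := range listener.Notify {
		if n == nil {
			// Sent after a reconnect: notifications may have been lost,
			// so re-check any state derived from them.
			continue
		}
		fmt.Printf("notification on %q: %s\n", n.Channel, n.Extra)
	}
}
```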
diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go
deleted file mode 100644
index caaede24..00000000
--- a/vendor/github.com/lib/pq/oid/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Package oid contains OID constants
-// as defined by the Postgres server.
-package oid
-
-// Oid is a Postgres Object ID.
-type Oid uint32
diff --git a/vendor/github.com/lib/pq/oid/gen.go b/vendor/github.com/lib/pq/oid/gen.go
deleted file mode 100644
index 7c634cdc..00000000
--- a/vendor/github.com/lib/pq/oid/gen.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// +build ignore
-
-// Generate the table of OID values
-// Run with 'go run gen.go'.
-package main
-
-import (
- "database/sql"
- "fmt"
- "log"
- "os"
- "os/exec"
- "strings"
-
- _ "github.com/lib/pq"
-)
-
-// OID represent a postgres Object Identifier Type.
-type OID struct {
- ID int
- Type string
-}
-
-// Name returns an upper case version of the oid type.
-func (o OID) Name() string {
- return strings.ToUpper(o.Type)
-}
-
-func main() {
- datname := os.Getenv("PGDATABASE")
- sslmode := os.Getenv("PGSSLMODE")
-
- if datname == "" {
- os.Setenv("PGDATABASE", "pqgotest")
- }
-
- if sslmode == "" {
- os.Setenv("PGSSLMODE", "disable")
- }
-
- db, err := sql.Open("postgres", "")
- if err != nil {
- log.Fatal(err)
- }
- rows, err := db.Query(`
- SELECT typname, oid
- FROM pg_type WHERE oid < 10000
- ORDER BY oid;
- `)
- if err != nil {
- log.Fatal(err)
- }
- oids := make([]*OID, 0)
- for rows.Next() {
- var oid OID
- if err = rows.Scan(&oid.Type, &oid.ID); err != nil {
- log.Fatal(err)
- }
- oids = append(oids, &oid)
- }
- if err = rows.Err(); err != nil {
- log.Fatal(err)
- }
- cmd := exec.Command("gofmt")
- cmd.Stderr = os.Stderr
- w, err := cmd.StdinPipe()
- if err != nil {
- log.Fatal(err)
- }
- f, err := os.Create("types.go")
- if err != nil {
- log.Fatal(err)
- }
- cmd.Stdout = f
- err = cmd.Start()
- if err != nil {
- log.Fatal(err)
- }
- fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.")
- fmt.Fprintln(w, "\npackage oid")
- fmt.Fprintln(w, "const (")
- for _, oid := range oids {
- fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID)
- }
- fmt.Fprintln(w, ")")
- fmt.Fprintln(w, "var TypeName = map[Oid]string{")
- for _, oid := range oids {
- fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name())
- }
- fmt.Fprintln(w, "}")
- w.Close()
- cmd.Wait()
-}
diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go
deleted file mode 100644
index ecc84c2c..00000000
--- a/vendor/github.com/lib/pq/oid/types.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Code generated by gen.go. DO NOT EDIT.
-
-package oid
-
-const (
- T_bool Oid = 16
- T_bytea Oid = 17
- T_char Oid = 18
- T_name Oid = 19
- T_int8 Oid = 20
- T_int2 Oid = 21
- T_int2vector Oid = 22
- T_int4 Oid = 23
- T_regproc Oid = 24
- T_text Oid = 25
- T_oid Oid = 26
- T_tid Oid = 27
- T_xid Oid = 28
- T_cid Oid = 29
- T_oidvector Oid = 30
- T_pg_ddl_command Oid = 32
- T_pg_type Oid = 71
- T_pg_attribute Oid = 75
- T_pg_proc Oid = 81
- T_pg_class Oid = 83
- T_json Oid = 114
- T_xml Oid = 142
- T__xml Oid = 143
- T_pg_node_tree Oid = 194
- T__json Oid = 199
- T_smgr Oid = 210
- T_index_am_handler Oid = 325
- T_point Oid = 600
- T_lseg Oid = 601
- T_path Oid = 602
- T_box Oid = 603
- T_polygon Oid = 604
- T_line Oid = 628
- T__line Oid = 629
- T_cidr Oid = 650
- T__cidr Oid = 651
- T_float4 Oid = 700
- T_float8 Oid = 701
- T_abstime Oid = 702
- T_reltime Oid = 703
- T_tinterval Oid = 704
- T_unknown Oid = 705
- T_circle Oid = 718
- T__circle Oid = 719
- T_money Oid = 790
- T__money Oid = 791
- T_macaddr Oid = 829
- T_inet Oid = 869
- T__bool Oid = 1000
- T__bytea Oid = 1001
- T__char Oid = 1002
- T__name Oid = 1003
- T__int2 Oid = 1005
- T__int2vector Oid = 1006
- T__int4 Oid = 1007
- T__regproc Oid = 1008
- T__text Oid = 1009
- T__tid Oid = 1010
- T__xid Oid = 1011
- T__cid Oid = 1012
- T__oidvector Oid = 1013
- T__bpchar Oid = 1014
- T__varchar Oid = 1015
- T__int8 Oid = 1016
- T__point Oid = 1017
- T__lseg Oid = 1018
- T__path Oid = 1019
- T__box Oid = 1020
- T__float4 Oid = 1021
- T__float8 Oid = 1022
- T__abstime Oid = 1023
- T__reltime Oid = 1024
- T__tinterval Oid = 1025
- T__polygon Oid = 1027
- T__oid Oid = 1028
- T_aclitem Oid = 1033
- T__aclitem Oid = 1034
- T__macaddr Oid = 1040
- T__inet Oid = 1041
- T_bpchar Oid = 1042
- T_varchar Oid = 1043
- T_date Oid = 1082
- T_time Oid = 1083
- T_timestamp Oid = 1114
- T__timestamp Oid = 1115
- T__date Oid = 1182
- T__time Oid = 1183
- T_timestamptz Oid = 1184
- T__timestamptz Oid = 1185
- T_interval Oid = 1186
- T__interval Oid = 1187
- T__numeric Oid = 1231
- T_pg_database Oid = 1248
- T__cstring Oid = 1263
- T_timetz Oid = 1266
- T__timetz Oid = 1270
- T_bit Oid = 1560
- T__bit Oid = 1561
- T_varbit Oid = 1562
- T__varbit Oid = 1563
- T_numeric Oid = 1700
- T_refcursor Oid = 1790
- T__refcursor Oid = 2201
- T_regprocedure Oid = 2202
- T_regoper Oid = 2203
- T_regoperator Oid = 2204
- T_regclass Oid = 2205
- T_regtype Oid = 2206
- T__regprocedure Oid = 2207
- T__regoper Oid = 2208
- T__regoperator Oid = 2209
- T__regclass Oid = 2210
- T__regtype Oid = 2211
- T_record Oid = 2249
- T_cstring Oid = 2275
- T_any Oid = 2276
- T_anyarray Oid = 2277
- T_void Oid = 2278
- T_trigger Oid = 2279
- T_language_handler Oid = 2280
- T_internal Oid = 2281
- T_opaque Oid = 2282
- T_anyelement Oid = 2283
- T__record Oid = 2287
- T_anynonarray Oid = 2776
- T_pg_authid Oid = 2842
- T_pg_auth_members Oid = 2843
- T__txid_snapshot Oid = 2949
- T_uuid Oid = 2950
- T__uuid Oid = 2951
- T_txid_snapshot Oid = 2970
- T_fdw_handler Oid = 3115
- T_pg_lsn Oid = 3220
- T__pg_lsn Oid = 3221
- T_tsm_handler Oid = 3310
- T_anyenum Oid = 3500
- T_tsvector Oid = 3614
- T_tsquery Oid = 3615
- T_gtsvector Oid = 3642
- T__tsvector Oid = 3643
- T__gtsvector Oid = 3644
- T__tsquery Oid = 3645
- T_regconfig Oid = 3734
- T__regconfig Oid = 3735
- T_regdictionary Oid = 3769
- T__regdictionary Oid = 3770
- T_jsonb Oid = 3802
- T__jsonb Oid = 3807
- T_anyrange Oid = 3831
- T_event_trigger Oid = 3838
- T_int4range Oid = 3904
- T__int4range Oid = 3905
- T_numrange Oid = 3906
- T__numrange Oid = 3907
- T_tsrange Oid = 3908
- T__tsrange Oid = 3909
- T_tstzrange Oid = 3910
- T__tstzrange Oid = 3911
- T_daterange Oid = 3912
- T__daterange Oid = 3913
- T_int8range Oid = 3926
- T__int8range Oid = 3927
- T_pg_shseclabel Oid = 4066
- T_regnamespace Oid = 4089
- T__regnamespace Oid = 4090
- T_regrole Oid = 4096
- T__regrole Oid = 4097
-)
-
-var TypeName = map[Oid]string{
- T_bool: "BOOL",
- T_bytea: "BYTEA",
- T_char: "CHAR",
- T_name: "NAME",
- T_int8: "INT8",
- T_int2: "INT2",
- T_int2vector: "INT2VECTOR",
- T_int4: "INT4",
- T_regproc: "REGPROC",
- T_text: "TEXT",
- T_oid: "OID",
- T_tid: "TID",
- T_xid: "XID",
- T_cid: "CID",
- T_oidvector: "OIDVECTOR",
- T_pg_ddl_command: "PG_DDL_COMMAND",
- T_pg_type: "PG_TYPE",
- T_pg_attribute: "PG_ATTRIBUTE",
- T_pg_proc: "PG_PROC",
- T_pg_class: "PG_CLASS",
- T_json: "JSON",
- T_xml: "XML",
- T__xml: "_XML",
- T_pg_node_tree: "PG_NODE_TREE",
- T__json: "_JSON",
- T_smgr: "SMGR",
- T_index_am_handler: "INDEX_AM_HANDLER",
- T_point: "POINT",
- T_lseg: "LSEG",
- T_path: "PATH",
- T_box: "BOX",
- T_polygon: "POLYGON",
- T_line: "LINE",
- T__line: "_LINE",
- T_cidr: "CIDR",
- T__cidr: "_CIDR",
- T_float4: "FLOAT4",
- T_float8: "FLOAT8",
- T_abstime: "ABSTIME",
- T_reltime: "RELTIME",
- T_tinterval: "TINTERVAL",
- T_unknown: "UNKNOWN",
- T_circle: "CIRCLE",
- T__circle: "_CIRCLE",
- T_money: "MONEY",
- T__money: "_MONEY",
- T_macaddr: "MACADDR",
- T_inet: "INET",
- T__bool: "_BOOL",
- T__bytea: "_BYTEA",
- T__char: "_CHAR",
- T__name: "_NAME",
- T__int2: "_INT2",
- T__int2vector: "_INT2VECTOR",
- T__int4: "_INT4",
- T__regproc: "_REGPROC",
- T__text: "_TEXT",
- T__tid: "_TID",
- T__xid: "_XID",
- T__cid: "_CID",
- T__oidvector: "_OIDVECTOR",
- T__bpchar: "_BPCHAR",
- T__varchar: "_VARCHAR",
- T__int8: "_INT8",
- T__point: "_POINT",
- T__lseg: "_LSEG",
- T__path: "_PATH",
- T__box: "_BOX",
- T__float4: "_FLOAT4",
- T__float8: "_FLOAT8",
- T__abstime: "_ABSTIME",
- T__reltime: "_RELTIME",
- T__tinterval: "_TINTERVAL",
- T__polygon: "_POLYGON",
- T__oid: "_OID",
- T_aclitem: "ACLITEM",
- T__aclitem: "_ACLITEM",
- T__macaddr: "_MACADDR",
- T__inet: "_INET",
- T_bpchar: "BPCHAR",
- T_varchar: "VARCHAR",
- T_date: "DATE",
- T_time: "TIME",
- T_timestamp: "TIMESTAMP",
- T__timestamp: "_TIMESTAMP",
- T__date: "_DATE",
- T__time: "_TIME",
- T_timestamptz: "TIMESTAMPTZ",
- T__timestamptz: "_TIMESTAMPTZ",
- T_interval: "INTERVAL",
- T__interval: "_INTERVAL",
- T__numeric: "_NUMERIC",
- T_pg_database: "PG_DATABASE",
- T__cstring: "_CSTRING",
- T_timetz: "TIMETZ",
- T__timetz: "_TIMETZ",
- T_bit: "BIT",
- T__bit: "_BIT",
- T_varbit: "VARBIT",
- T__varbit: "_VARBIT",
- T_numeric: "NUMERIC",
- T_refcursor: "REFCURSOR",
- T__refcursor: "_REFCURSOR",
- T_regprocedure: "REGPROCEDURE",
- T_regoper: "REGOPER",
- T_regoperator: "REGOPERATOR",
- T_regclass: "REGCLASS",
- T_regtype: "REGTYPE",
- T__regprocedure: "_REGPROCEDURE",
- T__regoper: "_REGOPER",
- T__regoperator: "_REGOPERATOR",
- T__regclass: "_REGCLASS",
- T__regtype: "_REGTYPE",
- T_record: "RECORD",
- T_cstring: "CSTRING",
- T_any: "ANY",
- T_anyarray: "ANYARRAY",
- T_void: "VOID",
- T_trigger: "TRIGGER",
- T_language_handler: "LANGUAGE_HANDLER",
- T_internal: "INTERNAL",
- T_opaque: "OPAQUE",
- T_anyelement: "ANYELEMENT",
- T__record: "_RECORD",
- T_anynonarray: "ANYNONARRAY",
- T_pg_authid: "PG_AUTHID",
- T_pg_auth_members: "PG_AUTH_MEMBERS",
- T__txid_snapshot: "_TXID_SNAPSHOT",
- T_uuid: "UUID",
- T__uuid: "_UUID",
- T_txid_snapshot: "TXID_SNAPSHOT",
- T_fdw_handler: "FDW_HANDLER",
- T_pg_lsn: "PG_LSN",
- T__pg_lsn: "_PG_LSN",
- T_tsm_handler: "TSM_HANDLER",
- T_anyenum: "ANYENUM",
- T_tsvector: "TSVECTOR",
- T_tsquery: "TSQUERY",
- T_gtsvector: "GTSVECTOR",
- T__tsvector: "_TSVECTOR",
- T__gtsvector: "_GTSVECTOR",
- T__tsquery: "_TSQUERY",
- T_regconfig: "REGCONFIG",
- T__regconfig: "_REGCONFIG",
- T_regdictionary: "REGDICTIONARY",
- T__regdictionary: "_REGDICTIONARY",
- T_jsonb: "JSONB",
- T__jsonb: "_JSONB",
- T_anyrange: "ANYRANGE",
- T_event_trigger: "EVENT_TRIGGER",
- T_int4range: "INT4RANGE",
- T__int4range: "_INT4RANGE",
- T_numrange: "NUMRANGE",
- T__numrange: "_NUMRANGE",
- T_tsrange: "TSRANGE",
- T__tsrange: "_TSRANGE",
- T_tstzrange: "TSTZRANGE",
- T__tstzrange: "_TSTZRANGE",
- T_daterange: "DATERANGE",
- T__daterange: "_DATERANGE",
- T_int8range: "INT8RANGE",
- T__int8range: "_INT8RANGE",
- T_pg_shseclabel: "PG_SHSECLABEL",
- T_regnamespace: "REGNAMESPACE",
- T__regnamespace: "_REGNAMESPACE",
- T_regrole: "REGROLE",
- T__regrole: "_REGROLE",
-}
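A quick illustration of how the generated table is consumed; fieldDesc.Name in rows.go (below) performs the same lookup:

```go
package main

import (
	"fmt"

	"github.com/lib/pq/oid"
)

func main() {
	fmt.Println(oid.TypeName[oid.T_int4]) // INT4
	fmt.Println(uint32(oid.T_jsonb))      // 3802 (Oid is a uint32)
}
```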
diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go
deleted file mode 100644
index c6aa5b9a..00000000
--- a/vendor/github.com/lib/pq/rows.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package pq
-
-import (
- "math"
- "reflect"
- "time"
-
- "github.com/lib/pq/oid"
-)
-
-const headerSize = 4
-
-type fieldDesc struct {
- // The object ID of the data type.
- OID oid.Oid
- // The data type size (see pg_type.typlen).
- // Note that negative values denote variable-width types.
- Len int
- // The type modifier (see pg_attribute.atttypmod).
- // The meaning of the modifier is type-specific.
- Mod int
-}
-
-func (fd fieldDesc) Type() reflect.Type {
- switch fd.OID {
- case oid.T_int8:
- return reflect.TypeOf(int64(0))
- case oid.T_int4:
- return reflect.TypeOf(int32(0))
- case oid.T_int2:
- return reflect.TypeOf(int16(0))
- case oid.T_varchar, oid.T_text:
- return reflect.TypeOf("")
- case oid.T_bool:
- return reflect.TypeOf(false)
- case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
- return reflect.TypeOf(time.Time{})
- case oid.T_bytea:
- return reflect.TypeOf([]byte(nil))
- default:
- return reflect.TypeOf(new(interface{})).Elem()
- }
-}
-
-func (fd fieldDesc) Name() string {
- return oid.TypeName[fd.OID]
-}
-
-func (fd fieldDesc) Length() (length int64, ok bool) {
- switch fd.OID {
- case oid.T_text, oid.T_bytea:
- return math.MaxInt64, true
- case oid.T_varchar, oid.T_bpchar:
- return int64(fd.Mod - headerSize), true
- default:
- return 0, false
- }
-}
-
-func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
- switch fd.OID {
- case oid.T_numeric, oid.T__numeric:
- mod := fd.Mod - headerSize
- precision = int64((mod >> 16) & 0xffff)
- scale = int64(mod & 0xffff)
- return precision, scale, true
- default:
- return 0, 0, false
- }
-}
-
-// ColumnTypeScanType returns the value type that can be used to scan types into.
-func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
- return rs.colTyps[index].Type()
-}
-
-// ColumnTypeDatabaseTypeName returns the database system type name.
-func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
- return rs.colTyps[index].Name()
-}
-
-// ColumnTypeLength returns the length of the column type if the column is a
-// variable-length type. If the column is not a variable-length type, ok
-// will be false.
-func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
- return rs.colTyps[index].Length()
-}
-
-// ColumnTypePrecisionScale returns the precision and scale for decimal
-// types. If not applicable, ok is false.
-func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
- return rs.colTyps[index].PrecisionScale()
-}
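As a worked check of the typmod arithmetic in PrecisionScale: for a column declared numeric(10,2), PostgreSQL reports atttypmod as ((precision << 16) | scale) plus the 4-byte header, so the decoding above recovers (10, 2). A standalone sketch:

```go
package main

import "fmt"

const headerSize = 4 // same header adjustment as in rows.go

func main() {
	// atttypmod for numeric(10,2): ((10 << 16) | 2) + 4 = 655366
	atttypmod := ((10 << 16) | 2) + headerSize

	mod := atttypmod - headerSize
	precision := (mod >> 16) & 0xffff
	scale := mod & 0xffff
	fmt.Println(precision, scale) // 10 2
}
```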
diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go
deleted file mode 100644
index e1a326a0..00000000
--- a/vendor/github.com/lib/pq/ssl.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package pq
-
-import (
- "crypto/tls"
- "crypto/x509"
- "io/ioutil"
- "net"
- "os"
- "os/user"
- "path/filepath"
-)
-
-// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
-// related settings. The function is nil when no upgrade should take place.
-func ssl(o values) (func(net.Conn) (net.Conn, error), error) {
- verifyCaOnly := false
- tlsConf := tls.Config{}
- switch mode := o["sslmode"]; mode {
- // "require" is the default.
- case "", "require":
-		// We must skip TLS's own verification because it has required full
-		// verification since Go 1.3.
- tlsConf.InsecureSkipVerify = true
-
- // From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
- //
- // Note: For backwards compatibility with earlier versions of
- // PostgreSQL, if a root CA file exists, the behavior of
- // sslmode=require will be the same as that of verify-ca, meaning the
- // server certificate is validated against the CA. Relying on this
- // behavior is discouraged, and applications that need certificate
- // validation should always use verify-ca or verify-full.
- if sslrootcert, ok := o["sslrootcert"]; ok {
- if _, err := os.Stat(sslrootcert); err == nil {
- verifyCaOnly = true
- } else {
- delete(o, "sslrootcert")
- }
- }
- case "verify-ca":
-		// We must skip TLS's own verification because it has required full
-		// verification since Go 1.3.
- tlsConf.InsecureSkipVerify = true
- verifyCaOnly = true
- case "verify-full":
- tlsConf.ServerName = o["host"]
- case "disable":
- return nil, nil
- default:
- return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
- }
-
- err := sslClientCertificates(&tlsConf, o)
- if err != nil {
- return nil, err
- }
- err = sslCertificateAuthority(&tlsConf, o)
- if err != nil {
- return nil, err
- }
- sslRenegotiation(&tlsConf)
-
- return func(conn net.Conn) (net.Conn, error) {
- client := tls.Client(conn, &tlsConf)
- if verifyCaOnly {
- err := sslVerifyCertificateAuthority(client, &tlsConf)
- if err != nil {
- return nil, err
- }
- }
- return client, nil
- }, nil
-}
-
-// sslClientCertificates adds the certificate specified in the "sslcert" and
-// "sslkey" settings, or if they aren't set, from the .postgresql directory
-// in the user's home directory. The configured files must exist and have
-// the correct permissions.
-func sslClientCertificates(tlsConf *tls.Config, o values) error {
- // user.Current() might fail when cross-compiling. We have to ignore the
- // error and continue without home directory defaults, since we wouldn't
- // know from where to load them.
- user, _ := user.Current()
-
- // In libpq, the client certificate is only loaded if the setting is not blank.
- //
- // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
- sslcert := o["sslcert"]
- if len(sslcert) == 0 && user != nil {
- sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
- }
- // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
- if len(sslcert) == 0 {
- return nil
- }
- // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
- if _, err := os.Stat(sslcert); os.IsNotExist(err) {
- return nil
- } else if err != nil {
- return err
- }
-
- // In libpq, the ssl key is only loaded if the setting is not blank.
- //
- // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
- sslkey := o["sslkey"]
- if len(sslkey) == 0 && user != nil {
- sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
- }
-
- if len(sslkey) > 0 {
- if err := sslKeyPermissions(sslkey); err != nil {
- return err
- }
- }
-
- cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
- if err != nil {
- return err
- }
-
- tlsConf.Certificates = []tls.Certificate{cert}
- return nil
-}
-
-// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
-func sslCertificateAuthority(tlsConf *tls.Config, o values) error {
- // In libpq, the root certificate is only loaded if the setting is not blank.
- //
- // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
- if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
- tlsConf.RootCAs = x509.NewCertPool()
-
- cert, err := ioutil.ReadFile(sslrootcert)
- if err != nil {
- return err
- }
-
- if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
- return fmterrorf("couldn't parse pem in sslrootcert")
- }
- }
-
- return nil
-}
-
-// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
-// verifies the presented certificate against the CA, i.e. the one specified in
-// sslrootcert or the system CA if sslrootcert was not specified.
-func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error {
- err := client.Handshake()
- if err != nil {
- return err
- }
- certs := client.ConnectionState().PeerCertificates
- opts := x509.VerifyOptions{
- DNSName: client.ConnectionState().ServerName,
- Intermediates: x509.NewCertPool(),
- Roots: tlsConf.RootCAs,
- }
- for i, cert := range certs {
- if i == 0 {
- continue
- }
- opts.Intermediates.AddCert(cert)
- }
- _, err = certs[0].Verify(opts)
- return err
-}
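A sketch of how these sslmode settings are exercised in practice; the host, user, and certificate path are assumptions for illustration:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// verify-full validates the server certificate against the CA and
	// checks the hostname; verify-ca would skip the hostname check.
	dsn := "host=db.example.com user=app dbname=app " +
		"sslmode=verify-full sslrootcert=/etc/ssl/pg/root.crt"

	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	// The TLS upgrade happens when a connection is actually made.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```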
diff --git a/vendor/github.com/lib/pq/ssl_go1.7.go b/vendor/github.com/lib/pq/ssl_go1.7.go
deleted file mode 100644
index d7ba43b3..00000000
--- a/vendor/github.com/lib/pq/ssl_go1.7.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build go1.7
-
-package pq
-
-import "crypto/tls"
-
-// Accept renegotiation requests initiated by the backend.
-//
-// Renegotiation was deprecated and then removed in PostgreSQL 9.5, but
-// the default configuration of older versions has it enabled. Redshift
-// also initiates renegotiations and cannot be reconfigured.
-func sslRenegotiation(conf *tls.Config) {
- conf.Renegotiation = tls.RenegotiateFreelyAsClient
-}
diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go
deleted file mode 100644
index 3b7c3a2a..00000000
--- a/vendor/github.com/lib/pq/ssl_permissions.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build !windows
-
-package pq
-
-import "os"
-
-// sslKeyPermissions checks the permissions on user-supplied ssl key files.
-// The key file should have very little access.
-//
-// libpq does not check key file permissions on Windows.
-func sslKeyPermissions(sslkey string) error {
- info, err := os.Stat(sslkey)
- if err != nil {
- return err
- }
- if info.Mode().Perm()&0077 != 0 {
- return ErrSSLKeyHasWorldPermissions
- }
- return nil
-}
diff --git a/vendor/github.com/lib/pq/ssl_renegotiation.go b/vendor/github.com/lib/pq/ssl_renegotiation.go
deleted file mode 100644
index 85ed5e43..00000000
--- a/vendor/github.com/lib/pq/ssl_renegotiation.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !go1.7
-
-package pq
-
-import "crypto/tls"
-
-// Renegotiation is not supported by crypto/tls until Go 1.7.
-func sslRenegotiation(*tls.Config) {}
diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go
deleted file mode 100644
index 5d2c763c..00000000
--- a/vendor/github.com/lib/pq/ssl_windows.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build windows
-
-package pq
-
-// sslKeyPermissions checks the permissions on user-supplied ssl key files.
-// The key file should have very little access.
-//
-// libpq does not check key file permissions on Windows.
-func sslKeyPermissions(string) error { return nil }
diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go
deleted file mode 100644
index f4d8a7c2..00000000
--- a/vendor/github.com/lib/pq/url.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package pq
-
-import (
- "fmt"
- "net"
- nurl "net/url"
- "sort"
- "strings"
-)
-
-// ParseURL no longer needs to be used by clients of this library since supplying a URL as a
-// connection string to sql.Open() is now supported:
-//
-// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
-//
-// It remains exported here for backwards-compatibility.
-//
-// ParseURL converts a url to a connection string for driver.Open.
-// Example:
-//
-// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"
-//
-// converts to:
-//
-// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
-//
-// A minimal example:
-//
-// "postgres://"
-//
-// This will be blank, causing driver.Open to use all of the defaults.
-func ParseURL(url string) (string, error) {
- u, err := nurl.Parse(url)
- if err != nil {
- return "", err
- }
-
- if u.Scheme != "postgres" && u.Scheme != "postgresql" {
- return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
- }
-
- var kvs []string
- escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
- accrue := func(k, v string) {
- if v != "" {
- kvs = append(kvs, k+"="+escaper.Replace(v))
- }
- }
-
- if u.User != nil {
- v := u.User.Username()
- accrue("user", v)
-
- v, _ = u.User.Password()
- accrue("password", v)
- }
-
- if host, port, err := net.SplitHostPort(u.Host); err != nil {
- accrue("host", u.Host)
- } else {
- accrue("host", host)
- accrue("port", port)
- }
-
- if u.Path != "" {
- accrue("dbname", u.Path[1:])
- }
-
- q := u.Query()
- for k := range q {
- accrue(k, q.Get(k))
- }
-
- sort.Strings(kvs) // Makes testing easier (not a performance concern)
- return strings.Join(kvs, " "), nil
-}
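A runnable form of the example from the doc comment above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	conn, err := pq.ParseURL("postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
	if err != nil {
		log.Fatal(err)
	}
	// Keys come out sorted, per the sort.Strings call above:
	// dbname=mydb host=1.2.3.4 password=secret port=5432 sslmode=verify-full user=bob
	fmt.Println(conn)
}
```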
diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go
deleted file mode 100644
index bf982524..00000000
--- a/vendor/github.com/lib/pq/user_posix.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Package pq is a pure Go Postgres driver for the database/sql package.
-
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun
-
-package pq
-
-import (
- "os"
- "os/user"
-)
-
-func userCurrent() (string, error) {
- u, err := user.Current()
- if err == nil {
- return u.Username, nil
- }
-
- name := os.Getenv("USER")
- if name != "" {
- return name, nil
- }
-
- return "", ErrCouldNotDetectUsername
-}
diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go
deleted file mode 100644
index 2b691267..00000000
--- a/vendor/github.com/lib/pq/user_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Package pq is a pure Go Postgres driver for the database/sql package.
-package pq
-
-import (
- "path/filepath"
- "syscall"
-)
-
-// Perform Windows user name lookup identically to libpq.
-//
-// The PostgreSQL code makes use of the legacy Win32 function
-// GetUserName, and that function has not been imported into stock Go.
-// GetUserNameEx is available though, the difference being that a
-// wider range of names is available. To get the output to be the
-// same as GetUserName, only the base (or last) component of the
-// result is returned.
-func userCurrent() (string, error) {
- pw_name := make([]uint16, 128)
- pwname_size := uint32(len(pw_name)) - 1
- err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size)
- if err != nil {
- return "", ErrCouldNotDetectUsername
- }
- s := syscall.UTF16ToString(pw_name)
- u := filepath.Base(s)
- return u, nil
-}
diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go
deleted file mode 100644
index 9a1b9e07..00000000
--- a/vendor/github.com/lib/pq/uuid.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package pq
-
-import (
- "encoding/hex"
- "fmt"
-)
-
-// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format.
-func decodeUUIDBinary(src []byte) ([]byte, error) {
- if len(src) != 16 {
- return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
- }
-
- dst := make([]byte, 36)
- dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
- hex.Encode(dst[0:], src[0:4])
- hex.Encode(dst[9:], src[4:6])
- hex.Encode(dst[14:], src[6:8])
- hex.Encode(dst[19:], src[8:10])
- hex.Encode(dst[24:], src[10:16])
-
- return dst, nil
-}
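The layout above is the canonical 8-4-4-4-12 UUID text grouping. A self-contained sketch of the same encoding, using made-up sample bytes:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	src := []byte{ // 16 arbitrary example bytes
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
	}

	dst := make([]byte, 36)
	dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
	hex.Encode(dst[0:], src[0:4])    // 8 hex chars
	hex.Encode(dst[9:], src[4:6])    // 4
	hex.Encode(dst[14:], src[6:8])   // 4
	hex.Encode(dst[19:], src[8:10])  // 4
	hex.Encode(dst[24:], src[10:16]) // 12

	fmt.Println(string(dst)) // 12345678-9abc-def0-1234-56789abcdef0
}
```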
diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml
deleted file mode 100644
index d7b9589a..00000000
--- a/vendor/github.com/mitchellh/copystructure/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-
-go:
- - 1.7
- - tip
-
-script:
- - go test
-
-matrix:
- allow_failures:
- - go: tip
diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE
deleted file mode 100644
index 22985159..00000000
--- a/vendor/github.com/mitchellh/copystructure/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md
deleted file mode 100644
index f0fbd2e5..00000000
--- a/vendor/github.com/mitchellh/copystructure/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# copystructure
-
-copystructure is a Go library for deep copying values in Go.
-
-This allows you to copy Go values that may contain reference values
-such as maps, slices, or pointers, and copy their data as well instead
-of just their references.
-
-## Installation
-
-Standard `go get`:
-
-```
-$ go get github.com/mitchellh/copystructure
-```
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
-
-The `Copy` function has examples associated with it there.
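A minimal usage sketch, deep-copying a map whose values are slices:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	original := map[string][]int{"a": {1, 2, 3}}

	dup, err := copystructure.Copy(original)
	if err != nil {
		panic(err)
	}

	copied := dup.(map[string][]int)
	copied["a"][0] = 99 // mutating the copy leaves the original intact

	fmt.Println(original["a"][0], copied["a"][0]) // 1 99
}
```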
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
deleted file mode 100644
index db6a6aa1..00000000
--- a/vendor/github.com/mitchellh/copystructure/copier_time.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package copystructure
-
-import (
- "reflect"
- "time"
-)
-
-func init() {
- Copiers[reflect.TypeOf(time.Time{})] = timeCopier
-}
-
-func timeCopier(v interface{}) (interface{}, error) {
- // Just... copy it.
- return v.(time.Time), nil
-}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
deleted file mode 100644
index 14043525..00000000
--- a/vendor/github.com/mitchellh/copystructure/copystructure.go
+++ /dev/null
@@ -1,548 +0,0 @@
-package copystructure
-
-import (
- "errors"
- "reflect"
- "sync"
-
- "github.com/mitchellh/reflectwalk"
-)
-
-// Copy returns a deep copy of v.
-func Copy(v interface{}) (interface{}, error) {
- return Config{}.Copy(v)
-}
-
-// CopierFunc is a function that knows how to deep copy a specific type.
-// Register these globally with the Copiers variable.
-type CopierFunc func(interface{}) (interface{}, error)
-
-// Copiers is a map of types that behave specially when they are copied.
-// If a type is found in this map while deep copying, the associated function
-// will be called to copy it instead of attempting to copy all fields.
-//
-// The key should be the type, obtained using: reflect.TypeOf(value with type).
-//
-// It is unsafe to write to this map after Copies have started. If you
-// are writing to this map while also copying, wrap all modifications to
-// this map as well as to Copy in a mutex.
-var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
-
-// Must is a helper that wraps a call to a function returning
-// (interface{}, error) and panics if the error is non-nil. It is intended
-// for use in variable initializations and should only be used when a copy
-// error should be a crashing case.
-func Must(v interface{}, err error) interface{} {
- if err != nil {
- panic("copy error: " + err.Error())
- }
-
- return v
-}
-
-var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
-
-type Config struct {
- // Lock any types that are a sync.Locker and are not a mutex while copying.
- // If there is an RLocker method, use that to get the sync.Locker.
- Lock bool
-
- // Copiers is a map of types associated with a CopierFunc. Use the global
- // Copiers map if this is nil.
- Copiers map[reflect.Type]CopierFunc
-}
-
-func (c Config) Copy(v interface{}) (interface{}, error) {
- if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
- return nil, errPointerRequired
- }
-
- w := new(walker)
- if c.Lock {
- w.useLocks = true
- }
-
- if c.Copiers == nil {
- c.Copiers = Copiers
- }
-
- err := reflectwalk.Walk(v, w)
- if err != nil {
- return nil, err
- }
-
- // Get the result. If the result is nil, then we want to turn it
- // into a typed nil if we can.
- result := w.Result
- if result == nil {
- val := reflect.ValueOf(v)
- result = reflect.Indirect(reflect.New(val.Type())).Interface()
- }
-
- return result, nil
-}
-
-// Return the key used to index interface types we've seen. Store the number
-// of pointers in the upper 32 bits, and the depth in the lower 32 bits. This
-// is easy to calculate, easy to match against our current depth, and means we
-// don't need to deal with initializing and cleaning up nested maps or slices.
-func ifaceKey(pointers, depth int) uint64 {
- return uint64(pointers)<<32 | uint64(depth)
-}
-
-type walker struct {
- Result interface{}
-
- depth int
- ignoreDepth int
- vals []reflect.Value
- cs []reflect.Value
-
- // This stores the number of pointers we've walked over, indexed by depth.
- ps []int
-
- // If an interface is indirected by a pointer, we need to know the type of
- // interface to create when creating the new value. Store the interface
- // types here, indexed by both the walk depth and the number of pointers
- // already seen at that depth. Use ifaceKey to calculate the proper uint64
- // value.
- ifaceTypes map[uint64]reflect.Type
-
- // any locks we've taken, indexed by depth
- locks []sync.Locker
- // take locks while walking the structure
- useLocks bool
-}
-
-func (w *walker) Enter(l reflectwalk.Location) error {
- w.depth++
-
- // ensure we have enough elements to index via w.depth
- for w.depth >= len(w.locks) {
- w.locks = append(w.locks, nil)
- }
-
- for len(w.ps) < w.depth+1 {
- w.ps = append(w.ps, 0)
- }
-
- return nil
-}
-
-func (w *walker) Exit(l reflectwalk.Location) error {
- locker := w.locks[w.depth]
- w.locks[w.depth] = nil
- if locker != nil {
- defer locker.Unlock()
- }
-
- // clear out pointers and interfaces as we exit the stack
- w.ps[w.depth] = 0
-
- for k := range w.ifaceTypes {
- mask := uint64(^uint32(0))
- if k&mask == uint64(w.depth) {
- delete(w.ifaceTypes, k)
- }
- }
-
- w.depth--
- if w.ignoreDepth > w.depth {
- w.ignoreDepth = 0
- }
-
- if w.ignoring() {
- return nil
- }
-
- switch l {
- case reflectwalk.Array:
- fallthrough
- case reflectwalk.Map:
- fallthrough
- case reflectwalk.Slice:
- w.replacePointerMaybe()
-
- // Pop map off our container
- w.cs = w.cs[:len(w.cs)-1]
- case reflectwalk.MapValue:
- // Pop off the key and value
- mv := w.valPop()
- mk := w.valPop()
- m := w.cs[len(w.cs)-1]
-
-		// If mv is the zero value, SetMapIndex deletes the key from the map,
- // or in this case never adds it. We need to create a properly typed
- // zero value so that this key can be set.
- if !mv.IsValid() {
- mv = reflect.Zero(m.Elem().Type().Elem())
- }
- m.Elem().SetMapIndex(mk, mv)
- case reflectwalk.ArrayElem:
- // Pop off the value and the index and set it on the array
- v := w.valPop()
- i := w.valPop().Interface().(int)
- if v.IsValid() {
- a := w.cs[len(w.cs)-1]
- ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
- if ae.CanSet() {
- ae.Set(v)
- }
- }
- case reflectwalk.SliceElem:
- // Pop off the value and the index and set it on the slice
- v := w.valPop()
- i := w.valPop().Interface().(int)
- if v.IsValid() {
- s := w.cs[len(w.cs)-1]
- se := s.Elem().Index(i)
- if se.CanSet() {
- se.Set(v)
- }
- }
- case reflectwalk.Struct:
- w.replacePointerMaybe()
-
- // Remove the struct from the container stack
- w.cs = w.cs[:len(w.cs)-1]
- case reflectwalk.StructField:
- // Pop off the value and the field
- v := w.valPop()
- f := w.valPop().Interface().(reflect.StructField)
- if v.IsValid() {
- s := w.cs[len(w.cs)-1]
- sf := reflect.Indirect(s).FieldByName(f.Name)
-
- if sf.CanSet() {
- sf.Set(v)
- }
- }
- case reflectwalk.WalkLoc:
- // Clear out the slices for GC
- w.cs = nil
- w.vals = nil
- }
-
- return nil
-}
-
-func (w *walker) Map(m reflect.Value) error {
- if w.ignoring() {
- return nil
- }
- w.lock(m)
-
- // Create the map. If the map itself is nil, then just make a nil map
- var newMap reflect.Value
- if m.IsNil() {
- newMap = reflect.New(m.Type())
- } else {
- newMap = wrapPtr(reflect.MakeMap(m.Type()))
- }
-
- w.cs = append(w.cs, newMap)
- w.valPush(newMap)
- return nil
-}
-
-func (w *walker) MapElem(m, k, v reflect.Value) error {
- return nil
-}
-
-func (w *walker) PointerEnter(v bool) error {
- if v {
- w.ps[w.depth]++
- }
- return nil
-}
-
-func (w *walker) PointerExit(v bool) error {
- if v {
- w.ps[w.depth]--
- }
- return nil
-}
-
-func (w *walker) Interface(v reflect.Value) error {
- if !v.IsValid() {
- return nil
- }
- if w.ifaceTypes == nil {
- w.ifaceTypes = make(map[uint64]reflect.Type)
- }
-
- w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
- return nil
-}
-
-func (w *walker) Primitive(v reflect.Value) error {
- if w.ignoring() {
- return nil
- }
- w.lock(v)
-
-	// IsValid verifies that v is non-zero and CanInterface verifies that
-	// we're allowed to read this value (unexported fields).
- var newV reflect.Value
- if v.IsValid() && v.CanInterface() {
- newV = reflect.New(v.Type())
- newV.Elem().Set(v)
- }
-
- w.valPush(newV)
- w.replacePointerMaybe()
- return nil
-}
-
-func (w *walker) Slice(s reflect.Value) error {
- if w.ignoring() {
- return nil
- }
- w.lock(s)
-
- var newS reflect.Value
- if s.IsNil() {
- newS = reflect.New(s.Type())
- } else {
- newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
- }
-
- w.cs = append(w.cs, newS)
- w.valPush(newS)
- return nil
-}
-
-func (w *walker) SliceElem(i int, elem reflect.Value) error {
- if w.ignoring() {
- return nil
- }
-
- // We don't write the slice here because elem might still be
- // arbitrarily complex. Just record the index and continue on.
- w.valPush(reflect.ValueOf(i))
-
- return nil
-}
-
-func (w *walker) Array(a reflect.Value) error {
- if w.ignoring() {
- return nil
- }
- w.lock(a)
-
- newA := reflect.New(a.Type())
-
- w.cs = append(w.cs, newA)
- w.valPush(newA)
- return nil
-}
-
-func (w *walker) ArrayElem(i int, elem reflect.Value) error {
- if w.ignoring() {
- return nil
- }
-
- // We don't write the array here because elem might still be
- // arbitrarily complex. Just record the index and continue on.
- w.valPush(reflect.ValueOf(i))
-
- return nil
-}
-
-func (w *walker) Struct(s reflect.Value) error {
- if w.ignoring() {
- return nil
- }
- w.lock(s)
-
- var v reflect.Value
- if c, ok := Copiers[s.Type()]; ok {
- // We have a Copier for this struct, so we use that copier to
- // get the copy, and we ignore anything deeper than this.
- w.ignoreDepth = w.depth
-
- dup, err := c(s.Interface())
- if err != nil {
- return err
- }
-
- // We need to put a pointer to the value on the value stack,
- // so allocate a new pointer and set it.
- v = reflect.New(s.Type())
- reflect.Indirect(v).Set(reflect.ValueOf(dup))
- } else {
- // No copier, we copy ourselves and allow reflectwalk to guide
- // us deeper into the structure for copying.
- v = reflect.New(s.Type())
- }
-
- // Push the value onto the value stack for setting the struct field,
- // and add the struct itself to the containers stack in case we walk
- // deeper so that its own fields can be modified.
- w.valPush(v)
- w.cs = append(w.cs, v)
-
- return nil
-}
-
-func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
- if w.ignoring() {
- return nil
- }
-
- // If PkgPath is non-empty, this is a private (unexported) field.
-	// We cannot set unexported fields, since the Go runtime doesn't allow it.
- if f.PkgPath != "" {
- return reflectwalk.SkipEntry
- }
-
- // Push the field onto the stack, we'll handle it when we exit
- // the struct field in Exit...
- w.valPush(reflect.ValueOf(f))
- return nil
-}
-
-// ignore causes the walker to ignore any further values until we exit the
-// current depth.
-func (w *walker) ignore() {
- w.ignoreDepth = w.depth
-}
-
-func (w *walker) ignoring() bool {
- return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
-}
-
-func (w *walker) pointerPeek() bool {
- return w.ps[w.depth] > 0
-}
-
-func (w *walker) valPop() reflect.Value {
- result := w.vals[len(w.vals)-1]
- w.vals = w.vals[:len(w.vals)-1]
-
- // If we're out of values, that means we popped everything off. In
- // this case, we reset the result so the next pushed value becomes
- // the result.
- if len(w.vals) == 0 {
- w.Result = nil
- }
-
- return result
-}
-
-func (w *walker) valPush(v reflect.Value) {
- w.vals = append(w.vals, v)
-
- // If we haven't set the result yet, then this is the result since
- // it is the first (outermost) value we're seeing.
- if w.Result == nil && v.IsValid() {
- w.Result = v.Interface()
- }
-}
-
-func (w *walker) replacePointerMaybe() {
- // Determine the last pointer value. If it is NOT a pointer, then
- // we need to push that onto the stack.
- if !w.pointerPeek() {
- w.valPush(reflect.Indirect(w.valPop()))
- return
- }
-
- v := w.valPop()
-
- // If the expected type is a pointer to an interface of any depth,
- // such as *interface{}, **interface{}, etc., then we need to convert
- // the value "v" from *CONCRETE to *interface{} so types match for
- // Set.
- //
-	// For example, if v is of type *Foo where Foo is a struct, v would
-	// become *interface{} instead. This only happens if we have an
-	// interface expectation at this depth.
- //
- // For more info, see GH-16
- if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
- y := reflect.New(iType) // Create *interface{}
- y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
- v = y // v is now typed *interface{} (where *v = Foo)
- }
-
- for i := 1; i < w.ps[w.depth]; i++ {
- if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
- iface := reflect.New(iType).Elem()
- iface.Set(v)
- v = iface
- }
-
- p := reflect.New(v.Type())
- p.Elem().Set(v)
- v = p
- }
-
- w.valPush(v)
-}
-
-// if this value is a Locker, lock it and add it to the locks slice
-func (w *walker) lock(v reflect.Value) {
- if !w.useLocks {
- return
- }
-
- if !v.IsValid() || !v.CanInterface() {
- return
- }
-
- type rlocker interface {
- RLocker() sync.Locker
- }
-
- var locker sync.Locker
-
- // We can't call Interface() on a value directly, since that requires
- // a copy. This is OK, since the pointer to a value which is a sync.Locker
- // is also a sync.Locker.
- if v.Kind() == reflect.Ptr {
- switch l := v.Interface().(type) {
- case rlocker:
- // don't lock a mutex directly
- if _, ok := l.(*sync.RWMutex); !ok {
- locker = l.RLocker()
- }
- case sync.Locker:
- locker = l
- }
- } else if v.CanAddr() {
- switch l := v.Addr().Interface().(type) {
- case rlocker:
- // don't lock a mutex directly
- if _, ok := l.(*sync.RWMutex); !ok {
- locker = l.RLocker()
- }
- case sync.Locker:
- locker = l
- }
- }
-
- // still no callable locker
- if locker == nil {
- return
- }
-
- // don't lock a mutex directly
- switch locker.(type) {
- case *sync.Mutex, *sync.RWMutex:
- return
- }
-
- locker.Lock()
- w.locks[w.depth] = locker
-}
-
-// wrapPtr is a helper that takes v and always makes it *v. copystructure
-// stores things internally as pointers until the last moment before unwrapping.
-func wrapPtr(v reflect.Value) reflect.Value {
- if !v.IsValid() {
- return v
- }
- vPtr := reflect.New(v.Type())
- vPtr.Elem().Set(v)
- return vPtr
-}
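A sketch of registering a custom CopierFunc via the exported Copiers map, as its documentation above describes. The Buffer type here is hypothetical; a custom copier is handy for structs with unexported fields, which StructField otherwise skips:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/copystructure"
)

// Buffer is a made-up type whose unexported field would otherwise be
// skipped by the reflection walk.
type Buffer struct{ data []byte }

func init() {
	copystructure.Copiers[reflect.TypeOf(Buffer{})] = func(v interface{}) (interface{}, error) {
		b := v.(Buffer)
		dup := make([]byte, len(b.data))
		copy(dup, b.data)
		return Buffer{data: dup}, nil
	}
}

func main() {
	b := Buffer{data: []byte("hello")}
	dup := copystructure.Must(copystructure.Copy(b)).(Buffer)
	fmt.Printf("%s\n", dup.data) // hello
}
```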
diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod
deleted file mode 100644
index d0186430..00000000
--- a/vendor/github.com/mitchellh/copystructure/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/mitchellh/copystructure
-
-require github.com/mitchellh/reflectwalk v1.0.0
diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum
deleted file mode 100644
index be572456..00000000
--- a/vendor/github.com/mitchellh/copystructure/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
deleted file mode 100644
index f9c841a5..00000000
--- a/vendor/github.com/mitchellh/go-homedir/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
deleted file mode 100644
index d70706d5..00000000
--- a/vendor/github.com/mitchellh/go-homedir/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# go-homedir
-
-This is a Go library for detecting the user's home directory without
-the use of cgo, so the library can be used in cross-compilation environments.
-
-Usage is incredibly simple: just call `homedir.Dir()` to get the home directory
-for a user, and `homedir.Expand()` to expand the `~` in a path to the home
-directory.
-
-**Why not just use `os/user`?** The built-in `os/user` package requires
-cgo on Darwin systems. This means that any Go code that uses that package
-cannot cross-compile. But 99% of the time the use for `os/user` is just to
-retrieve the home directory, which we can do for the current user without
-cgo. This library does that, enabling cross-compilation.
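The two entry points the README names are easy to exercise. A minimal usage sketch, assuming the package is importable at its canonical path (the `~/.vault-token` argument is purely illustrative):

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	dir, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", dir)

	// "~" expands to the home directory; other paths pass through unchanged.
	expanded, err := homedir.Expand("~/.vault-token")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expanded:", expanded)
}
```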
diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod
deleted file mode 100644
index 7efa09a0..00000000
--- a/vendor/github.com/mitchellh/go-homedir/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/mitchellh/go-homedir
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
deleted file mode 100644
index 25378537..00000000
--- a/vendor/github.com/mitchellh/go-homedir/homedir.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package homedir
-
-import (
- "bytes"
- "errors"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "sync"
-)
-
-// DisableCache will disable caching of the home directory. Caching is enabled
-// by default.
-var DisableCache bool
-
-var homedirCache string
-var cacheLock sync.RWMutex
-
-// Dir returns the home directory for the executing user.
-//
-// This uses an OS-specific method for discovering the home directory.
-// An error is returned if a home directory cannot be detected.
-func Dir() (string, error) {
- if !DisableCache {
- cacheLock.RLock()
- cached := homedirCache
- cacheLock.RUnlock()
- if cached != "" {
- return cached, nil
- }
- }
-
- cacheLock.Lock()
- defer cacheLock.Unlock()
-
- var result string
- var err error
- if runtime.GOOS == "windows" {
- result, err = dirWindows()
- } else {
- // Unix-like system, so just assume Unix
- result, err = dirUnix()
- }
-
- if err != nil {
- return "", err
- }
- homedirCache = result
- return result, nil
-}
-
-// Expand expands the path to include the home directory if the path
-// is prefixed with `~`. If it isn't prefixed with `~`, the path is
-// returned as-is.
-func Expand(path string) (string, error) {
- if len(path) == 0 {
- return path, nil
- }
-
- if path[0] != '~' {
- return path, nil
- }
-
- if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
- return "", errors.New("cannot expand user-specific home dir")
- }
-
- dir, err := Dir()
- if err != nil {
- return "", err
- }
-
- return filepath.Join(dir, path[1:]), nil
-}
-
-// Reset clears the cache, forcing the next call to Dir to re-detect
-// the home directory. This generally never has to be called, but can be
-// useful in tests if you're modifying the home directory via the HOME
-// env var or something.
-func Reset() {
- cacheLock.Lock()
- defer cacheLock.Unlock()
- homedirCache = ""
-}
-
-func dirUnix() (string, error) {
- homeEnv := "HOME"
- if runtime.GOOS == "plan9" {
- // On plan9, env vars are lowercase.
- homeEnv = "home"
- }
-
- // First prefer the HOME environmental variable
- if home := os.Getenv(homeEnv); home != "" {
- return home, nil
- }
-
- var stdout bytes.Buffer
-
- // If that fails, try OS specific commands
- if runtime.GOOS == "darwin" {
- cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
- cmd.Stdout = &stdout
- if err := cmd.Run(); err == nil {
- result := strings.TrimSpace(stdout.String())
- if result != "" {
- return result, nil
- }
- }
- } else {
- cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
- cmd.Stdout = &stdout
- if err := cmd.Run(); err != nil {
- // If the error is ErrNotFound, we ignore it. Otherwise, return it.
- if err != exec.ErrNotFound {
- return "", err
- }
- } else {
- if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
- // username:password:uid:gid:gecos:home:shell
- passwdParts := strings.SplitN(passwd, ":", 7)
- if len(passwdParts) > 5 {
- return passwdParts[5], nil
- }
- }
- }
- }
-
- // If all else fails, try the shell
- stdout.Reset()
- cmd := exec.Command("sh", "-c", "cd && pwd")
- cmd.Stdout = &stdout
- if err := cmd.Run(); err != nil {
- return "", err
- }
-
- result := strings.TrimSpace(stdout.String())
- if result == "" {
- return "", errors.New("blank output when reading home directory")
- }
-
- return result, nil
-}
-
-func dirWindows() (string, error) {
- // First prefer the HOME environmental variable
- if home := os.Getenv("HOME"); home != "" {
- return home, nil
- }
-
- // Prefer standard environment variable USERPROFILE
- if home := os.Getenv("USERPROFILE"); home != "" {
- return home, nil
- }
-
- drive := os.Getenv("HOMEDRIVE")
- path := os.Getenv("HOMEPATH")
- home := drive + path
- if drive == "" || path == "" {
- return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
- }
-
- return home, nil
-}
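The getent branch above recovers the home directory from a colon-delimited passwd record by taking field index 5. The parsing step in isolation, with a made-up record:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A getent(1) passwd record: username:password:uid:gid:gecos:home:shell
	record := "alice:x:1000:1000:Alice:/home/alice:/bin/bash"
	parts := strings.SplitN(record, ":", 7)
	if len(parts) > 5 {
		fmt.Println("home:", parts[5]) // home: /home/alice
	}
}
```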
diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml
deleted file mode 100644
index 928d000e..00000000
--- a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
- - 1.8
- - 1.x
- - tip
-
-script:
- - go test
-
-matrix:
- allow_failures:
- - go: tip
diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE
deleted file mode 100644
index a3866a29..00000000
--- a/vendor/github.com/mitchellh/go-testing-interface/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md
deleted file mode 100644
index 26781bba..00000000
--- a/vendor/github.com/mitchellh/go-testing-interface/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# go-testing-interface
-
-go-testing-interface is a Go library that exports an interface that
-`*testing.T` implements as well as a runtime version you can use in its
-place.
-
-The purpose of this library is to let you export test helpers as a
-public API without depending on the "testing" package, since you can't
-create a `*testing.T` struct manually. This lets you, for example, use the
-public testing APIs to generate mock data at runtime, rather than just at
-test time.
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface).
-
-Given a test helper written using `go-testing-interface` like this:
-
- import "github.com/mitchellh/go-testing-interface"
-
- func TestHelper(t testing.T) {
- t.Fatal("I failed")
- }
-
-You can call the test helper in a real test easily:
-
- import "testing"
-
- func TestThing(t *testing.T) {
- TestHelper(t)
- }
-
-You can also call the test helper at runtime if needed:
-
- import "github.com/mitchellh/go-testing-interface"
-
- func main() {
- TestHelper(&testing.RuntimeT{})
- }
-
-## Why?!
-
-**Why would I call a test helper that takes a `*testing.T` at runtime?**
-
-You probably shouldn't. The only use case I've seen (and I've had) for this
-is to implement a "dev mode" for a service where the test helpers are used
-to populate mock data, create a mock DB, perhaps run service dependencies
-in-memory, etc.
-
-Outside of a "dev mode", I've never seen a use case for this and I think
-there shouldn't be one since the point of the `testing.T` interface is that
-you can fail immediately.
diff --git a/vendor/github.com/mitchellh/go-testing-interface/go.mod b/vendor/github.com/mitchellh/go-testing-interface/go.mod
deleted file mode 100644
index 062796de..00000000
--- a/vendor/github.com/mitchellh/go-testing-interface/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/mitchellh/go-testing-interface
diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go
deleted file mode 100644
index 204afb42..00000000
--- a/vendor/github.com/mitchellh/go-testing-interface/testing.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// +build !go1.9
-
-package testing
-
-import (
- "fmt"
- "log"
-)
-
-// T is the interface that mimics the standard library *testing.T.
-//
-// In unit tests you can just pass a *testing.T struct. At runtime, outside
-// of tests, you can pass in a RuntimeT struct from this package.
-type T interface {
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
- Fail()
- FailNow()
- Failed() bool
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Log(args ...interface{})
- Logf(format string, args ...interface{})
- Name() string
- Skip(args ...interface{})
- SkipNow()
- Skipf(format string, args ...interface{})
- Skipped() bool
-}
-
-// RuntimeT implements T and can be instantiated and run at runtime to
-// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
-// for calls to Fatal. For calls to Error, you'll have to check the errors
-// list to determine whether to exit yourself. Name and Skip methods are
-// unimplemented noops.
-type RuntimeT struct {
- failed bool
-}
-
-func (t *RuntimeT) Error(args ...interface{}) {
- log.Println(fmt.Sprintln(args...))
- t.Fail()
-}
-
-func (t *RuntimeT) Errorf(format string, args ...interface{}) {
- log.Println(fmt.Sprintf(format, args...))
- t.Fail()
-}
-
-func (t *RuntimeT) Fatal(args ...interface{}) {
- log.Println(fmt.Sprintln(args...))
- t.FailNow()
-}
-
-func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
- log.Println(fmt.Sprintf(format, args...))
- t.FailNow()
-}
-
-func (t *RuntimeT) Fail() {
- t.failed = true
-}
-
-func (t *RuntimeT) FailNow() {
- panic("testing.T failed, see logs for output (if any)")
-}
-
-func (t *RuntimeT) Failed() bool {
- return t.failed
-}
-
-func (t *RuntimeT) Log(args ...interface{}) {
- log.Println(fmt.Sprintln(args...))
-}
-
-func (t *RuntimeT) Logf(format string, args ...interface{}) {
- log.Println(fmt.Sprintf(format, args...))
-}
-
-func (t *RuntimeT) Name() string { return "" }
-func (t *RuntimeT) Skip(args ...interface{}) {}
-func (t *RuntimeT) SkipNow() {}
-func (t *RuntimeT) Skipf(format string, args ...interface{}) {}
-func (t *RuntimeT) Skipped() bool { return false }
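Because RuntimeT only panics on Fatal/FailNow and merely flags Error, runtime callers are expected to check Failed() themselves, as the doc comment above notes. A sketch of that pattern; populateFixtures and loadSeedData are hypothetical stand-ins for real dev-mode setup work:

```go
package main

import (
	"log"

	testing "github.com/mitchellh/go-testing-interface"
)

// populateFixtures is a hypothetical helper written against the T interface.
func populateFixtures(t testing.T) {
	if err := loadSeedData(); err != nil {
		t.Error("seed data failed:", err) // flags failure, does not panic
	}
}

func loadSeedData() error { return nil } // stand-in for real setup work

func main() {
	rt := &testing.RuntimeT{}
	populateFixtures(rt)
	if rt.Failed() {
		log.Fatal("dev-mode fixtures failed; see logs above")
	}
}
```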
diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go
deleted file mode 100644
index 31b42cad..00000000
--- a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// +build go1.9
-
-// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition
-// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC
-// and is set for release shortly. We'll support this on master as the default
-// as soon as 1.9 is released.
-
-package testing
-
-import (
- "fmt"
- "log"
-)
-
-// T is the interface that mimics the standard library *testing.T.
-//
-// In unit tests you can just pass a *testing.T struct. At runtime, outside
-// of tests, you can pass in a RuntimeT struct from this package.
-type T interface {
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
- Fail()
- FailNow()
- Failed() bool
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Log(args ...interface{})
- Logf(format string, args ...interface{})
- Name() string
- Skip(args ...interface{})
- SkipNow()
- Skipf(format string, args ...interface{})
- Skipped() bool
- Helper()
-}
-
-// RuntimeT implements T and can be instantiated and run at runtime to
-// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
-// for calls to Fatal. For calls to Error, you'll have to check the errors
-// list to determine whether to exit yourself.
-type RuntimeT struct {
- skipped bool
- failed bool
-}
-
-func (t *RuntimeT) Error(args ...interface{}) {
- log.Println(fmt.Sprintln(args...))
- t.Fail()
-}
-
-func (t *RuntimeT) Errorf(format string, args ...interface{}) {
- log.Printf(format, args...)
- t.Fail()
-}
-
-func (t *RuntimeT) Fail() {
- t.failed = true
-}
-
-func (t *RuntimeT) FailNow() {
- panic("testing.T failed, see logs for output (if any)")
-}
-
-func (t *RuntimeT) Failed() bool {
- return t.failed
-}
-
-func (t *RuntimeT) Fatal(args ...interface{}) {
- log.Print(args...)
- t.FailNow()
-}
-
-func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
- log.Printf(format, args...)
- t.FailNow()
-}
-
-func (t *RuntimeT) Log(args ...interface{}) {
- log.Println(fmt.Sprintln(args...))
-}
-
-func (t *RuntimeT) Logf(format string, args ...interface{}) {
- log.Println(fmt.Sprintf(format, args...))
-}
-
-func (t *RuntimeT) Name() string {
- return ""
-}
-
-func (t *RuntimeT) Skip(args ...interface{}) {
- log.Print(args...)
- t.SkipNow()
-}
-
-func (t *RuntimeT) SkipNow() {
- t.skipped = true
-}
-
-func (t *RuntimeT) Skipf(format string, args ...interface{}) {
- log.Printf(format, args...)
- t.SkipNow()
-}
-
-func (t *RuntimeT) Skipped() bool {
- return t.skipped
-}
-
-func (t *RuntimeT) Helper() {}
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
deleted file mode 100644
index 1689c7d7..00000000
--- a/vendor/github.com/mitchellh/mapstructure/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-
-go:
- - "1.11.x"
- - tip
-
-script:
- - go test
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
deleted file mode 100644
index 3b3cb723..00000000
--- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
+++ /dev/null
@@ -1,21 +0,0 @@
-## 1.1.2
-
-* Fix error when a decode hook decodes an interface implementation into an
-  interface type. [GH-140]
-
-## 1.1.1
-
-* Fix panic that can happen in `decodePtr`
-
-## 1.1.0
-
-* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
-* Support struct to struct decoding [GH-137]
-* If source map value is nil, then destination map value is nil (instead of empty)
-* If source slice value is nil, then destination slice value is nil (instead of empty)
-* If source pointer is nil, then destination pointer is set to nil (instead of
- allocated zero value of type)
-
-## 1.0.0
-
-* Initial tagged stable release.
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
deleted file mode 100644
index f9c841a5..00000000
--- a/vendor/github.com/mitchellh/mapstructure/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
deleted file mode 100644
index 0018dc7d..00000000
--- a/vendor/github.com/mitchellh/mapstructure/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
-
-mapstructure is a Go library for decoding generic map values to structures
-and vice versa, while providing helpful error handling.
-
-This library is most useful when decoding values from some data stream (JSON,
-Gob, etc.) where you don't _quite_ know the structure of the underlying data
-until you read a part of it. You can therefore read a `map[string]interface{}`
-and use this library to decode it into the proper underlying native Go
-structure.
-
-## Installation
-
-Standard `go get`:
-
-```
-$ go get github.com/mitchellh/mapstructure
-```
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
-
-The `Decode` function has examples associated with it there.
-
-## But Why?!
-
-Go offers fantastic standard libraries for decoding formats such as JSON.
-The standard method is to have a struct pre-created, and populate that struct
-from the bytes of the encoded format. This is great, but a problem arises if
-you have configuration or an encoding that changes slightly depending on
-specific fields. For example, consider this JSON:
-
-```json
-{
- "type": "person",
- "name": "Mitchell"
-}
-```
-
-Perhaps we can't populate a specific structure without first reading
-the "type" field from the JSON. We could always do two passes over the
-decoding of the JSON (reading the "type" first, and the rest later).
-However, it is much simpler to just decode this into a `map[string]interface{}`
-structure, read the "type" key, then use something like this library
-to decode it into the proper structure.
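The two-phase decode the README describes looks roughly like this in practice; the Person type and the switch on "type" are illustrative, not from the library:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

// Person is a hypothetical target type for the "person" payloads.
type Person struct {
	Name string `mapstructure:"name"`
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// Pass 1: decode into a generic map and inspect the discriminator.
	var generic map[string]interface{}
	if err := json.Unmarshal(raw, &generic); err != nil {
		log.Fatal(err)
	}

	// Pass 2: pick the concrete type and let mapstructure fill it in.
	switch generic["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(generic, &p); err != nil {
			log.Fatal(err)
		}
		fmt.Println("person:", p.Name) // person: Mitchell
	default:
		log.Fatalf("unknown type %v", generic["type"])
	}
}
```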
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
deleted file mode 100644
index 1f0abc65..00000000
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package mapstructure
-
-import (
- "errors"
- "fmt"
- "net"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
-// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
-func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
- // Create variables here so we can reference them with the reflect pkg
- var f1 DecodeHookFuncType
- var f2 DecodeHookFuncKind
-
- // Fill in the variables into this interface and the rest is done
- // automatically using the reflect package.
- potential := []interface{}{f1, f2}
-
- v := reflect.ValueOf(h)
- vt := v.Type()
- for _, raw := range potential {
- pt := reflect.ValueOf(raw).Type()
- if vt.ConvertibleTo(pt) {
- return v.Convert(pt).Interface()
- }
- }
-
- return nil
-}
-
-// DecodeHookExec executes the given decode hook. This should be used rather
-// than calling a hook directly, since it transparently supports the older
-// backwards-compatible DecodeHookFunc form that took reflect.Kind instead of reflect.Type.
-func DecodeHookExec(
- raw DecodeHookFunc,
- from reflect.Type, to reflect.Type,
- data interface{}) (interface{}, error) {
- switch f := typedDecodeHook(raw).(type) {
- case DecodeHookFuncType:
- return f(from, to, data)
- case DecodeHookFuncKind:
- return f(from.Kind(), to.Kind(), data)
- default:
- return nil, errors.New("invalid decode hook signature")
- }
-}
-
-// ComposeDecodeHookFunc creates a single DecodeHookFunc that
-// automatically composes multiple DecodeHookFuncs.
-//
-// The composed funcs are called in order, with the result of the
-// previous transformation.
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- var err error
- for _, f1 := range fs {
- data, err = DecodeHookExec(f1, f, t, data)
- if err != nil {
- return nil, err
- }
-
- // Modify the from kind to be correct with the new data
- f = nil
- if val := reflect.ValueOf(data); val.IsValid() {
- f = val.Type()
- }
- }
-
- return data, nil
- }
-}
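A sketch of hook composition wired through a decoder, using two of the stock hooks defined below in this file; the config struct and field names are invented for the example:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type config struct {
	Timeout time.Duration `mapstructure:"timeout"`
	Tags    []string      `mapstructure:"tags"`
}

func main() {
	var out config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &out,
		// Hooks run in order; each sees the previous hook's output.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		log.Fatal(err)
	}

	input := map[string]interface{}{
		"timeout": "1m30s",
		"tags":    "a,b,c",
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Timeout, out.Tags) // 1m30s [a b c]
}
```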
-
-// StringToSliceHookFunc returns a DecodeHookFunc that converts
-// string to []string by splitting on the given sep.
-func StringToSliceHookFunc(sep string) DecodeHookFunc {
- return func(
- f reflect.Kind,
- t reflect.Kind,
- data interface{}) (interface{}, error) {
- if f != reflect.String || t != reflect.Slice {
- return data, nil
- }
-
- raw := data.(string)
- if raw == "" {
- return []string{}, nil
- }
-
- return strings.Split(raw, sep), nil
- }
-}
-
-// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
-// strings to time.Duration.
-func StringToTimeDurationHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(time.Duration(5)) {
- return data, nil
- }
-
- // Convert it by parsing
- return time.ParseDuration(data.(string))
- }
-}
-
-// StringToIPHookFunc returns a DecodeHookFunc that converts
-// strings to net.IP
-func StringToIPHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(net.IP{}) {
- return data, nil
- }
-
- // Convert it by parsing
- ip := net.ParseIP(data.(string))
- if ip == nil {
- return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
- }
-
- return ip, nil
- }
-}
-
-// StringToIPNetHookFunc returns a DecodeHookFunc that converts
-// strings to net.IPNet
-func StringToIPNetHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(net.IPNet{}) {
- return data, nil
- }
-
- // Convert it by parsing
- _, net, err := net.ParseCIDR(data.(string))
- return net, err
- }
-}
-
-// StringToTimeHookFunc returns a DecodeHookFunc that converts
-// strings to time.Time.
-func StringToTimeHookFunc(layout string) DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(time.Time{}) {
- return data, nil
- }
-
- // Convert it by parsing
- return time.Parse(layout, data.(string))
- }
-}
-
-// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
-// the decoder.
-//
-// Note that this is significantly different from the WeaklyTypedInput option
-// of the DecoderConfig.
-func WeaklyTypedHook(
- f reflect.Kind,
- t reflect.Kind,
- data interface{}) (interface{}, error) {
- dataVal := reflect.ValueOf(data)
- switch t {
- case reflect.String:
- switch f {
- case reflect.Bool:
- if dataVal.Bool() {
- return "1", nil
- }
- return "0", nil
- case reflect.Float32:
- return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
- case reflect.Int:
- return strconv.FormatInt(dataVal.Int(), 10), nil
- case reflect.Slice:
- dataType := dataVal.Type()
- elemKind := dataType.Elem().Kind()
- if elemKind == reflect.Uint8 {
- return string(dataVal.Interface().([]uint8)), nil
- }
- case reflect.Uint:
- return strconv.FormatUint(dataVal.Uint(), 10), nil
- }
- }
-
- return data, nil
-}
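WeaklyTypedHook is an exported function, so it can be called directly, which makes its string-directed coercions easy to see. A quick sketch:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/mapstructure"
)

func main() {
	// Direct calls to the hook show its string-directed coercions.
	v, _ := mapstructure.WeaklyTypedHook(reflect.Bool, reflect.String, true)
	fmt.Println(v) // 1

	v, _ = mapstructure.WeaklyTypedHook(reflect.Int, reflect.String, 42)
	fmt.Println(v) // 42

	v, _ = mapstructure.WeaklyTypedHook(reflect.Slice, reflect.String, []uint8("hi"))
	fmt.Println(v) // hi
}
```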
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
deleted file mode 100644
index 47a99e5a..00000000
--- a/vendor/github.com/mitchellh/mapstructure/error.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package mapstructure
-
-import (
- "errors"
- "fmt"
- "sort"
- "strings"
-)
-
-// Error implements the error interface and can represent multiple
-// errors that occur in the course of a single decode.
-type Error struct {
- Errors []string
-}
-
-func (e *Error) Error() string {
- points := make([]string, len(e.Errors))
- for i, err := range e.Errors {
- points[i] = fmt.Sprintf("* %s", err)
- }
-
- sort.Strings(points)
- return fmt.Sprintf(
- "%d error(s) decoding:\n\n%s",
- len(e.Errors), strings.Join(points, "\n"))
-}
-
-// WrappedErrors implements the errwrap.Wrapper interface to make this
-// return value more useful with the errwrap and go-multierror libraries.
-func (e *Error) WrappedErrors() []error {
- if e == nil {
- return nil
- }
-
- result := make([]error, len(e.Errors))
- for i, e := range e.Errors {
- result[i] = errors.New(e)
- }
-
- return result
-}
-
-func appendErrors(errors []string, err error) []string {
- switch e := err.(type) {
- case *Error:
- return append(errors, e.Errors...)
- default:
- return append(errors, e.Error())
- }
-}
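A sketch showing the aggregate error's formatting and unwrapping; the decoder normally builds the Errors slice itself, and it is filled by hand here purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	// Normally the decoder accumulates these; built by hand here for show.
	err := &mapstructure.Error{Errors: []string{
		"'Port' expected type 'int', got unconvertible type 'string'",
		"'Name' expected type 'string', got unconvertible type 'int'",
	}}

	fmt.Println(err.Error())
	// Output begins "2 error(s) decoding:" followed by sorted bullet points.

	for _, wrapped := range err.WrappedErrors() {
		fmt.Println("wrapped:", wrapped)
	}
}
```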
diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod
deleted file mode 100644
index d2a71256..00000000
--- a/vendor/github.com/mitchellh/mapstructure/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/mitchellh/mapstructure
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
deleted file mode 100644
index 256ee63f..00000000
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ /dev/null
@@ -1,1149 +0,0 @@
-// Package mapstructure exposes functionality to convert an arbitrary
-// map[string]interface{} into a native Go structure.
-//
-// The Go structure can be arbitrarily complex, containing slices,
-// other structs, etc. and the decoder will properly decode nested
-// maps and so on into the proper structures in the native Go struct.
-// See the examples to see what the decoder is capable of.
-package mapstructure
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "reflect"
- "sort"
- "strconv"
- "strings"
-)
-
-// DecodeHookFunc is the callback function that can be used for
-// data transformations. See "DecodeHook" in the DecoderConfig
-// struct.
-//
-// The type should be DecodeHookFuncType or DecodeHookFuncKind.
-// Either is accepted. Types are a superset of Kinds (Types can return
-// Kinds) and are generally a richer thing to use, but Kinds are simpler
-// if you only need those.
-//
-// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
-// we started with Kinds and then realized Types were the better solution,
-// but have a promise to not break backwards compat so we now support
-// both.
-type DecodeHookFunc interface{}
-
-// DecodeHookFuncType is a DecodeHookFunc which has complete information about
-// the source and target types.
-type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
-
-// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
-// source and target types.
-type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
-
-// DecoderConfig is the configuration that is used to create a new decoder
-// and allows customization of various aspects of decoding.
-type DecoderConfig struct {
- // DecodeHook, if set, will be called before any decoding and any
- // type conversion (if WeaklyTypedInput is on). This lets you modify
- // the values before they're set down onto the resulting struct.
- //
- // If an error is returned, the entire decode will fail with that
- // error.
- DecodeHook DecodeHookFunc
-
- // If ErrorUnused is true, then it is an error for there to exist
- // keys in the original map that were unused in the decoding process
- // (extra keys).
- ErrorUnused bool
-
- // ZeroFields, if set to true, will zero fields before writing them.
- // For example, a map will be emptied before decoded values are put in
- // it. If this is false, a map will be merged.
- ZeroFields bool
-
- // If WeaklyTypedInput is true, the decoder will make the following
- // "weak" conversions:
- //
- // - bools to string (true = "1", false = "0")
- // - numbers to string (base 10)
- // - bools to int/uint (true = 1, false = 0)
- // - strings to int/uint (base implied by prefix)
- // - int to bool (true if value != 0)
- // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
- // FALSE, false, False. Anything else is an error)
- // - empty array = empty map and vice versa
- // - negative numbers to overflowed uint values (base 10)
- // - slice of maps to a merged map
- // - single values are converted to slices if required. Each
- // element is weakly decoded. For example: "4" can become []int{4}
- // if the target type is an int slice.
- //
- WeaklyTypedInput bool
-
- // Metadata is the struct that will contain extra metadata about
- // the decoding. If this is nil, then no metadata will be tracked.
- Metadata *Metadata
-
- // Result is a pointer to the struct that will contain the decoded
- // value.
- Result interface{}
-
- // The tag name that mapstructure reads for field names. This
- // defaults to "mapstructure"
- TagName string
-}
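A sketch exercising two of these knobs, TagName and ErrorUnused; the server struct and input map are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type server struct {
	Addr string `cfg:"addr"`
}

func main() {
	var out server
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:      &out,
		TagName:     "cfg", // read `cfg:"..."` tags instead of `mapstructure:"..."`
		ErrorUnused: true,  // reject keys that decode into nothing
	})
	if err != nil {
		panic(err)
	}

	input := map[string]interface{}{"addr": ":8200", "extra": true}
	err = dec.Decode(input)
	fmt.Println(out.Addr) // :8200
	fmt.Println(err)      // reports the invalid key "extra"
}
```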
-
-// A Decoder takes a raw interface value and turns it into structured
-// data, keeping track of rich error information along the way in case
-// anything goes wrong. Unlike the basic top-level Decode method, you can
-// more finely control how the Decoder behaves using the DecoderConfig
-// structure. The top-level Decode method is just a convenience that sets
-// up the most basic Decoder.
-type Decoder struct {
- config *DecoderConfig
-}
-
-// Metadata contains information about decoding a structure that
-// is tedious or difficult to get otherwise.
-type Metadata struct {
- // Keys are the keys of the structure which were successfully decoded
- Keys []string
-
- // Unused is a slice of keys that were found in the raw value but
- // weren't decoded since there was no matching field in the result interface
- Unused []string
-}
-
-// Decode takes an input structure and uses reflection to translate it to
-// the output structure. output must be a pointer to a map or struct.
-func Decode(input interface{}, output interface{}) error {
- config := &DecoderConfig{
- Metadata: nil,
- Result: output,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// WeakDecode is the same as Decode but is shorthand to enable
-// WeaklyTypedInput. See DecoderConfig for more info.
-func WeakDecode(input, output interface{}) error {
- config := &DecoderConfig{
- Metadata: nil,
- Result: output,
- WeaklyTypedInput: true,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// DecodeMetadata is the same as Decode, but is shorthand to
-// enable metadata collection. See DecoderConfig for more info.
-func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
- config := &DecoderConfig{
- Metadata: metadata,
- Result: output,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// WeakDecodeMetadata is the same as Decode, but is shorthand to
-// enable both WeaklyTypedInput and metadata collection. See
-// DecoderConfig for more info.
-func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
- config := &DecoderConfig{
- Metadata: metadata,
- Result: output,
- WeaklyTypedInput: true,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
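A sketch of metadata collection via DecodeMetadata; the person struct and input are illustrative:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type person struct {
	Name string
	Age  int
}

func main() {
	input := map[string]interface{}{
		"Name":  "Mitchell",
		"Age":   91,
		"Email": "ignored@example.com", // no matching field
	}

	var out person
	var md mapstructure.Metadata
	if err := mapstructure.DecodeMetadata(input, &out, &md); err != nil {
		panic(err)
	}
	fmt.Println("decoded:", md.Keys)  // keys that landed in the struct
	fmt.Println("unused:", md.Unused) // keys with no matching field
}
```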
-
-// NewDecoder returns a new decoder for the given configuration. Once
-// a decoder has been returned, the same configuration must not be used
-// again.
-func NewDecoder(config *DecoderConfig) (*Decoder, error) {
- val := reflect.ValueOf(config.Result)
- if val.Kind() != reflect.Ptr {
- return nil, errors.New("result must be a pointer")
- }
-
- val = val.Elem()
- if !val.CanAddr() {
- return nil, errors.New("result must be addressable (a pointer)")
- }
-
- if config.Metadata != nil {
- if config.Metadata.Keys == nil {
- config.Metadata.Keys = make([]string, 0)
- }
-
- if config.Metadata.Unused == nil {
- config.Metadata.Unused = make([]string, 0)
- }
- }
-
- if config.TagName == "" {
- config.TagName = "mapstructure"
- }
-
- result := &Decoder{
- config: config,
- }
-
- return result, nil
-}
-
-// Decode decodes the given raw interface to the target pointer specified
-// by the configuration.
-func (d *Decoder) Decode(input interface{}) error {
- return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
-}
-
-// Decodes an unknown data type into a specific reflection value.
-func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
- var inputVal reflect.Value
- if input != nil {
- inputVal = reflect.ValueOf(input)
-
- // We need to check here if input is a typed nil. Typed nils won't
- // match the "input == nil" below so we check that here.
- if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
- input = nil
- }
- }
-
- if input == nil {
- // If the data is nil, then we don't set anything, unless ZeroFields is set
- // to true.
- if d.config.ZeroFields {
- outVal.Set(reflect.Zero(outVal.Type()))
-
- if d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
- }
- return nil
- }
-
- if !inputVal.IsValid() {
- // If the input value is invalid, then we just set the value
- // to be the zero value.
- outVal.Set(reflect.Zero(outVal.Type()))
- if d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
- return nil
- }
-
- if d.config.DecodeHook != nil {
- // We have a DecodeHook, so let's pre-process the input.
- var err error
- input, err = DecodeHookExec(
- d.config.DecodeHook,
- inputVal.Type(), outVal.Type(), input)
- if err != nil {
- return fmt.Errorf("error decoding '%s': %s", name, err)
- }
- }
-
- var err error
- outputKind := getKind(outVal)
- switch outputKind {
- case reflect.Bool:
- err = d.decodeBool(name, input, outVal)
- case reflect.Interface:
- err = d.decodeBasic(name, input, outVal)
- case reflect.String:
- err = d.decodeString(name, input, outVal)
- case reflect.Int:
- err = d.decodeInt(name, input, outVal)
- case reflect.Uint:
- err = d.decodeUint(name, input, outVal)
- case reflect.Float32:
- err = d.decodeFloat(name, input, outVal)
- case reflect.Struct:
- err = d.decodeStruct(name, input, outVal)
- case reflect.Map:
- err = d.decodeMap(name, input, outVal)
- case reflect.Ptr:
- err = d.decodePtr(name, input, outVal)
- case reflect.Slice:
- err = d.decodeSlice(name, input, outVal)
- case reflect.Array:
- err = d.decodeArray(name, input, outVal)
- case reflect.Func:
- err = d.decodeFunc(name, input, outVal)
- default:
- // If we reached this point then we weren't able to decode it
- return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
- }
-
- // If we reached here, then we successfully decoded SOMETHING, so
-	// mark the key as used if we're tracking metadata.
- if d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
-
- return err
-}
-
-// This decodes a basic type (bool, int, string, etc.) and sets the
-// value to "data" of that type.
-func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
- if val.IsValid() && val.Elem().IsValid() {
- return d.decode(name, data, val.Elem())
- }
-
- dataVal := reflect.ValueOf(data)
-
- // If the input data is a pointer, and the assigned type is the dereference
- // of that exact pointer, then indirect it so that we can assign it.
- // Example: *string to string
- if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
- dataVal = reflect.Indirect(dataVal)
- }
-
- if !dataVal.IsValid() {
- dataVal = reflect.Zero(val.Type())
- }
-
- dataValType := dataVal.Type()
- if !dataValType.AssignableTo(val.Type()) {
- return fmt.Errorf(
- "'%s' expected type '%s', got '%s'",
- name, val.Type(), dataValType)
- }
-
- val.Set(dataVal)
- return nil
-}
-
-func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
-
- converted := true
- switch {
- case dataKind == reflect.String:
- val.SetString(dataVal.String())
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetString("1")
- } else {
- val.SetString("0")
- }
- case dataKind == reflect.Int && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatInt(dataVal.Int(), 10))
- case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
- case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
- case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
- dataKind == reflect.Array && d.config.WeaklyTypedInput:
- dataType := dataVal.Type()
- elemKind := dataType.Elem().Kind()
- switch elemKind {
- case reflect.Uint8:
- var uints []uint8
- if dataKind == reflect.Array {
- uints = make([]uint8, dataVal.Len(), dataVal.Len())
- for i := range uints {
- uints[i] = dataVal.Index(i).Interface().(uint8)
- }
- } else {
- uints = dataVal.Interface().([]uint8)
- }
- val.SetString(string(uints))
- default:
- converted = false
- }
- default:
- converted = false
- }
-
- if !converted {
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
- }
-
- return nil
-}
-
-func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
- dataType := dataVal.Type()
-
- switch {
- case dataKind == reflect.Int:
- val.SetInt(dataVal.Int())
- case dataKind == reflect.Uint:
- val.SetInt(int64(dataVal.Uint()))
- case dataKind == reflect.Float32:
- val.SetInt(int64(dataVal.Float()))
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetInt(1)
- } else {
- val.SetInt(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
- if err == nil {
- val.SetInt(i)
- } else {
- return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
- }
- case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
- jn := data.(json.Number)
- i, err := jn.Int64()
- if err != nil {
- return fmt.Errorf(
- "error decoding json.Number into %s: %s", name, err)
- }
- val.SetInt(i)
- default:
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
- }
-
- return nil
-}
-
-func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
-
- switch {
- case dataKind == reflect.Int:
- i := dataVal.Int()
- if i < 0 && !d.config.WeaklyTypedInput {
- return fmt.Errorf("cannot parse '%s', %d overflows uint",
- name, i)
- }
- val.SetUint(uint64(i))
- case dataKind == reflect.Uint:
- val.SetUint(dataVal.Uint())
- case dataKind == reflect.Float32:
- f := dataVal.Float()
- if f < 0 && !d.config.WeaklyTypedInput {
- return fmt.Errorf("cannot parse '%s', %f overflows uint",
- name, f)
- }
- val.SetUint(uint64(f))
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetUint(1)
- } else {
- val.SetUint(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
- if err == nil {
- val.SetUint(i)
- } else {
- return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
- }
- default:
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
- }
-
- return nil
-}
-
-func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
-
- switch {
- case dataKind == reflect.Bool:
- val.SetBool(dataVal.Bool())
- case dataKind == reflect.Int && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Int() != 0)
- case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Uint() != 0)
- case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Float() != 0)
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- b, err := strconv.ParseBool(dataVal.String())
- if err == nil {
- val.SetBool(b)
- } else if dataVal.String() == "" {
- val.SetBool(false)
- } else {
- return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
- }
- default:
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
- }
-
- return nil
-}
-
-func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
- dataType := dataVal.Type()
-
- switch {
- case dataKind == reflect.Int:
- val.SetFloat(float64(dataVal.Int()))
- case dataKind == reflect.Uint:
- val.SetFloat(float64(dataVal.Uint()))
- case dataKind == reflect.Float32:
- val.SetFloat(dataVal.Float())
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetFloat(1)
- } else {
- val.SetFloat(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
- if err == nil {
- val.SetFloat(f)
- } else {
- return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
- }
- case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
- jn := data.(json.Number)
- i, err := jn.Float64()
- if err != nil {
- return fmt.Errorf(
- "error decoding json.Number into %s: %s", name, err)
- }
- val.SetFloat(i)
- default:
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
- }
-
- return nil
-}
-
-func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
- valType := val.Type()
- valKeyType := valType.Key()
- valElemType := valType.Elem()
-
- // By default we overwrite keys in the current map
- valMap := val
-
- // If the map is nil or we're purposely zeroing fields, make a new map
- if valMap.IsNil() || d.config.ZeroFields {
- // Make a new map to hold our result
- mapType := reflect.MapOf(valKeyType, valElemType)
- valMap = reflect.MakeMap(mapType)
- }
-
- // Check input type and based on the input type jump to the proper func
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- switch dataVal.Kind() {
- case reflect.Map:
- return d.decodeMapFromMap(name, dataVal, val, valMap)
-
- case reflect.Struct:
- return d.decodeMapFromStruct(name, dataVal, val, valMap)
-
- case reflect.Array, reflect.Slice:
- if d.config.WeaklyTypedInput {
- return d.decodeMapFromSlice(name, dataVal, val, valMap)
- }
-
- fallthrough
-
- default:
- return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
- }
-}
-
-func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- // Special case for BC reasons (covered by tests)
- if dataVal.Len() == 0 {
- val.Set(valMap)
- return nil
- }
-
- for i := 0; i < dataVal.Len(); i++ {
- err := d.decode(
- fmt.Sprintf("%s[%d]", name, i),
- dataVal.Index(i).Interface(), val)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- valType := val.Type()
- valKeyType := valType.Key()
- valElemType := valType.Elem()
-
- // Accumulate errors
- errors := make([]string, 0)
-
- // If the input data is empty, then we just match what the input data is.
- if dataVal.Len() == 0 {
- if dataVal.IsNil() {
- if !val.IsNil() {
- val.Set(dataVal)
- }
- } else {
- // Set to empty allocated value
- val.Set(valMap)
- }
-
- return nil
- }
-
- for _, k := range dataVal.MapKeys() {
- fieldName := fmt.Sprintf("%s[%s]", name, k)
-
- // First decode the key into the proper type
- currentKey := reflect.Indirect(reflect.New(valKeyType))
- if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
- errors = appendErrors(errors, err)
- continue
- }
-
- // Next decode the data into the proper type
- v := dataVal.MapIndex(k).Interface()
- currentVal := reflect.Indirect(reflect.New(valElemType))
- if err := d.decode(fieldName, v, currentVal); err != nil {
- errors = appendErrors(errors, err)
- continue
- }
-
- valMap.SetMapIndex(currentKey, currentVal)
- }
-
- // Set the built up map to the value
- val.Set(valMap)
-
- // If we had errors, return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
-}
-
-func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- typ := dataVal.Type()
- for i := 0; i < typ.NumField(); i++ {
- // Get the StructField first since this is a cheap operation. If the
- // field is unexported, then ignore it.
- f := typ.Field(i)
- if f.PkgPath != "" {
- continue
- }
-
- // Next get the actual value of this field and verify it is assignable
- // to the map value.
- v := dataVal.Field(i)
- if !v.Type().AssignableTo(valMap.Type().Elem()) {
- return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
- }
-
- tagValue := f.Tag.Get(d.config.TagName)
- tagParts := strings.Split(tagValue, ",")
-
- // Determine the name of the key in the map
- keyName := f.Name
- if tagParts[0] != "" {
- if tagParts[0] == "-" {
- continue
- }
- keyName = tagParts[0]
- }
-
- // If "squash" is specified in the tag, we squash the field down.
- squash := false
- for _, tag := range tagParts[1:] {
- if tag == "squash" {
- squash = true
- break
- }
- }
- if squash && v.Kind() != reflect.Struct {
- return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
- }
-
- switch v.Kind() {
- // this is an embedded struct, so handle it differently
- case reflect.Struct:
- x := reflect.New(v.Type())
- x.Elem().Set(v)
-
- vType := valMap.Type()
- vKeyType := vType.Key()
- vElemType := vType.Elem()
- mType := reflect.MapOf(vKeyType, vElemType)
- vMap := reflect.MakeMap(mType)
-
- err := d.decode(keyName, x.Interface(), vMap)
- if err != nil {
- return err
- }
-
- if squash {
- for _, k := range vMap.MapKeys() {
- valMap.SetMapIndex(k, vMap.MapIndex(k))
- }
- } else {
- valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
- }
-
- default:
- valMap.SetMapIndex(reflect.ValueOf(keyName), v)
- }
- }
-
- if val.CanAddr() {
- val.Set(valMap)
- }
-
- return nil
-}
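The struct-to-map direction, including the squash flattening handled above, can be seen end to end through the public Decode entry point; base and widget are invented types:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type base struct {
	ID string
}

type widget struct {
	Base base `mapstructure:",squash"` // flattened into the parent map
	Name string
}

func main() {
	in := widget{Base: base{ID: "w-1"}, Name: "gear"}

	out := map[string]interface{}{}
	if err := mapstructure.Decode(in, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[ID:w-1 Name:gear]
}
```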
-
-func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
- // If the input data is nil, then we want to just set the output
- // pointer to be nil as well.
- isNil := data == nil
- if !isNil {
- switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
- case reflect.Chan,
- reflect.Func,
- reflect.Interface,
- reflect.Map,
- reflect.Ptr,
- reflect.Slice:
- isNil = v.IsNil()
- }
- }
- if isNil {
- if !val.IsNil() && val.CanSet() {
- nilValue := reflect.New(val.Type()).Elem()
- val.Set(nilValue)
- }
-
- return nil
- }
-
- // Create an element of the concrete (non pointer) type and decode
- // into that. Then set the value of the pointer to this type.
- valType := val.Type()
- valElemType := valType.Elem()
- if val.CanSet() {
- realVal := val
- if realVal.IsNil() || d.config.ZeroFields {
- realVal = reflect.New(valElemType)
- }
-
- if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
- return err
- }
-
- val.Set(realVal)
- } else {
- if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
- // Create an element of the concrete (non pointer) type and decode
- // into that. Then set the value of the pointer to this type.
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- if val.Type() != dataVal.Type() {
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s'",
- name, val.Type(), dataVal.Type())
- }
- val.Set(dataVal)
- return nil
-}
-
-func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataValKind := dataVal.Kind()
- valType := val.Type()
- valElemType := valType.Elem()
- sliceType := reflect.SliceOf(valElemType)
-
- valSlice := val
- if valSlice.IsNil() || d.config.ZeroFields {
- if d.config.WeaklyTypedInput {
- switch {
- // Slice and array we use the normal logic
- case dataValKind == reflect.Slice, dataValKind == reflect.Array:
- break
-
- // Empty maps turn into empty slices
- case dataValKind == reflect.Map:
- if dataVal.Len() == 0 {
- val.Set(reflect.MakeSlice(sliceType, 0, 0))
- return nil
- }
- // Create slice of maps of other sizes
- return d.decodeSlice(name, []interface{}{data}, val)
-
- case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
- return d.decodeSlice(name, []byte(dataVal.String()), val)
-
- // All other types we try to convert to the slice type
- // and "lift" it into it. i.e. a string becomes a string slice.
- default:
- // Just re-try this function with data as a slice.
- return d.decodeSlice(name, []interface{}{data}, val)
- }
- }
-
- // Check input type
- if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- return fmt.Errorf(
- "'%s': source data must be an array or slice, got %s", name, dataValKind)
-
- }
-
- // If the input value is empty, then don't allocate since non-nil != nil
- if dataVal.Len() == 0 {
- return nil
- }
-
- // Make a new slice to hold our result, same size as the original data.
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
- }
-
- // Accumulate any errors
- errors := make([]string, 0)
-
- for i := 0; i < dataVal.Len(); i++ {
- currentData := dataVal.Index(i).Interface()
- for valSlice.Len() <= i {
- valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
- }
- currentField := valSlice.Index(i)
-
- fieldName := fmt.Sprintf("%s[%d]", name, i)
- if err := d.decode(fieldName, currentData, currentField); err != nil {
- errors = appendErrors(errors, err)
- }
- }
-
- // Finally, set the value to the slice we built up
- val.Set(valSlice)
-
- // If there were errors, we return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
-}
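The weakly typed lifting in the default branch above is what lets a bare scalar decode into a one-element slice, with the element itself weakly decoded. A sketch via WeakDecode:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	// With weak typing, a scalar is lifted into a one-element slice,
	// and each element is itself weakly decoded (string -> int here).
	var ports []int
	if err := mapstructure.WeakDecode("4", &ports); err != nil {
		panic(err)
	}
	fmt.Println(ports) // [4]
}
```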
-
-func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataValKind := dataVal.Kind()
- valType := val.Type()
- valElemType := valType.Elem()
- arrayType := reflect.ArrayOf(valType.Len(), valElemType)
-
- valArray := val
-
- if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
- // Check input type
- if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- if d.config.WeaklyTypedInput {
- switch {
- // Empty maps turn into empty arrays
- case dataValKind == reflect.Map:
- if dataVal.Len() == 0 {
- val.Set(reflect.Zero(arrayType))
- return nil
- }
-
- // All other types we try to convert to the array type
- // and "lift" it into it. i.e. a string becomes a string array.
- default:
- // Just re-try this function with data as a slice.
- return d.decodeArray(name, []interface{}{data}, val)
- }
- }
-
- return fmt.Errorf(
- "'%s': source data must be an array or slice, got %s", name, dataValKind)
-
- }
- if dataVal.Len() > arrayType.Len() {
- return fmt.Errorf(
- "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
-
- }
-
- // Make a new array to hold our result, same size as the original data.
- valArray = reflect.New(arrayType).Elem()
- }
-
- // Accumulate any errors
- errors := make([]string, 0)
-
- for i := 0; i < dataVal.Len(); i++ {
- currentData := dataVal.Index(i).Interface()
- currentField := valArray.Index(i)
-
- fieldName := fmt.Sprintf("%s[%d]", name, i)
- if err := d.decode(fieldName, currentData, currentField); err != nil {
- errors = appendErrors(errors, err)
- }
- }
-
- // Finally, set the value to the array we built up
- val.Set(valArray)
-
- // If there were errors, we return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
-}
-
-func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
-
- // If the type of the value to write to and the data match directly,
- // then we just set it directly instead of recursing into the structure.
- if dataVal.Type() == val.Type() {
- val.Set(dataVal)
- return nil
- }
-
- dataValKind := dataVal.Kind()
- switch dataValKind {
- case reflect.Map:
- return d.decodeStructFromMap(name, dataVal, val)
-
- case reflect.Struct:
- // Not the most efficient way to do this but we can optimize later if
- // we want to. To convert from struct to struct we go to map first
- // as an intermediary.
- m := make(map[string]interface{})
- mval := reflect.Indirect(reflect.ValueOf(&m))
- if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil {
- return err
- }
-
- result := d.decodeStructFromMap(name, mval, val)
- return result
-
- default:
- return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
- }
-}
-
-func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
- dataValType := dataVal.Type()
- if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
- return fmt.Errorf(
- "'%s' needs a map with string keys, has '%s' keys",
- name, dataValType.Key().Kind())
- }
-
- dataValKeys := make(map[reflect.Value]struct{})
- dataValKeysUnused := make(map[interface{}]struct{})
- for _, dataValKey := range dataVal.MapKeys() {
- dataValKeys[dataValKey] = struct{}{}
- dataValKeysUnused[dataValKey.Interface()] = struct{}{}
- }
-
- errors := make([]string, 0)
-
- // This slice will keep track of all the structs we'll be decoding.
- // There can be more than one struct if there are embedded structs
- // that are squashed.
- structs := make([]reflect.Value, 1, 5)
- structs[0] = val
-
- // Compile the list of all the fields that we're going to be decoding
- // from all the structs.
- type field struct {
- field reflect.StructField
- val reflect.Value
- }
- fields := []field{}
- for len(structs) > 0 {
- structVal := structs[0]
- structs = structs[1:]
-
- structType := structVal.Type()
-
- for i := 0; i < structType.NumField(); i++ {
- fieldType := structType.Field(i)
- fieldKind := fieldType.Type.Kind()
-
- // If "squash" is specified in the tag, we squash the field down.
- squash := false
- tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
- for _, tag := range tagParts[1:] {
- if tag == "squash" {
- squash = true
- break
- }
- }
-
- if squash {
- if fieldKind != reflect.Struct {
- errors = appendErrors(errors,
- fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
- } else {
- structs = append(structs, structVal.FieldByName(fieldType.Name))
- }
- continue
- }
-
- // Normal struct field, store it away
- fields = append(fields, field{fieldType, structVal.Field(i)})
- }
- }
-
- for _, f := range fields {
- field, fieldValue := f.field, f.val
- fieldName := field.Name
-
- tagValue := field.Tag.Get(d.config.TagName)
- tagValue = strings.SplitN(tagValue, ",", 2)[0]
- if tagValue != "" {
- fieldName = tagValue
- }
-
- rawMapKey := reflect.ValueOf(fieldName)
- rawMapVal := dataVal.MapIndex(rawMapKey)
- if !rawMapVal.IsValid() {
- // Do a slower search by iterating over each key and
- // doing case-insensitive search.
- for dataValKey := range dataValKeys {
- mK, ok := dataValKey.Interface().(string)
- if !ok {
- // Not a string key
- continue
- }
-
- if strings.EqualFold(mK, fieldName) {
- rawMapKey = dataValKey
- rawMapVal = dataVal.MapIndex(dataValKey)
- break
- }
- }
-
- if !rawMapVal.IsValid() {
- // There was no matching key in the map for the value in
- // the struct. Just ignore.
- continue
- }
- }
-
- // Delete the key we're using from the unused map so we stop tracking
- delete(dataValKeysUnused, rawMapKey.Interface())
-
- if !fieldValue.IsValid() {
- // This should never happen
- panic("field is not valid")
- }
-
- // If we can't set the field, it is most likely unexported;
- // just continue onwards.
- if !fieldValue.CanSet() {
- continue
- }
-
- // If the name is the empty string, then we're at the root, and we
- // don't dot-join the fields.
- if name != "" {
- fieldName = fmt.Sprintf("%s.%s", name, fieldName)
- }
-
- if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
- errors = appendErrors(errors, err)
- }
- }
-
- if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
- keys := make([]string, 0, len(dataValKeysUnused))
- for rawKey := range dataValKeysUnused {
- keys = append(keys, rawKey.(string))
- }
- sort.Strings(keys)
-
- err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
- errors = appendErrors(errors, err)
- }
-
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- // If we're tracking metadata, record any unused keys.
- if d.config.Metadata != nil {
- for rawKey := range dataValKeysUnused {
- key := rawKey.(string)
- if name != "" {
- key = fmt.Sprintf("%s.%s", name, key)
- }
-
- d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
- }
- }
-
- return nil
-}
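A short sketch of the two behaviours implemented above, the `squash` tag and the case-insensitive key fallback (the `Base` and `Widget` types are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Base struct {
	ID string `mapstructure:"id"`
}

type Widget struct {
	Base `mapstructure:",squash"` // embedded fields are decoded at the top level
	Name string
}

func main() {
	// "NAME" still matches the Name field via the case-insensitive
	// fallback search in decodeStructFromMap.
	input := map[string]interface{}{"id": "w-1", "NAME": "gear"}

	var w Widget
	if err := mapstructure.Decode(input, &w); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", w) // {Base:{ID:w-1} Name:gear}
}
```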
-
-func getKind(val reflect.Value) reflect.Kind {
- kind := val.Kind()
-
- switch {
- case kind >= reflect.Int && kind <= reflect.Int64:
- return reflect.Int
- case kind >= reflect.Uint && kind <= reflect.Uint64:
- return reflect.Uint
- case kind >= reflect.Float32 && kind <= reflect.Float64:
- return reflect.Float32
- default:
- return kind
- }
-}
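A standalone reproduction of `getKind` (it is unexported, so this sketch re-declares it) showing how the sized numeric kinds collapse to one representative per family, so the decoder needs only one case per family:

```go
package main

import (
	"fmt"
	"reflect"
)

// getKind mirrors the unexported helper above: every int width reports
// reflect.Int, every uint width reflect.Uint, and both float widths
// reflect.Float32; all other kinds pass through unchanged.
func getKind(val reflect.Value) reflect.Kind {
	kind := val.Kind()
	switch {
	case kind >= reflect.Int && kind <= reflect.Int64:
		return reflect.Int
	case kind >= reflect.Uint && kind <= reflect.Uint64:
		return reflect.Uint
	case kind >= reflect.Float32 && kind <= reflect.Float64:
		return reflect.Float32
	default:
		return kind
	}
}

func main() {
	fmt.Println(getKind(reflect.ValueOf(int8(1))))    // int
	fmt.Println(getKind(reflect.ValueOf(uint64(1))))  // uint
	fmt.Println(getKind(reflect.ValueOf(float64(1)))) // float32
	fmt.Println(getKind(reflect.ValueOf("s")))        // string
}
```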
diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml
deleted file mode 100644
index 4f2ee4d9..00000000
--- a/vendor/github.com/mitchellh/reflectwalk/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE
deleted file mode 100644
index f9c841a5..00000000
--- a/vendor/github.com/mitchellh/reflectwalk/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md
deleted file mode 100644
index ac82cd2e..00000000
--- a/vendor/github.com/mitchellh/reflectwalk/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# reflectwalk
-
-reflectwalk is a Go library for "walking" a value in Go using reflection,
-in the same way a directory tree can be "walked" on the filesystem. Walking
-a complex structure lets you inspect and manipulate unknown structures,
-such as those decoded from JSON.
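A minimal usage sketch built on the `Walk` entry point and the `PrimitiveWalker` interface defined in this package (the `stringCounter` walker is invented for illustration):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// stringCounter counts every string encountered anywhere in a walked value.
type stringCounter struct{ n int }

func (c *stringCounter) Primitive(v reflect.Value) error {
	// Values stored in interface{} arrive wrapped; unwrap before checking.
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.String {
		c.n++
	}
	return nil
}

func main() {
	data := map[string]interface{}{
		"name": "vault",
		"tags": []interface{}{"secrets", "go"},
	}

	c := &stringCounter{}
	if err := reflectwalk.Walk(data, c); err != nil {
		panic(err)
	}
	fmt.Println(c.n) // 5: map keys are walked as well as values
}
```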
diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod
deleted file mode 100644
index 52bb7c46..00000000
--- a/vendor/github.com/mitchellh/reflectwalk/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/mitchellh/reflectwalk
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go
deleted file mode 100644
index 6a7f1761..00000000
--- a/vendor/github.com/mitchellh/reflectwalk/location.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package reflectwalk
-
-//go:generate stringer -type=Location location.go
-
-type Location uint
-
-const (
- None Location = iota
- Map
- MapKey
- MapValue
- Slice
- SliceElem
- Array
- ArrayElem
- Struct
- StructField
- WalkLoc
-)
diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go
deleted file mode 100644
index 70760cf4..00000000
--- a/vendor/github.com/mitchellh/reflectwalk/location_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type=Location location.go"; DO NOT EDIT.
-
-package reflectwalk
-
-import "fmt"
-
-const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"
-
-var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73}
-
-func (i Location) String() string {
- if i >= Location(len(_Location_index)-1) {
- return fmt.Sprintf("Location(%d)", i)
- }
- return _Location_name[_Location_index[i]:_Location_index[i+1]]
-}
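The effect of the generated method, as a tiny sketch:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/reflectwalk"
)

func main() {
	fmt.Println(reflectwalk.MapKey)       // MapKey
	fmt.Println(reflectwalk.Location(99)) // Location(99), the out-of-range fallback
}
```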
diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
deleted file mode 100644
index d7ab7b6d..00000000
--- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
+++ /dev/null
@@ -1,401 +0,0 @@
-// reflectwalk is a package that allows you to "walk" complex structures
-// similar to how you may "walk" a filesystem: visiting every element one
-// by one and calling callback functions that let you handle and manipulate
-// those elements.
-package reflectwalk
-
-import (
- "errors"
- "reflect"
-)
-
-// PrimitiveWalker implementations are able to handle primitive values
-// within complex structures. Primitive values are numbers, strings,
-// booleans, funcs, chans.
-//
-// These primitive values are often members of more complex
-// structures (slices, maps, etc.) that are walkable by other interfaces.
-type PrimitiveWalker interface {
- Primitive(reflect.Value) error
-}
-
-// InterfaceWalker implementations are able to handle interface values as they
-// are encountered during the walk.
-type InterfaceWalker interface {
- Interface(reflect.Value) error
-}
-
-// MapWalker implementations are able to handle individual elements
-// found within a map structure.
-type MapWalker interface {
- Map(m reflect.Value) error
- MapElem(m, k, v reflect.Value) error
-}
-
-// SliceWalker implementations are able to handle slice elements found
-// within complex structures.
-type SliceWalker interface {
- Slice(reflect.Value) error
- SliceElem(int, reflect.Value) error
-}
-
-// ArrayWalker implementations are able to handle array elements found
-// within complex structures.
-type ArrayWalker interface {
- Array(reflect.Value) error
- ArrayElem(int, reflect.Value) error
-}
-
-// StructWalker is an interface that has methods that are called for
-// structs when a Walk is done.
-type StructWalker interface {
- Struct(reflect.Value) error
- StructField(reflect.StructField, reflect.Value) error
-}
-
-// EnterExitWalker implementations are notified before and after
-// they walk deeper into complex structures (into struct fields,
-// into slice elements, etc.)
-type EnterExitWalker interface {
- Enter(Location) error
- Exit(Location) error
-}
-
-// PointerWalker implementations are notified whether the value they're
-// walking is a pointer. PointerEnter and PointerExit are called for
-// _every_ value, with a boolean indicating whether it is a pointer.
-type PointerWalker interface {
- PointerEnter(bool) error
- PointerExit(bool) error
-}
-
-// SkipEntry can be returned from walk functions to skip walking
-// the value of this field. This is only valid in the following functions:
-//
-// - Struct: skips all fields from being walked
-// - StructField: skips walking the struct value
-//
-var SkipEntry = errors.New("skip this entry")
-
-// Walk takes an arbitrary value and an interface and traverses the
-// value, calling callbacks on the interface if they are supported.
-// The interface should implement one or more of the walker interfaces
-// in this package, such as PrimitiveWalker, StructWalker, etc.
-func Walk(data, walker interface{}) (err error) {
- v := reflect.ValueOf(data)
- ew, ok := walker.(EnterExitWalker)
- if ok {
- err = ew.Enter(WalkLoc)
- }
-
- if err == nil {
- err = walk(v, walker)
- }
-
- if ok && err == nil {
- err = ew.Exit(WalkLoc)
- }
-
- return
-}
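A sketch of `Walk` driving a `StructWalker`, using `SkipEntry` to prune tagged fields (the `config` type, the `walk:"-"` tag convention, and the `pruner` walker are all invented for illustration):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

type config struct {
	Name   string
	Secret string `walk:"-"`
}

// pruner prints visited field names but prunes any field tagged
// `walk:"-"` by returning SkipEntry from StructField.
type pruner struct{}

func (pruner) Struct(reflect.Value) error { return nil }

func (pruner) StructField(sf reflect.StructField, v reflect.Value) error {
	if sf.Tag.Get("walk") == "-" {
		return reflectwalk.SkipEntry // field is never descended into
	}
	fmt.Println("visiting", sf.Name)
	return nil
}

func main() {
	if err := reflectwalk.Walk(config{Name: "n", Secret: "s"}, pruner{}); err != nil {
		panic(err)
	}
	// Output: visiting Name
}
```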
-
-func walk(v reflect.Value, w interface{}) (err error) {
- // Determine if we're receiving a pointer and if so notify the walker.
- // The logic here is convoluted but very important (tests will fail if
- // almost any part is changed). I will try to explain here.
- //
- // First, we check if the value is an interface, if so, we really need
- // to check the interface's VALUE to see whether it is a pointer.
- //
- // Check whether the value is then a pointer. If so, then set pointer
- // to true to notify the user.
- //
- // If we still have a pointer or an interface after the indirections, then
- // we unwrap another level
- //
- // At this time, we also set "v" to be the dereferenced value. This is
- // because once we've unwrapped the pointer we want to use that value.
- pointer := false
- pointerV := v
-
- for {
- if pointerV.Kind() == reflect.Interface {
- if iw, ok := w.(InterfaceWalker); ok {
- if err = iw.Interface(pointerV); err != nil {
- return
- }
- }
-
- pointerV = pointerV.Elem()
- }
-
- if pointerV.Kind() == reflect.Ptr {
- pointer = true
- v = reflect.Indirect(pointerV)
- }
- if pw, ok := w.(PointerWalker); ok {
- if err = pw.PointerEnter(pointer); err != nil {
- return
- }
-
- defer func(pointer bool) {
- if err != nil {
- return
- }
-
- err = pw.PointerExit(pointer)
- }(pointer)
- }
-
- if pointer {
- pointerV = v
- }
- pointer = false
-
- // If we still have a pointer or interface, we have to indirect another level.
- switch pointerV.Kind() {
- case reflect.Ptr, reflect.Interface:
- continue
- }
- break
- }
-
- // We preserve the original value here because if it is an interface
- // type, we want to pass that directly into the walkPrimitive, so that
- // we can set it.
- originalV := v
- if v.Kind() == reflect.Interface {
- v = v.Elem()
- }
-
- k := v.Kind()
- if k >= reflect.Int && k <= reflect.Complex128 {
- k = reflect.Int
- }
-
- switch k {
- // Primitives
- case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
- err = walkPrimitive(originalV, w)
- return
- case reflect.Map:
- err = walkMap(v, w)
- return
- case reflect.Slice:
- err = walkSlice(v, w)
- return
- case reflect.Struct:
- err = walkStruct(v, w)
- return
- case reflect.Array:
- err = walkArray(v, w)
- return
- default:
- panic("unsupported type: " + k.String())
- }
-}
-
-func walkMap(v reflect.Value, w interface{}) error {
- ew, ewok := w.(EnterExitWalker)
- if ewok {
- ew.Enter(Map)
- }
-
- if mw, ok := w.(MapWalker); ok {
- if err := mw.Map(v); err != nil {
- return err
- }
- }
-
- for _, k := range v.MapKeys() {
- kv := v.MapIndex(k)
-
- if mw, ok := w.(MapWalker); ok {
- if err := mw.MapElem(v, k, kv); err != nil {
- return err
- }
- }
-
- ew, ok := w.(EnterExitWalker)
- if ok {
- ew.Enter(MapKey)
- }
-
- if err := walk(k, w); err != nil {
- return err
- }
-
- if ok {
- ew.Exit(MapKey)
- ew.Enter(MapValue)
- }
-
- if err := walk(kv, w); err != nil {
- return err
- }
-
- if ok {
- ew.Exit(MapValue)
- }
- }
-
- if ewok {
- ew.Exit(Map)
- }
-
- return nil
-}
-
-func walkPrimitive(v reflect.Value, w interface{}) error {
- if pw, ok := w.(PrimitiveWalker); ok {
- return pw.Primitive(v)
- }
-
- return nil
-}
-
-func walkSlice(v reflect.Value, w interface{}) (err error) {
- ew, ok := w.(EnterExitWalker)
- if ok {
- ew.Enter(Slice)
- }
-
- if sw, ok := w.(SliceWalker); ok {
- if err := sw.Slice(v); err != nil {
- return err
- }
- }
-
- for i := 0; i < v.Len(); i++ {
- elem := v.Index(i)
-
- if sw, ok := w.(SliceWalker); ok {
- if err := sw.SliceElem(i, elem); err != nil {
- return err
- }
- }
-
- ew, ok := w.(EnterExitWalker)
- if ok {
- ew.Enter(SliceElem)
- }
-
- if err := walk(elem, w); err != nil {
- return err
- }
-
- if ok {
- ew.Exit(SliceElem)
- }
- }
-
- ew, ok = w.(EnterExitWalker)
- if ok {
- ew.Exit(Slice)
- }
-
- return nil
-}
-
-func walkArray(v reflect.Value, w interface{}) (err error) {
- ew, ok := w.(EnterExitWalker)
- if ok {
- ew.Enter(Array)
- }
-
- if aw, ok := w.(ArrayWalker); ok {
- if err := aw.Array(v); err != nil {
- return err
- }
- }
-
- for i := 0; i < v.Len(); i++ {
- elem := v.Index(i)
-
- if aw, ok := w.(ArrayWalker); ok {
- if err := aw.ArrayElem(i, elem); err != nil {
- return err
- }
- }
-
- ew, ok := w.(EnterExitWalker)
- if ok {
- ew.Enter(ArrayElem)
- }
-
- if err := walk(elem, w); err != nil {
- return err
- }
-
- if ok {
- ew.Exit(ArrayElem)
- }
- }
-
- ew, ok = w.(EnterExitWalker)
- if ok {
- ew.Exit(Array)
- }
-
- return nil
-}
-
-func walkStruct(v reflect.Value, w interface{}) (err error) {
- ew, ewok := w.(EnterExitWalker)
- if ewok {
- ew.Enter(Struct)
- }
-
- skip := false
- if sw, ok := w.(StructWalker); ok {
- err = sw.Struct(v)
- if err == SkipEntry {
- skip = true
- err = nil
- }
- if err != nil {
- return
- }
- }
-
- if !skip {
- vt := v.Type()
- for i := 0; i < vt.NumField(); i++ {
- sf := vt.Field(i)
- f := v.FieldByIndex([]int{i})
-
- if sw, ok := w.(StructWalker); ok {
- err = sw.StructField(sf, f)
-
- // SkipEntry just pretends this field doesn't even exist
- if err == SkipEntry {
- continue
- }
-
- if err != nil {
- return
- }
- }
-
- ew, ok := w.(EnterExitWalker)
- if ok {
- ew.Enter(StructField)
- }
-
- err = walk(f, w)
- if err != nil {
- return
- }
-
- if ok {
- ew.Exit(StructField)
- }
- }
- }
-
- if ewok {
- ew.Exit(Struct)
- }
-
- return nil
-}
diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore
deleted file mode 100644
index a1338d68..00000000
--- a/vendor/github.com/oklog/run/.gitignore
+++ /dev/null
@@ -1,14 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml
deleted file mode 100644
index 362bdd41..00000000
--- a/vendor/github.com/oklog/run/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-sudo: false
-go:
- - 1.x
- - tip
-install:
- - go get -v github.com/golang/lint/golint
- - go build ./...
-script:
- - go vet ./...
- - $HOME/gopath/bin/golint .
- - go test -v -race ./...
diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/github.com/oklog/run/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md
deleted file mode 100644
index a7228cd9..00000000
--- a/vendor/github.com/oklog/run/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# run
-
-[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run)
-[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run)
-[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run)
-[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE)
-
-run.Group is a universal mechanism to manage goroutine lifecycles.
-
-Create a zero-value run.Group, and then add actors to it. Actors are defined as
-a pair of functions: an **execute** function, which should run synchronously;
-and an **interrupt** function, which, when invoked, should cause the execute
-function to return. Finally, invoke Run, which blocks until the first actor
-returns. This general-purpose API allows callers to model pretty much any
-runnable task, and achieve well-defined lifecycle semantics for the group.
-
-run.Group was written to manage component lifecycles in func main for
-[OK Log](https://github.com/oklog/oklog).
-But it's useful in any circumstance where you need to orchestrate multiple
-goroutines as a unified whole.
-[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a
-video of a talk where run.Group is described.
-
-## Examples
-
-### context.Context
-
-```go
-ctx, cancel := context.WithCancel(context.Background())
-g.Add(func() error {
- return myProcess(ctx, ...)
-}, func(error) {
- cancel()
-})
-```
-
-### net.Listener
-
-```go
-ln, _ := net.Listen("tcp", ":8080")
-g.Add(func() error {
- return http.Serve(ln, nil)
-}, func(error) {
- ln.Close()
-})
-```
-
-### io.ReadCloser
-
-```go
-var conn io.ReadCloser = ...
-g.Add(func() error {
- s := bufio.NewScanner(conn)
- for s.Scan() {
- println(s.Text())
- }
- return s.Err()
-}, func(error) {
- conn.Close()
-})
-```
-
-## Comparisons
-
-Package run is somewhat similar to package
-[errgroup](https://godoc.org/golang.org/x/sync/errgroup),
-except it doesn't require actor goroutines to understand context semantics.
-
-It's somewhat similar to package
-[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or
-[tomb.v2](https://godoc.org/gopkg.in/tomb.v2),
-except it has a much smaller API surface, delegating e.g. staged shutdown of
-goroutines to the caller.
diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go
deleted file mode 100644
index 832d47dd..00000000
--- a/vendor/github.com/oklog/run/group.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Package run implements an actor-runner with deterministic teardown. It is
-// somewhat similar to package errgroup, except it does not require actor
-// goroutines to understand context semantics. This makes it suitable for use in
-// more circumstances; for example, goroutines which are handling connections
-// from net.Listeners, or scanning input from a closable io.Reader.
-package run
-
-// Group collects actors (functions) and runs them concurrently.
-// When one actor (function) returns, all actors are interrupted.
-// The zero value of a Group is useful.
-type Group struct {
- actors []actor
-}
-
-// Add an actor (function) to the group. Each actor must be pre-emptable by an
-// interrupt function. That is, if interrupt is invoked, execute should return.
-// Also, it must be safe to call interrupt even after execute has returned.
-//
-// The first actor (function) to return interrupts all running actors.
-// The error is passed to the interrupt functions, and is returned by Run.
-func (g *Group) Add(execute func() error, interrupt func(error)) {
- g.actors = append(g.actors, actor{execute, interrupt})
-}
-
-// Run all actors (functions) concurrently.
-// When the first actor returns, all others are interrupted.
-// Run only returns when all actors have exited.
-// Run returns the error returned by the first exiting actor.
-func (g *Group) Run() error {
- if len(g.actors) == 0 {
- return nil
- }
-
- // Run each actor.
- errors := make(chan error, len(g.actors))
- for _, a := range g.actors {
- go func(a actor) {
- errors <- a.execute()
- }(a)
- }
-
- // Wait for the first actor to stop.
- err := <-errors
-
- // Signal all actors to stop.
- for _, a := range g.actors {
- a.interrupt(err)
- }
-
- // Wait for all actors to stop.
- for i := 1; i < cap(errors); i++ {
- <-errors
- }
-
- // Return the original error.
- return err
-}
-
-type actor struct {
- execute func() error
- interrupt func(error)
-}
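A complete sketch of the semantics documented above: the first actor to return interrupts the rest, and `Run` reports that actor's error once everyone has exited (the timing and error text are invented):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/oklog/run"
)

func main() {
	var g run.Group

	// Actor 1: a worker that runs until its context is cancelled.
	ctx, cancel := context.WithCancel(context.Background())
	g.Add(func() error {
		<-ctx.Done()
		return ctx.Err()
	}, func(error) {
		cancel()
	})

	// Actor 2: returns first, which interrupts the worker above.
	g.Add(func() error {
		time.Sleep(100 * time.Millisecond)
		return fmt.Errorf("done")
	}, func(error) {})

	// Run returns the first actor's error once all actors have exited.
	fmt.Println(g.Run()) // done
}
```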
diff --git a/vendor/github.com/onsi/ginkgo/.gitignore b/vendor/github.com/onsi/ginkgo/.gitignore
deleted file mode 100644
index b9f9659d..00000000
--- a/vendor/github.com/onsi/ginkgo/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-.DS_Store
-TODO
-tmp/**/*
-*.coverprofile
-.vscode
-.idea/
-*.log
diff --git a/vendor/github.com/onsi/ginkgo/.travis.yml b/vendor/github.com/onsi/ginkgo/.travis.yml
deleted file mode 100644
index 3900878b..00000000
--- a/vendor/github.com/onsi/ginkgo/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-go:
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - 1.10.x
- - 1.11.x
-
-install:
- - go get -v -t ./...
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/onsi/gomega
- - go install github.com/onsi/ginkgo/ginkgo
- - export PATH=$PATH:$HOME/gopath/bin
-
-script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
deleted file mode 100644
index d7d79701..00000000
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ /dev/null
@@ -1,207 +0,0 @@
-## 1.7.0
-
-### New Features
-- Add JustAfterEach (#484) [0d4f080]
-
-### Fixes
-- Correctly round suite time in junit reporter [2445fc1]
-- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
-
-## 1.6.0
-
-### New Features
-- add --debug flag to emit node output to files (#499) [39febac]
-
-### Fixes
-- fix: for `go vet` to pass [69338ec]
-- docs: fix for contributing instructions [7004cb1]
-- consolidate and streamline contribution docs (#494) [d848015]
-- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
-- all: gofmt [000d317]
-- Increase eventually timeout to 30s [c73579c]
-- Clarify asynchronous test behaviour [294d8f4]
-- Travis badge should only show master [26d2143]
-
-## 1.5.0 5/10/2018
-
-### New Features
-- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
-- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
-- Re-add noisySkippings flag [652e15c]
-- Allow coverage to be displayed for focused specs (#367) [11459a8]
-- Handle -outputdir flag (#364) [228e3a8]
-- Handle -coverprofile flag (#355) [43392d5]
-
-### Fixes
-- When using custom reporters, register them *before* the default reporter. This allows users to see the output of any print statements in their custom reporters. (#365) [8382b23]
-- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
-- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
-- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
-- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
-- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
-- Add an extra new line after reporting spec run completion for test2json [874520d]
-- added name field to junit reported testsuite [ae61c63]
-- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
-- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
-- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
-- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
-- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
-- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
-- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
-- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
-- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
-- Replace GOPATH in Environment [4b883f0]
-
-
-## 1.4.0 7/16/2017
-
-- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
-- thanks to improvements in `go test -c`, `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
-- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
-- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
-- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
-- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
-
-## 1.3.0 3/28/2017
-
-Improvements:
-
-- Significantly improved parallel test distribution. Now, instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance), Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
-- `Skip(message)` can be used to skip the current test.
-- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
-- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
-- Support for retrying flaky tests with `--flakeAttempts`
-- `ginkgo ./...` now recurses as you'd expect
-- Added `Specify` a synonym for `It`
-- Support colorised output on Windows
-- Broader support for various go compilation flags in the `ginkgo` CLI
-
-Bug Fixes:
-
-- Ginkgo tests now fail when you `panic(nil)` (#167)
-
-## 1.2.0 5/31/2015
-
-Improvements
-
-- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
-- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
-- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
-
-## 1.2.0-beta
-
-Ginkgo now requires Go 1.4+
-
-Improvements:
-
-- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
-- Improved focus behavior. Now, this:
-
- ```golang
- FDescribe("Some describe", func() {
- It("A", func() {})
-
- FIt("B", func() {})
- })
- ```
-
- will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
-- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
-- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
-- Improved output when an error occurs in a setup or teardown block.
-- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
-- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
-- Add support for precompiled tests:
- - `ginkgo build` will now compile the package, producing a file named `package.test`
- - The compiled `package.test` file can be run directly. This runs the tests in series.
- - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
-- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
-- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
-- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
-- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
-- `ginkgo -notify` now works on Linux
-
-Bug Fixes:
-
-- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
-- Fix tempfile leak when running in parallel
-- Fix incorrect failure message when a panic occurs during a parallel test run
-- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
-- Be more consistent about handling SIGTERM as well as SIGINT
-- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
-- Fixed a long-standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
-
-## 1.1.0 (8/2/2014)
-
-No changes, just dropping the beta.
-
-## 1.1.0-beta (7/22/2014)
-New Features:
-
-- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
-- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
-- `ginkgo -p` runs the test suite in parallel with an auto-detected number of nodes.
-- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
-- `ginkgo --failFast` aborts the test suite after the first failure.
-- `ginkgo generate file_1 file_2` can take multiple file arguments.
-- Ginkgo now summarizes, at the end of the test run, any spec failures that occurred.
-- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
-
-Improvements:
-
-- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
-- `ginkgo --untilItFails` no longer recompiles between attempts.
-- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
-
-Bug Fixes:
-
-- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
-- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
-
-## 1.0.0 (5/24/2014)
-New Features:
-
-- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
-
-Improvements:
-
-- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
-- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
-
-Bug Fixes:
-
-- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
-- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
-- Fix all remaining race conditions in Ginkgo's test suite.
-
-## 1.0.0-beta (4/14/2014)
-Breaking changes:
-
-- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
-- Modified the Reporter interface
-- `watch` is now a subcommand, not a flag.
-
-DSL changes:
-
-- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
-- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
-- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
-
-CLI changes:
-
-- `watch` is now a subcommand, not a flag
-- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
-- Additional arguments can be passed to specs. Pass them after the `--` separator
-- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
-- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
-
-Misc:
-
-- Start using semantic versioning
-- Start maintaining changelog
-
-Major refactor:
-
-- Pull out Ginkgo's internal to `internal`
-- Rename `example` everywhere to `spec`
-- Much more!
diff --git a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
deleted file mode 100644
index 908b95c2..00000000
--- a/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Contributing to Ginkgo
-
-Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
-
-- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
-- Ensure adequate test coverage:
- - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
- - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
-- Update the documentation. Ginkgo uses `godoc` comments and documentation on the `gh-pages` branch.
- If relevant, please submit a docs PR to that branch alongside your code PR.
-
-Thanks for supporting Ginkgo!
-
-## Setup
-
-Fork the repo, then:
-
-```
-go get github.com/onsi/ginkgo
-go get github.com/onsi/gomega/...
-cd $GOPATH/src/github.com/onsi/ginkgo
-git remote add fork git@github.com:/ginkgo.git
-
-ginkgo -r -p # ensure tests are green
-go vet ./... # ensure linter is happy
-```
-
-## Making the PR
- - go to a new branch `git checkout -b my-feature`
- - make your changes
- - run tests and linter again (see above)
- - `git push fork`
- - open PR 🎉
diff --git a/vendor/github.com/onsi/ginkgo/LICENSE b/vendor/github.com/onsi/ginkgo/LICENSE
deleted file mode 100644
index 9415ee72..00000000
--- a/vendor/github.com/onsi/ginkgo/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2013-2014 Onsi Fakhouri
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
deleted file mode 100644
index cdf8d054..00000000
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ /dev/null
@@ -1,121 +0,0 @@
-![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
-
-[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
-
-Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
-
-If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
-
-## Feature List
-
-- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
-
-- Structure your BDD-style tests expressively:
- - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
-
-- A comprehensive test runner that lets you:
- - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
- - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
-
-- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- - `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in real time
- - `ginkgo -cover` runs your tests using Go's code coverage tool
- - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
- - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
- - `ginkgo -r` runs all test suites under the current directory
- - `ginkgo -v` prints out identifying information for each test just before it runs
-
- And much more: run `ginkgo help` for details!
-
- The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
-
-- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
-
-- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
-
-- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other arbitrary bits of numerical information about your code.
-
-- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
-
-- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
-
-- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
-
-- A modular architecture that lets you easily:
- - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
-
-## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
-
-Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
-
-## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework
-
-Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
-
-## Set Me Up!
-
-You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but you should preferably use the latest release. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
-
-```bash
-
-go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
-go get -u github.com/onsi/gomega/... # fetches the matcher library
-
-cd path/to/package/you/want/to/test
-
-ginkgo bootstrap # set up a new ginkgo suite
-ginkgo generate # will create a sample test file. edit this file and add your tests then...
-
-go test # to run your tests
-
-ginkgo # also runs your tests
-
-```
-
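For concreteness, here is a minimal suite like one a fresh `ginkgo bootstrap` / `ginkgo generate` pair might produce (a sketch; the package and spec names are invented):

```go
package cake_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// The suite file generated by `ginkgo bootstrap` hooks Ginkgo into `go test`.
func TestCake(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Cake Suite")
}

// A spec file like the one `ginkgo generate` creates.
var _ = Describe("Cake", func() {
	It("is sweet", func() {
		Expect(1 + 1).To(Equal(2))
	})
})
```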
-## I'm new to Go: What are my testing options?
-
-Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
-
-With that said, it's great to know what your options are :)
-
-### What Go gives you out of the box
-
-Testing is a first-class citizen in Go; however, Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
-
-### Matcher libraries for Go's XUnit style tests
-
-A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
-
-- [testify](https://github.com/stretchr/testify)
-- [gocheck](http://labix.org/gocheck)
-
-You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
-
-### BDD style testing frameworks
-
-There are a handful of BDD-style testing frameworks written for Go. Here are a few:
-
-- [Ginkgo](https://github.com/onsi/ginkgo) ;)
-- [GoConvey](https://github.com/smartystreets/goconvey)
-- [Goblin](https://github.com/franela/goblin)
-- [Mao](https://github.com/azer/mao)
-- [Zen](https://github.com/pranavraja/zen)
-
-Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
-
-Go explore!
-
-## License
-
-Ginkgo is MIT-Licensed
-
-## Contributing
-
-See [CONTRIBUTING.md](CONTRIBUTING.md)
diff --git a/vendor/github.com/onsi/ginkgo/RELEASING.md b/vendor/github.com/onsi/ginkgo/RELEASING.md
deleted file mode 100644
index 1e298c2d..00000000
--- a/vendor/github.com/onsi/ginkgo/RELEASING.md
+++ /dev/null
@@ -1,14 +0,0 @@
-A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
-
-1. Ensure CHANGELOG.md is up to date.
- - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
- - Categorize the changes into
- - Breaking Changes (requires a major version)
- - New Features (minor version)
- - Fixes (fix version)
- - Maintenance (which in general should not be mentioned in `CHANGELOG.md`, as such changes have no user impact)
-1. Update `VERSION` in `config/config.go`
-1. Create a commit with the version number as the commit message (e.g. `v1.3.0`)
-1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`)
-1. Push the commit and tag to GitHub
-1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
deleted file mode 100644
index 5e509313..00000000
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
-Ginkgo accepts a number of configuration options.
-
-These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
-
-You can also learn more via
-
- ginkgo help
-
-or (I kid you not):
-
- go test -asdf
-*/
-package config
-
-import (
- "flag"
- "time"
-
- "fmt"
-)
-
-const VERSION = "1.7.0"
-
-type GinkgoConfigType struct {
- RandomSeed int64
- RandomizeAllSpecs bool
- RegexScansFilePath bool
- FocusString string
- SkipString string
- SkipMeasurements bool
- FailOnPending bool
- FailFast bool
- FlakeAttempts int
- EmitSpecProgress bool
- DryRun bool
- DebugParallel bool
-
- ParallelNode int
- ParallelTotal int
- SyncHost string
- StreamHost string
-}
-
-var GinkgoConfig = GinkgoConfigType{}
-
-type DefaultReporterConfigType struct {
- NoColor bool
- SlowSpecThreshold float64
- NoisyPendings bool
- NoisySkippings bool
- Succinct bool
- Verbose bool
- FullTrace bool
-}
-
-var DefaultReporterConfig = DefaultReporterConfigType{}
-
-func processPrefix(prefix string) string {
- if prefix != "" {
- prefix = prefix + "."
- }
- return prefix
-}
-
-func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
- prefix = processPrefix(prefix)
- flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
- flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
- flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
- flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
- flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
-
- flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
-
- flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
- flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
-
- flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")
-
-	flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed, but any failures will still be recorded.")
-
- flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
-
- flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")
-
- if includeParallelFlags {
- flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
- flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
- flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
- flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
- }
-
- flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
- flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
- flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
- flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
-	flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
-	flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report.")
-	flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs.")
-}
-
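-// BuildFlagArgs reconstructs a flag-argument list from the given config
-// structs. A minimal round-trip sketch (the FlagSet name and the parsed
-// arguments below are illustrative, not part of this package):
-//
-//	fs := flag.NewFlagSet("example", flag.ContinueOnError)
-//	Flags(fs, "ginkgo", true)
-//	_ = fs.Parse([]string{"--ginkgo.failFast", "--ginkgo.seed=1138"})
-//	args := BuildFlagArgs("ginkgo", GinkgoConfig, DefaultReporterConfig)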
-func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
- prefix = processPrefix(prefix)
- result := make([]string, 0)
-
- if ginkgo.RandomSeed > 0 {
- result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
- }
-
- if ginkgo.RandomizeAllSpecs {
- result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
- }
-
- if ginkgo.SkipMeasurements {
- result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
- }
-
- if ginkgo.FailOnPending {
- result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
- }
-
- if ginkgo.FailFast {
- result = append(result, fmt.Sprintf("--%sfailFast", prefix))
- }
-
- if ginkgo.DryRun {
- result = append(result, fmt.Sprintf("--%sdryRun", prefix))
- }
-
- if ginkgo.FocusString != "" {
- result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
- }
-
- if ginkgo.SkipString != "" {
- result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
- }
-
- if ginkgo.FlakeAttempts > 1 {
- result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
- }
-
- if ginkgo.EmitSpecProgress {
- result = append(result, fmt.Sprintf("--%sprogress", prefix))
- }
-
- if ginkgo.DebugParallel {
- result = append(result, fmt.Sprintf("--%sdebug", prefix))
- }
-
- if ginkgo.ParallelNode != 0 {
- result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
- }
-
- if ginkgo.ParallelTotal != 0 {
- result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
- }
-
- if ginkgo.StreamHost != "" {
- result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
- }
-
- if ginkgo.SyncHost != "" {
- result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
- }
-
- if ginkgo.RegexScansFilePath {
- result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
- }
-
- if reporter.NoColor {
- result = append(result, fmt.Sprintf("--%snoColor", prefix))
- }
-
- if reporter.SlowSpecThreshold > 0 {
- result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
- }
-
- if !reporter.NoisyPendings {
- result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
- }
-
- if !reporter.NoisySkippings {
- result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
- }
-
- if reporter.Verbose {
- result = append(result, fmt.Sprintf("--%sv", prefix))
- }
-
- if reporter.Succinct {
- result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
- }
-
- if reporter.FullTrace {
- result = append(result, fmt.Sprintf("--%strace", prefix))
- }
-
- return result
-}
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
deleted file mode 100644
index 5aa96b4d..00000000
--- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
-Ginkgo is a BDD-style testing framework for Golang
-
-The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
-
-Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
-
-Ginkgo on Github: http://github.com/onsi/ginkgo
-
-Ginkgo is MIT-Licensed
-*/
-package ginkgo
-
-import (
- "flag"
- "fmt"
- "io"
- "net/http"
- "os"
- "strings"
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/internal/remote"
- "github.com/onsi/ginkgo/internal/suite"
- "github.com/onsi/ginkgo/internal/testingtproxy"
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/reporters/stenographer"
- colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
- "github.com/onsi/ginkgo/types"
-)
-
-const GINKGO_VERSION = config.VERSION
-const GINKGO_PANIC = `
-Your test failed.
-Ginkgo panics to prevent subsequent assertions from running.
-Normally Ginkgo rescues this panic so you shouldn't see it.
-
-But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
-To circumvent this, you should call
-
- defer GinkgoRecover()
-
-at the top of the goroutine that caused this panic.
-`
-const defaultTimeout = 1
-
-var globalSuite *suite.Suite
-var globalFailer *failer.Failer
-
-func init() {
- config.Flags(flag.CommandLine, "ginkgo", true)
- GinkgoWriter = writer.New(os.Stdout)
- globalFailer = failer.New()
- globalSuite = suite.New(globalFailer)
-}
-
-//GinkgoWriter implements an io.Writer
-//When running in verbose mode any writes to GinkgoWriter will be immediately printed
-//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
-//only if the current test fails.
-var GinkgoWriter io.Writer
-
-//The interface by which Ginkgo receives *testing.T
-type GinkgoTestingT interface {
- Fail()
-}
-
-//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
-//useful for seeding your own pseudorandom number generators (PRNGs) to ensure
-//consistent executions from run to run, when your tests contain variability (for
-//example, when selecting random test data).
-func GinkgoRandomSeed() int64 {
- return config.GinkgoConfig.RandomSeed
-}
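-
-//A minimal sketch of such seeding, assuming the standard library's math/rand
-//(fixtures is a hypothetical slice being shuffled deterministically):
-//
-//	rng := rand.New(rand.NewSource(GinkgoRandomSeed()))
-//	order := rng.Perm(len(fixtures))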
-
-//GinkgoParallelNode returns the parallel node number for the current ginkgo process
-//The node number is 1-indexed
-func GinkgoParallelNode() int {
- return config.GinkgoConfig.ParallelNode
-}
-
-//Some matcher libraries or legacy codebases require a *testing.T
-//GinkgoT implements an interface analogous to *testing.T and can be used if
-//the library in question accepts *testing.T through an interface
-//
-// For example, with testify:
-// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
-//
-// Or with gomock:
-// gomock.NewController(GinkgoT())
-//
-// GinkgoT() takes an optional offset argument that can be used to get the
-// correct line number associated with the failure.
-func GinkgoT(optionalOffset ...int) GinkgoTInterface {
- offset := 3
- if len(optionalOffset) > 0 {
- offset = optionalOffset[0]
- }
- return testingtproxy.New(GinkgoWriter, Fail, offset)
-}
-
-//The interface returned by GinkgoT(). This covers most of the methods
-//in the testing package's T.
-type GinkgoTInterface interface {
- Fail()
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
- FailNow()
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Log(args ...interface{})
- Logf(format string, args ...interface{})
- Failed() bool
- Parallel()
- Skip(args ...interface{})
- Skipf(format string, args ...interface{})
- SkipNow()
- Skipped() bool
-}
-
-//Custom Ginkgo test reporters must implement the Reporter interface.
-//
-//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
-//and a SpecSummary just before a spec begins and just after a spec ends
-type Reporter reporters.Reporter
-
-//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
-//to tell Ginkgo that your async test is done.
-type Done chan<- interface{}
-
-//GinkgoTestDescription represents the information about the currently running test returned by CurrentGinkgoTestDescription
-// FullTestText: a concatenation of ComponentTexts and the TestText
-// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
-// TestText: the text in the actual It or Measure node
-// IsMeasurement: true if the current test is a measurement
-// FileName: the name of the file containing the current test
-// LineNumber: the line number for the current test
-// Failed: if the current test has failed, this will be true (useful in an AfterEach)
-type GinkgoTestDescription struct {
- FullTestText string
- ComponentTexts []string
- TestText string
-
- IsMeasurement bool
-
- FileName string
- LineNumber int
-
- Failed bool
- Duration time.Duration
-}
-
-//CurrentGinkgoTestDescription returns information about the currently running test.
-func CurrentGinkgoTestDescription() GinkgoTestDescription {
- summary, ok := globalSuite.CurrentRunningSpecSummary()
- if !ok {
- return GinkgoTestDescription{}
- }
-
- subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
-
- return GinkgoTestDescription{
- ComponentTexts: summary.ComponentTexts[1:],
- FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
- TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
- IsMeasurement: summary.IsMeasurement,
- FileName: subjectCodeLocation.FileName,
- LineNumber: subjectCodeLocation.LineNumber,
- Failed: summary.HasFailureState(),
- Duration: summary.RunTime,
- }
-}
-
-//Measurement tests receive a Benchmarker.
-//
-//You use the Time() function to time how long the passed in body function takes to run
-//You use the RecordValue() function to track arbitrary numerical measurements.
-//The RecordValueWithPrecision() function can be used alternatively to provide the unit
-//and resolution of the numeric measurement.
-//The optional info argument is passed to the test reporter and can be used to
-//provide the measurement data to a custom reporter with context.
-//
-//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
-type Benchmarker interface {
- Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
- RecordValue(name string, value float64, info ...interface{})
- RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
-}
-
-//RunSpecs is the entry point for the Ginkgo test runner.
-//You must call this within a Golang testing TestX(t *testing.T) function.
-//
-//To bootstrap a test suite you can use the Ginkgo CLI:
-//
-// ginkgo bootstrap
-func RunSpecs(t GinkgoTestingT, description string) bool {
- specReporters := []Reporter{buildDefaultReporter()}
- return RunSpecsWithCustomReporters(t, description, specReporters)
-}
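-
-//A minimal sketch of the resulting bootstrap file, assuming Ginkgo and Gomega
-//are dot-imported and "Books" is a hypothetical suite name:
-//
-//	func TestBooks(t *testing.T) {
-//		RegisterFailHandler(Fail)
-//		RunSpecs(t, "Books Suite")
-//	}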
-
-//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
-//RunSpecs() with this method.
-func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
- specReporters = append(specReporters, buildDefaultReporter())
- return RunSpecsWithCustomReporters(t, description, specReporters)
-}
-
-//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
-//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
-func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
- writer := GinkgoWriter.(*writer.Writer)
- writer.SetStream(config.DefaultReporterConfig.Verbose)
- reporters := make([]reporters.Reporter, len(specReporters))
- for i, reporter := range specReporters {
- reporters[i] = reporter
- }
- passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
- if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
- fmt.Println("PASS | FOCUSED")
- os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
- }
- return passed
-}
-
-func buildDefaultReporter() Reporter {
- remoteReportingServer := config.GinkgoConfig.StreamHost
- if remoteReportingServer == "" {
- stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
- return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
- } else {
- debugFile := ""
- if config.GinkgoConfig.DebugParallel {
- debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
- }
- return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
- }
-}
-
-//Skip notifies Ginkgo that the current spec was skipped.
-func Skip(message string, callerSkip ...int) {
- skip := 0
- if len(callerSkip) > 0 {
- skip = callerSkip[0]
- }
-
- globalFailer.Skip(message, codelocation.New(skip+1))
- panic(GINKGO_PANIC)
-}
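-
-//A minimal sketch of conditional skipping (the INTEGRATION variable and the
-//pingService helper are hypothetical):
-//
-//	It("talks to the real service", func() {
-//		if os.Getenv("INTEGRATION") == "" {
-//			Skip("set INTEGRATION to run this spec")
-//		}
-//		Ω(pingService()).Should(Succeed())
-//	})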
-
-//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
-func Fail(message string, callerSkip ...int) {
- skip := 0
- if len(callerSkip) > 0 {
- skip = callerSkip[0]
- }
-
- globalFailer.Fail(message, codelocation.New(skip+1))
- panic(GINKGO_PANIC)
-}
-
-//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
-//Since Gomega assertions call Fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
-//calls out to Gomega
-//
-//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
-//further assertions from running. This panic must be recovered. Ginkgo does this for you
-//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
-//
-//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
-//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
-func GinkgoRecover() {
- e := recover()
- if e != nil {
- globalFailer.Panic(codelocation.New(1), e)
- }
-}
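-
-//A minimal sketch (doWork is a hypothetical helper; the assertion is Gomega's):
-//
-//	go func() {
-//		defer GinkgoRecover()
-//		err := doWork()
-//		Ω(err).ShouldNot(HaveOccurred())
-//	}()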
-
-//Describe blocks allow you to organize your specs. A Describe block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func Describe(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FDescribe
-func FDescribe(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PDescribe
-func PDescribe(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XDescribe
-func XDescribe(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//Context blocks allow you to organize your specs. A Context block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func Context(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FContext
-func FContext(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PContext
-func PContext(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XContext
-func XContext(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//When blocks allow you to organize your specs. A When block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func When(text string, body func()) bool {
- globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FWhen
-func FWhen(text string, body func()) bool {
- globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PWhen
-func PWhen(text string, body func()) bool {
- globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XWhen
-func XWhen(text string, body func()) bool {
- globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
-//within an It block.
-//
-//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
-//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
-func It(text string, body interface{}, timeout ...float64) bool {
- globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
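-
-//A minimal sketch of an asynchronous It with an explicit 5 second timeout
-//(fetchValue is a hypothetical helper):
-//
-//	It("fetches the value eventually", func(done Done) {
-//		Ω(fetchValue()).Should(Equal(42))
-//		close(done)
-//	}, 5.0)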
-
-//You can focus individual Its using FIt
-func FIt(text string, body interface{}, timeout ...float64) bool {
- globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can mark Its as pending using PIt
-func PIt(text string, _ ...interface{}) bool {
- globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Its as pending using XIt
-func XIt(text string, _ ...interface{}) bool {
- globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//Specify blocks are aliases for It blocks and allow for more natural wording in situations
-//in which "It" does not fit into a natural sentence flow. All the protocols that apply to It
-//blocks apply to Specify blocks as well.
-func Specify(text string, body interface{}, timeout ...float64) bool {
- globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can focus individual Specifys using FSpecify
-func FSpecify(text string, body interface{}, timeout ...float64) bool {
- globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can mark Specifys as pending using PSpecify
-func PSpecify(text string, is ...interface{}) bool {
- globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Specifys as pending using XSpecify
-func XSpecify(text string, is ...interface{}) bool {
- globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//By allows you to better document large Its.
-//
-//Generally you should try to keep your Its short and to the point. This is not always possible, however,
-//especially in the context of integration tests that capture a particular workflow.
-//
-//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
-//By will simply log the passed-in text to the GinkgoWriter. If By is handed a function, it will run that function immediately.
-func By(text string, callbacks ...func()) {
- preamble := "\x1b[1mSTEP\x1b[0m"
- if config.DefaultReporterConfig.NoColor {
- preamble = "STEP"
- }
- fmt.Fprintln(GinkgoWriter, preamble+": "+text)
- if len(callbacks) == 1 {
- callbacks[0]()
- }
- if len(callbacks) > 1 {
- panic("just one callback per By, please")
- }
-}
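-
-//A minimal sketch (the widget helpers are hypothetical):
-//
-//	It("provisions a widget", func() {
-//		By("creating the widget")
-//		widget := newWidget()
-//
-//		By("waiting for it to become ready", func() {
-//			Eventually(widget.Ready).Should(BeTrue())
-//		})
-//	})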
-
-//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
-//and accumulate metrics provided to the Benchmarker by the body function.
-//
-//The body function must have the signature:
-// func(b Benchmarker)
-func Measure(text string, body interface{}, samples int) bool {
- globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
- return true
-}
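-
-//A minimal sketch run over 10 samples (doSomethingHard is a hypothetical
-//helper; the 0.2s bound is illustrative):
-//
-//	Measure("it does something hard efficiently", func(b Benchmarker) {
-//		runtime := b.Time("runtime", func() {
-//			doSomethingHard()
-//		})
-//		Ω(runtime.Seconds()).Should(BeNumerically("<", 0.2))
-//	}, 10)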
-
-//You can focus individual Measures using FMeasure
-func FMeasure(text string, body interface{}, samples int) bool {
- globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
- return true
-}
-
-//You can mark Measurements as pending using PMeasure
-func PMeasure(text string, _ ...interface{}) bool {
- globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Measurements as pending using XMeasure
-func XMeasure(text string, _ ...interface{}) bool {
- globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
-//parallel node process will call BeforeSuite.
-//
-//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
-//
-//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
-func BeforeSuite(body interface{}, timeout ...float64) bool {
- globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
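-
-//A minimal sketch of an asynchronous BeforeSuite with a 10 second timeout
-//(startSharedServer is a hypothetical helper):
-//
-//	var _ = BeforeSuite(func(done Done) {
-//		startSharedServer()
-//		close(done)
-//	}, 10.0)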
-
-//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
-//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
-//
-//When running in parallel, each parallel node process will call AfterSuite.
-//
-//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
-//
-//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
-func AfterSuite(body interface{}, timeout ...float64) bool {
- globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
-//nodes when running tests in parallel. For example, say you have a shared database of which you can start only one instance, and
-//which must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
-//until that node is done before running.
-//
-//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
-//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
-//to the second function (on all the other nodes).
-//
-//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
-//
-// func() []byte
-//
-//or, to run asynchronously:
-//
-// func(done Done) []byte
-//
-//The byte array returned by the first function is then passed to the second function, which has the signature:
-//
-// func(data []byte)
-//
-//or, to run asynchronously:
-//
-// func(data []byte, done Done)
-//
-//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
-//
-// var dbClient db.Client
-// var dbRunner db.Runner
-//
-// var _ = SynchronizedBeforeSuite(func() []byte {
-// dbRunner = db.NewRunner()
-// err := dbRunner.Start()
-// Ω(err).ShouldNot(HaveOccurred())
-// return []byte(dbRunner.URL)
-// }, func(data []byte) {
-// dbClient = db.NewClient()
-// err := dbClient.Connect(string(data))
-// Ω(err).ShouldNot(HaveOccurred())
-// })
-func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
- globalSuite.SetSynchronizedBeforeSuiteNode(
- node1Body,
- allNodesBody,
- codelocation.New(1),
- parseTimeout(timeout...),
- )
- return true
-}
-
-//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
-//external singleton resources shared across nodes when running tests in parallel.
-//
-//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
-//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
-//all other nodes are finished.
-//
-//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
-//
-//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
-//only after all nodes have finished:
-//
-// var _ = SynchronizedAfterSuite(func() {
-// dbClient.Cleanup()
-// }, func() {
-// dbRunner.Stop()
-// })
-func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
- globalSuite.SetSynchronizedAfterSuiteNode(
- allNodesBody,
- node1Body,
- codelocation.New(1),
- parseTimeout(timeout...),
- )
- return true
-}
-
-//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
-//Describe and Context blocks the outermost BeforeEach blocks are run first.
-//
-//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func BeforeEach(body interface{}, timeout ...float64) bool {
- globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
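-
-//A minimal sketch of that ordering:
-//
-//	Describe("outer", func() {
-//		BeforeEach(func() { /* runs first */ })
-//		Context("inner", func() {
-//			BeforeEach(func() { /* runs second */ })
-//			It("then runs the spec", func() {})
-//		})
-//	})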
-
-//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
-//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
-//
-//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func JustBeforeEach(body interface{}, timeout ...float64) bool {
- globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details,
-//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
-//
-//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func JustAfterEach(body interface{}, timeout ...float64) bool {
- globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
-//Describe and Context blocks the innermost AfterEach blocks are run first.
-//
-//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func AfterEach(body interface{}, timeout ...float64) bool {
- globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-func parseTimeout(timeout ...float64) time.Duration {
- if len(timeout) == 0 {
- return time.Duration(defaultTimeout * int64(time.Second))
- } else {
- return time.Duration(timeout[0] * float64(time.Second))
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
deleted file mode 100644
index fa2f0bf7..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package codelocation
-
-import (
- "regexp"
- "runtime"
- "runtime/debug"
- "strings"
-
- "github.com/onsi/ginkgo/types"
-)
-
-func New(skip int) types.CodeLocation {
- _, file, line, _ := runtime.Caller(skip + 1)
- stackTrace := PruneStack(string(debug.Stack()), skip)
- return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
-}
-
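-// PruneStack trims a debug.Stack() dump: it drops the first 2*(skip+1) lines
-// and then filters the remaining location/source line pairs, removing frames
-// that originate in Ginkgo itself or in the runtime and testing packages.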
-func PruneStack(fullStackTrace string, skip int) string {
- stack := strings.Split(fullStackTrace, "\n")
- if len(stack) > 2*(skip+1) {
- stack = stack[2*(skip+1):]
- }
- prunedStack := []string{}
- re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
- for i := 0; i < len(stack)/2; i++ {
- if !re.Match([]byte(stack[i*2])) {
- prunedStack = append(prunedStack, stack[i*2])
- prunedStack = append(prunedStack, stack[i*2+1])
- }
- }
- return strings.Join(prunedStack, "\n")
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
deleted file mode 100644
index 0737746d..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package containernode
-
-import (
- "math/rand"
- "sort"
-
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
-)
-
-type subjectOrContainerNode struct {
- containerNode *ContainerNode
- subjectNode leafnodes.SubjectNode
-}
-
-func (n subjectOrContainerNode) text() string {
- if n.containerNode != nil {
- return n.containerNode.Text()
- } else {
- return n.subjectNode.Text()
- }
-}
-
-type CollatedNodes struct {
- Containers []*ContainerNode
- Subject leafnodes.SubjectNode
-}
-
-type ContainerNode struct {
- text string
- flag types.FlagType
- codeLocation types.CodeLocation
-
- setupNodes []leafnodes.BasicNode
- subjectAndContainerNodes []subjectOrContainerNode
-}
-
-func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
- return &ContainerNode{
- text: text,
- flag: flag,
- codeLocation: codeLocation,
- }
-}
-
-func (container *ContainerNode) Shuffle(r *rand.Rand) {
- sort.Sort(container)
- permutation := r.Perm(len(container.subjectAndContainerNodes))
- shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
- for i, j := range permutation {
- shuffledNodes[i] = container.subjectAndContainerNodes[j]
- }
- container.subjectAndContainerNodes = shuffledNodes
-}
-
-func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
- if node.flag == types.FlagTypePending {
- return false
- }
-
- shouldUnfocus := false
- for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
- if subjectOrContainerNode.containerNode != nil {
- shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
- } else {
- shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
- }
- }
-
- if shouldUnfocus {
- if node.flag == types.FlagTypeFocused {
- node.flag = types.FlagTypeNone
- }
- return true
- }
-
- return node.flag == types.FlagTypeFocused
-}
-
-func (node *ContainerNode) Collate() []CollatedNodes {
- return node.collate([]*ContainerNode{})
-}
-
-func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
- collated := make([]CollatedNodes, 0)
-
- containers := make([]*ContainerNode, len(enclosingContainers))
- copy(containers, enclosingContainers)
- containers = append(containers, node)
-
- for _, subjectOrContainer := range node.subjectAndContainerNodes {
- if subjectOrContainer.containerNode != nil {
- collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
- } else {
- collated = append(collated, CollatedNodes{
- Containers: containers,
- Subject: subjectOrContainer.subjectNode,
- })
- }
- }
-
- return collated
-}
-
-func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
- node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
-}
-
-func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
- node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
-}
-
-func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
- node.setupNodes = append(node.setupNodes, setupNode)
-}
-
-func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
- nodes := []leafnodes.BasicNode{}
- for _, setupNode := range node.setupNodes {
- if setupNode.Type() == nodeType {
- nodes = append(nodes, setupNode)
- }
- }
- return nodes
-}
-
-func (node *ContainerNode) Text() string {
- return node.text
-}
-
-func (node *ContainerNode) CodeLocation() types.CodeLocation {
- return node.codeLocation
-}
-
-func (node *ContainerNode) Flag() types.FlagType {
- return node.flag
-}
-
-//sort.Interface
-
-func (node *ContainerNode) Len() int {
- return len(node.subjectAndContainerNodes)
-}
-
-func (node *ContainerNode) Less(i, j int) bool {
- return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
-}
-
-func (node *ContainerNode) Swap(i, j int) {
- node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
deleted file mode 100644
index 678ea251..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package failer
-
-import (
- "fmt"
- "sync"
-
- "github.com/onsi/ginkgo/types"
-)
-
-type Failer struct {
- lock *sync.Mutex
- failure types.SpecFailure
- state types.SpecState
-}
-
-func New() *Failer {
- return &Failer{
- lock: &sync.Mutex{},
- state: types.SpecStatePassed,
- }
-}
-
-func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- if f.state == types.SpecStatePassed {
- f.state = types.SpecStatePanicked
- f.failure = types.SpecFailure{
- Message: "Test Panicked",
- Location: location,
- ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
- }
- }
-}
-
-func (f *Failer) Timeout(location types.CodeLocation) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- if f.state == types.SpecStatePassed {
- f.state = types.SpecStateTimedOut
- f.failure = types.SpecFailure{
- Message: "Timed out",
- Location: location,
- }
- }
-}
-
-func (f *Failer) Fail(message string, location types.CodeLocation) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- if f.state == types.SpecStatePassed {
- f.state = types.SpecStateFailed
- f.failure = types.SpecFailure{
- Message: message,
- Location: location,
- }
- }
-}
-
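-// Drain returns the recorded failure and outcome. If the outcome is not
-// Passed, the failure is stamped with the draining component's type, index,
-// and code location. The Failer is then reset to a passing state for reuse.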
-func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- failure := f.failure
- outcome := f.state
- if outcome != types.SpecStatePassed {
- failure.ComponentType = componentType
- failure.ComponentIndex = componentIndex
- failure.ComponentCodeLocation = componentCodeLocation
- }
-
- f.state = types.SpecStatePassed
- f.failure = types.SpecFailure{}
-
- return failure, outcome
-}
-
-func (f *Failer) Skip(message string, location types.CodeLocation) {
- f.lock.Lock()
- defer f.lock.Unlock()
-
- if f.state == types.SpecStatePassed {
- f.state = types.SpecStateSkipped
- f.failure = types.SpecFailure{
- Message: message,
- Location: location,
- }
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
deleted file mode 100644
index d6d54234..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package leafnodes
-
-import (
- "math"
- "time"
-
- "sync"
-
- "github.com/onsi/ginkgo/types"
-)
-
-type benchmarker struct {
- mu sync.Mutex
- measurements map[string]*types.SpecMeasurement
- orderCounter int
-}
-
-func newBenchmarker() *benchmarker {
- return &benchmarker{
- measurements: make(map[string]*types.SpecMeasurement, 0),
- }
-}
-
-func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
- t := time.Now()
- body()
- elapsedTime = time.Since(t)
-
- b.mu.Lock()
- defer b.mu.Unlock()
- measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
- measurement.Results = append(measurement.Results, elapsedTime.Seconds())
-
- return
-}
-
-func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
- b.mu.Lock()
- measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
- defer b.mu.Unlock()
- measurement.Results = append(measurement.Results, value)
-}
-
-func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
- b.mu.Lock()
- measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
- defer b.mu.Unlock()
- measurement.Results = append(measurement.Results, value)
-}
-
-func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
- measurement, ok := b.measurements[name]
- if !ok {
- var computedInfo interface{}
- computedInfo = nil
- if len(info) > 0 {
- computedInfo = info[0]
- }
- measurement = &types.SpecMeasurement{
- Name: name,
- Info: computedInfo,
- Order: b.orderCounter,
- SmallestLabel: smallestLabel,
- LargestLabel: largestLabel,
- AverageLabel: averageLabel,
- Units: units,
- Precision: precision,
- Results: make([]float64, 0),
- }
- b.measurements[name] = measurement
- b.orderCounter++
- }
-
- return measurement
-}
-
-func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
- b.mu.Lock()
- defer b.mu.Unlock()
- for _, measurement := range b.measurements {
- measurement.Smallest = math.MaxFloat64
- measurement.Largest = -math.MaxFloat64
- sum := float64(0)
- sumOfSquares := float64(0)
-
- for _, result := range measurement.Results {
- if result > measurement.Largest {
- measurement.Largest = result
- }
- if result < measurement.Smallest {
- measurement.Smallest = result
- }
- sum += result
- sumOfSquares += result * result
- }
-
- n := float64(len(measurement.Results))
- measurement.Average = sum / n
- measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
- }
-
- return b.measurements
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
deleted file mode 100644
index 8c3902d6..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package leafnodes
-
-import (
- "github.com/onsi/ginkgo/types"
-)
-
-type BasicNode interface {
- Type() types.SpecComponentType
- Run() (types.SpecState, types.SpecFailure)
- CodeLocation() types.CodeLocation
-}
-
-type SubjectNode interface {
- BasicNode
-
- Text() string
- Flag() types.FlagType
- Samples() int
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
deleted file mode 100644
index 6eded7b7..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type ItNode struct {
- runner *runner
-
- flag types.FlagType
- text string
-}
-
-func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
- return &ItNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
- flag: flag,
- text: text,
- }
-}
-
-func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *ItNode) Type() types.SpecComponentType {
- return types.SpecComponentTypeIt
-}
-
-func (node *ItNode) Text() string {
- return node.text
-}
-
-func (node *ItNode) Flag() types.FlagType {
- return node.flag
-}
-
-func (node *ItNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func (node *ItNode) Samples() int {
- return 1
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
deleted file mode 100644
index 3ab9a6d5..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package leafnodes
-
-import (
- "reflect"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type MeasureNode struct {
- runner *runner
-
- text string
- flag types.FlagType
- samples int
- benchmarker *benchmarker
-}
-
-func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
- benchmarker := newBenchmarker()
-
- wrappedBody := func() {
- reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
- }
-
- return &MeasureNode{
- runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
-
- text: text,
- flag: flag,
- samples: samples,
- benchmarker: benchmarker,
- }
-}
-
-func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
- return node.benchmarker.measurementsReport()
-}
-
-func (node *MeasureNode) Type() types.SpecComponentType {
- return types.SpecComponentTypeMeasure
-}
-
-func (node *MeasureNode) Text() string {
- return node.text
-}
-
-func (node *MeasureNode) Flag() types.FlagType {
- return node.flag
-}
-
-func (node *MeasureNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func (node *MeasureNode) Samples() int {
- return node.samples
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
deleted file mode 100644
index 16cb66c3..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package leafnodes
-
-import (
- "fmt"
- "reflect"
- "time"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type runner struct {
- isAsync bool
- asyncFunc func(chan<- interface{})
- syncFunc func()
- codeLocation types.CodeLocation
- timeoutThreshold time.Duration
- nodeType types.SpecComponentType
- componentIndex int
- failer *failer.Failer
-}
-
-func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
- bodyType := reflect.TypeOf(body)
- if bodyType.Kind() != reflect.Func {
- panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
- }
-
- runner := &runner{
- codeLocation: codeLocation,
- timeoutThreshold: timeout,
- failer: failer,
- nodeType: nodeType,
- componentIndex: componentIndex,
- }
-
- switch bodyType.NumIn() {
- case 0:
- runner.syncFunc = body.(func())
- return runner
- case 1:
- if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
- panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
- }
-
- wrappedBody := func(done chan<- interface{}) {
- bodyValue := reflect.ValueOf(body)
- bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
- }
-
- runner.isAsync = true
- runner.asyncFunc = wrappedBody
- return runner
- }
-
- panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
-}
-
-func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
- if r.isAsync {
- return r.runAsync()
- } else {
- return r.runSync()
- }
-}
-
-func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
- done := make(chan interface{}, 1)
-
- go func() {
- finished := false
-
- defer func() {
- if e := recover(); e != nil || !finished {
- r.failer.Panic(codelocation.New(2), e)
- select {
- case <-done:
- break
- default:
- close(done)
- }
- }
- }()
-
- r.asyncFunc(done)
- finished = true
- }()
-
- // If this goroutine gets no CPU time before the select block,
- // the <-done case may complete even if the test took longer than the timeoutThreshold.
- // This can cause flaky behaviour, but we haven't seen it in the wild.
- select {
- case <-done:
- case <-time.After(r.timeoutThreshold):
- r.failer.Timeout(r.codeLocation)
- }
-
- failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
- return
-}
-func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
- finished := false
-
- defer func() {
- if e := recover(); e != nil || !finished {
- r.failer.Panic(codelocation.New(2), e)
- }
-
- failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
- }()
-
- r.syncFunc()
- finished = true
-
- return
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
deleted file mode 100644
index e3e9cb7c..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type SetupNode struct {
- runner *runner
-}
-
-func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *SetupNode) Type() types.SpecComponentType {
- return node.runner.nodeType
-}
-
-func (node *SetupNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
- }
-}
-
-func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
- }
-}
-
-func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
- }
-}
-
-func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
deleted file mode 100644
index 80f16ed7..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type SuiteNode interface {
- Run(parallelNode int, parallelTotal int, syncHost string) bool
- Passed() bool
- Summary() *types.SetupSummary
-}
-
-type simpleSuiteNode struct {
- runner *runner
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- t := time.Now()
- node.outcome, node.failure = node.runner.run()
- node.runTime = time.Since(t)
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *simpleSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *simpleSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runner.nodeType,
- CodeLocation: node.runner.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &simpleSuiteNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
- }
-}
-
-func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &simpleSuiteNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
deleted file mode 100644
index a721d0cf..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package leafnodes
-
-import (
- "encoding/json"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type synchronizedAfterSuiteNode struct {
- runnerA *runner
- runnerB *runner
-
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &synchronizedAfterSuiteNode{
- runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- }
-}
-
-func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- node.outcome, node.failure = node.runnerA.run()
-
- if parallelNode == 1 {
- if parallelTotal > 1 {
- node.waitUntilOtherNodesAreDone(syncHost)
- }
-
- outcome, failure := node.runnerB.run()
-
- if node.outcome == types.SpecStatePassed {
- node.outcome, node.failure = outcome, failure
- }
- }
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedAfterSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runnerA.nodeType,
- CodeLocation: node.runnerA.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
- for {
- if node.canRun(syncHost) {
- return
- }
-
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
- resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
- if err != nil || resp.StatusCode != http.StatusOK {
- return false
- }
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return false
- }
- resp.Body.Close()
-
- afterSuiteData := types.RemoteAfterSuiteData{}
- err = json.Unmarshal(body, &afterSuiteData)
- if err != nil {
- return false
- }
-
- return afterSuiteData.CanRun
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
deleted file mode 100644
index d5c88931..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package leafnodes
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
- "reflect"
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type synchronizedBeforeSuiteNode struct {
- runnerA *runner
- runnerB *runner
-
- data []byte
-
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- node := &synchronizedBeforeSuiteNode{}
-
- node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
- node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
-
- return node
-}
-
-func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- t := time.Now()
- defer func() {
- node.runTime = time.Since(t)
- }()
-
- if parallelNode == 1 {
- node.outcome, node.failure = node.runA(parallelTotal, syncHost)
- } else {
- node.outcome, node.failure = node.waitForA(syncHost)
- }
-
- if node.outcome != types.SpecStatePassed {
- return false
- }
- node.outcome, node.failure = node.runnerB.run()
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
- outcome, failure := node.runnerA.run()
-
- if parallelTotal > 1 {
- state := types.RemoteBeforeSuiteStatePassed
- if outcome != types.SpecStatePassed {
- state = types.RemoteBeforeSuiteStateFailed
- }
- json := (types.RemoteBeforeSuiteData{
- Data: node.data,
- State: state,
- }).ToJSON()
- http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
- }
-
- return outcome, failure
-}
-
-func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
- failure := func(message string) types.SpecFailure {
- return types.SpecFailure{
- Message: message,
- Location: node.runnerA.codeLocation,
- ComponentType: node.runnerA.nodeType,
- ComponentIndex: node.runnerA.componentIndex,
- ComponentCodeLocation: node.runnerA.codeLocation,
- }
- }
- for {
- resp, err := http.Get(syncHost + "/BeforeSuiteState")
- if err != nil || resp.StatusCode != http.StatusOK {
- return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
- }
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
- }
- resp.Body.Close()
-
- beforeSuiteData := types.RemoteBeforeSuiteData{}
- err = json.Unmarshal(body, &beforeSuiteData)
- if err != nil {
- return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
- }
-
- switch beforeSuiteData.State {
- case types.RemoteBeforeSuiteStatePassed:
- node.data = beforeSuiteData.Data
- return types.SpecStatePassed, types.SpecFailure{}
- case types.RemoteBeforeSuiteStateFailed:
- return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
- case types.RemoteBeforeSuiteStateDisappeared:
- return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
- }
-
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runnerA.nodeType,
- CodeLocation: node.runnerA.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
- typeA := reflect.TypeOf(bodyA)
- if typeA.Kind() != reflect.Func {
- panic("SynchronizedBeforeSuite expects a function as its first argument")
- }
-
- takesNothing := typeA.NumIn() == 0
- takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
- returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
-
- if !((takesNothing || takesADoneChannel) && returnsBytes) {
- panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
- }
-
- if takesADoneChannel {
- return func(done chan<- interface{}) {
- out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
- node.data = out[0].Interface().([]byte)
- }
- }
-
- return func() {
- out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
- node.data = out[0].Interface().([]byte)
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
- typeB := reflect.TypeOf(bodyB)
- if typeB.Kind() != reflect.Func {
- panic("SynchronizedBeforeSuite expects a function as its second argument")
- }
-
- returnsNothing := typeB.NumOut() == 0
- takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
- takesBytesAndDone := typeB.NumIn() == 2 &&
- typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
- typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
-
- if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
- panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
- }
-
- if takesBytesAndDone {
- return func(done chan<- interface{}) {
- reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
- }
- }
-
- return func() {
- reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
- }
-}
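
wrapA and wrapB never call the user's functions directly; they first interrogate them with the reflect package so that a bad signature fails loudly at suite construction time. A minimal sketch of that technique, reduced to just the "returns []byte" check:

    package main

    import (
        "fmt"
        "reflect"
    )

    // returnsBytes reports whether fn is a function whose single return
    // value is []byte — the same kind of reflect check wrapA performs.
    func returnsBytes(fn interface{}) bool {
        t := reflect.TypeOf(fn)
        if t == nil || t.Kind() != reflect.Func {
            return false
        }
        return t.NumOut() == 1 &&
            t.Out(0).Kind() == reflect.Slice &&
            t.Out(0).Elem().Kind() == reflect.Uint8
    }

    func main() {
        fmt.Println(returnsBytes(func() []byte { return nil })) // true
        fmt.Println(returnsBytes(func() string { return "" })) // false
        fmt.Println(returnsBytes(42))                          // false
    }
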
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
deleted file mode 100644
index 6b54afe0..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
-
-Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
-coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
-
- ginkgo -nodes=N
-
-where N is the number of nodes you desire.
-*/
-package remote
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters/stenographer"
- "github.com/onsi/ginkgo/types"
-)
-
-type configAndSuite struct {
- config config.GinkgoConfigType
- summary *types.SuiteSummary
-}
-
-type Aggregator struct {
- nodeCount int
- config config.DefaultReporterConfigType
- stenographer stenographer.Stenographer
- result chan bool
-
- suiteBeginnings chan configAndSuite
- aggregatedSuiteBeginnings []configAndSuite
-
- beforeSuites chan *types.SetupSummary
- aggregatedBeforeSuites []*types.SetupSummary
-
- afterSuites chan *types.SetupSummary
- aggregatedAfterSuites []*types.SetupSummary
-
- specCompletions chan *types.SpecSummary
- completedSpecs []*types.SpecSummary
-
- suiteEndings chan *types.SuiteSummary
- aggregatedSuiteEndings []*types.SuiteSummary
- specs []*types.SpecSummary
-
- startTime time.Time
-}
-
-func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
- aggregator := &Aggregator{
- nodeCount: nodeCount,
- result: result,
- config: config,
- stenographer: stenographer,
-
- suiteBeginnings: make(chan configAndSuite, 0),
- beforeSuites: make(chan *types.SetupSummary, 0),
- afterSuites: make(chan *types.SetupSummary, 0),
- specCompletions: make(chan *types.SpecSummary, 0),
- suiteEndings: make(chan *types.SuiteSummary, 0),
- }
-
- go aggregator.mux()
-
- return aggregator
-}
-
-func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- aggregator.suiteBeginnings <- configAndSuite{config, summary}
-}
-
-func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- aggregator.beforeSuites <- setupSummary
-}
-
-func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- aggregator.afterSuites <- setupSummary
-}
-
-func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
- //noop
-}
-
-func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
- aggregator.specCompletions <- specSummary
-}
-
-func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- aggregator.suiteEndings <- summary
-}
-
-func (aggregator *Aggregator) mux() {
-loop:
- for {
- select {
- case configAndSuite := <-aggregator.suiteBeginnings:
- aggregator.registerSuiteBeginning(configAndSuite)
- case setupSummary := <-aggregator.beforeSuites:
- aggregator.registerBeforeSuite(setupSummary)
- case setupSummary := <-aggregator.afterSuites:
- aggregator.registerAfterSuite(setupSummary)
- case specSummary := <-aggregator.specCompletions:
- aggregator.registerSpecCompletion(specSummary)
- case suite := <-aggregator.suiteEndings:
- finished, passed := aggregator.registerSuiteEnding(suite)
- if finished {
- aggregator.result <- passed
- break loop
- }
- }
- }
-}
-
-func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
- aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
-
- if len(aggregator.aggregatedSuiteBeginnings) == 1 {
- aggregator.startTime = time.Now()
- }
-
- if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
- return
- }
-
- aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
-
- totalNumberOfSpecs := 0
-	totalNumberOfSpecs := 0
-	if len(aggregator.aggregatedSuiteBeginnings) > 0 {
-		totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
-	}
-
- aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
- aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
- aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
- aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
- aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
- aggregator.specs = append(aggregator.specs, specSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) flushCompletedSpecs() {
- if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
- return
- }
-
- for _, setupSummary := range aggregator.aggregatedBeforeSuites {
- aggregator.announceBeforeSuite(setupSummary)
- }
-
- for _, specSummary := range aggregator.completedSpecs {
- aggregator.announceSpec(specSummary)
- }
-
- for _, setupSummary := range aggregator.aggregatedAfterSuites {
- aggregator.announceAfterSuite(setupSummary)
- }
-
- aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
- aggregator.completedSpecs = []*types.SpecSummary{}
- aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
-}
-
-func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
- aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
- if setupSummary.State != types.SpecStatePassed {
- aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
- aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
- if setupSummary.State != types.SpecStatePassed {
- aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
- if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
- aggregator.stenographer.AnnounceSpecWillRun(specSummary)
- }
-
- aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
-
- switch specSummary.State {
- case types.SpecStatePassed:
- if specSummary.IsMeasurement {
- aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
- } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
- aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
- } else {
- aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
- }
-
- case types.SpecStatePending:
- aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
- case types.SpecStateSkipped:
- aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
- case types.SpecStateTimedOut:
- aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- case types.SpecStatePanicked:
- aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- case types.SpecStateFailed:
- aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
- aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
- if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
- return false, false
- }
-
- aggregatedSuiteSummary := &types.SuiteSummary{}
- aggregatedSuiteSummary.SuiteSucceeded = true
-
- for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
- if suiteSummary.SuiteSucceeded == false {
- aggregatedSuiteSummary.SuiteSucceeded = false
- }
-
- aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
- aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
- aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
- aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
- aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
- aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
- aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
- }
-
- aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
-
- aggregator.stenographer.SummarizeFailures(aggregator.specs)
- aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
-
- return true, aggregatedSuiteSummary.SuiteSucceeded
-}
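
The aggregator is single-threaded by construction: reporter callbacks arrive on many goroutines, but each one only sends on a channel, and the mux goroutine is the sole owner of all aggregation state. A stripped-down sketch of that pattern (the event type and node count are invented for illustration):

    package main

    import "fmt"

    type event struct{ node int }

    func main() {
        const nodeCount = 3
        begins := make(chan event)
        ends := make(chan event)
        done := make(chan bool)

        // The mux: one goroutine owns the counters, so no locks are needed.
        go func() {
            ended := 0
            for {
                select {
                case e := <-begins:
                    fmt.Println("node began:", e.node)
                case e := <-ends:
                    fmt.Println("node ended:", e.node)
                    ended++
                    if ended == nodeCount {
                        done <- true
                        return
                    }
                }
            }
        }()

        for i := 1; i <= nodeCount; i++ {
            begins <- event{i}
            ends <- event{i}
        }
        <-done
    }
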
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
deleted file mode 100644
index 284bc62e..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package remote
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "os"
-
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/reporters/stenographer"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-//An interface to net/http's client to allow the injection of fakes under test
-type Poster interface {
- Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
-}
-
-/*
-The ForwardingReporter is a Ginkgo reporter that forwards information to
-a Ginkgo remote server.
-
-When streaming parallel test output, this reporter is automatically installed by Ginkgo.
-
-This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
-detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
-in place of Ginkgo's DefaultReporter.
-*/
-
-type ForwardingReporter struct {
- serverHost string
- poster Poster
- outputInterceptor OutputInterceptor
- debugMode bool
- debugFile *os.File
- nestedReporter *reporters.DefaultReporter
-}
-
-func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
- reporter := &ForwardingReporter{
- serverHost: serverHost,
- poster: poster,
- outputInterceptor: outputInterceptor,
- }
-
- if debugFile != "" {
- var err error
- reporter.debugMode = true
- reporter.debugFile, err = os.Create(debugFile)
- if err != nil {
- fmt.Println(err.Error())
- os.Exit(1)
- }
-
- if !config.Verbose {
- //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
- ginkgoWriter.AndRedirectTo(reporter.debugFile)
- }
- outputInterceptor.StreamTo(reporter.debugFile) //This is not working
-
- stenographer := stenographer.New(false, true, reporter.debugFile)
- config.Succinct = false
- config.Verbose = true
- config.FullTrace = true
- reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
- }
-
- return reporter
-}
-
-func (reporter *ForwardingReporter) post(path string, data interface{}) {
- encoded, _ := json.Marshal(data)
- buffer := bytes.NewBuffer(encoded)
- reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
-}
-
-func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
- data := struct {
- Config config.GinkgoConfigType `json:"config"`
- Summary *types.SuiteSummary `json:"suite-summary"`
- }{
- conf,
- summary,
- }
-
- reporter.outputInterceptor.StartInterceptingOutput()
- if reporter.debugMode {
- reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecSuiteWillBegin", data)
-}
-
-func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- setupSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/BeforeSuiteDidRun", setupSummary)
-}
-
-func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if reporter.debugMode {
- reporter.nestedReporter.SpecWillRun(specSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecWillRun", specSummary)
-}
-
-func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- specSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.SpecDidComplete(specSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecDidComplete", specSummary)
-}
-
-func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- setupSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/AfterSuiteDidRun", setupSummary)
-}
-
-func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- if reporter.debugMode {
- reporter.nestedReporter.SpecSuiteDidEnd(summary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecSuiteDidEnd", summary)
-}
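
The Poster interface exists purely so tests can swap the real HTTP client for a double. A sketch of such a fake, recording each POST instead of touching the network (the fakePoster type is invented here, not part of the vendored code):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "io/ioutil"
        "net/http"
    )

    type Poster interface {
        Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
    }

    // Compile-time check that fakePoster satisfies Poster.
    var _ Poster = (*fakePoster)(nil)

    // fakePoster records every POST so a test can assert on what a
    // reporter would have sent.
    type fakePoster struct {
        urls   []string
        bodies []string
    }

    func (f *fakePoster) Post(url, bodyType string, body io.Reader) (*http.Response, error) {
        b, _ := ioutil.ReadAll(body)
        f.urls = append(f.urls, url)
        f.bodies = append(f.bodies, string(b))
        return &http.Response{
            StatusCode: http.StatusOK,
            Body:       ioutil.NopCloser(bytes.NewReader(nil)),
        }, nil
    }

    func main() {
        p := &fakePoster{}
        p.Post("http://host/SpecDidComplete", "application/json", bytes.NewBufferString(`{}`))
        fmt.Println(p.urls) // [http://host/SpecDidComplete]
    }
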
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
deleted file mode 100644
index 5154abe8..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package remote
-
-import "os"
-
-/*
-The OutputInterceptor is used by the ForwardingReporter to
-intercept and capture all stdout and stderr output during a test run.
-*/
-type OutputInterceptor interface {
- StartInterceptingOutput() error
- StopInterceptingAndReturnOutput() (string, error)
- StreamTo(*os.File)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
deleted file mode 100644
index ab6622a2..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// +build freebsd openbsd netbsd dragonfly darwin linux solaris
-
-package remote
-
-import (
- "errors"
- "io/ioutil"
- "os"
-
- "github.com/hpcloud/tail"
-)
-
-func NewOutputInterceptor() OutputInterceptor {
- return &outputInterceptor{}
-}
-
-type outputInterceptor struct {
- redirectFile *os.File
- streamTarget *os.File
- intercepting bool
- tailer *tail.Tail
- doneTailing chan bool
-}
-
-func (interceptor *outputInterceptor) StartInterceptingOutput() error {
- if interceptor.intercepting {
- return errors.New("Already intercepting output!")
- }
- interceptor.intercepting = true
-
- var err error
-
- interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
- if err != nil {
- return err
- }
-
-	// Call a function in ./syscall_dup_*.go
-	// If building for anything other than linux_arm64 (or solaris, which goes through x/sys/unix),
-	// this is a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2),
-	// syscall.Dup3(oldfd, newfd, 0) is called instead. They are nearly identical, see: http://linux.die.net/man/2/dup3
- syscallDup(int(interceptor.redirectFile.Fd()), 1)
- syscallDup(int(interceptor.redirectFile.Fd()), 2)
-
- if interceptor.streamTarget != nil {
- interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
- interceptor.doneTailing = make(chan bool)
-
- go func() {
- for line := range interceptor.tailer.Lines {
- interceptor.streamTarget.Write([]byte(line.Text + "\n"))
- }
- close(interceptor.doneTailing)
- }()
- }
-
- return nil
-}
-
-func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
- if !interceptor.intercepting {
- return "", errors.New("Not intercepting output!")
- }
-
- interceptor.redirectFile.Close()
- output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
- os.Remove(interceptor.redirectFile.Name())
-
- interceptor.intercepting = false
-
- if interceptor.streamTarget != nil {
- interceptor.tailer.Stop()
- interceptor.tailer.Cleanup()
- <-interceptor.doneTailing
- interceptor.streamTarget.Sync()
- }
-
- return string(output), err
-}
-
-func (interceptor *outputInterceptor) StreamTo(out *os.File) {
- interceptor.streamTarget = out
-}
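
The interception trick above is file-descriptor surgery: fd 1 and fd 2 are pointed at a temp file, so even writes that bypass os.Stdout get captured. A minimal sketch of the same idea, assuming a platform that has syscall.Dup2 (per the build notes above, linux_arm64 would need Dup3 instead):

    // +build linux darwin

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "syscall"
    )

    func main() {
        saved, _ := syscall.Dup(1) // keep a handle on the real stdout
        tmp, _ := ioutil.TempFile("", "capture")

        syscall.Dup2(int(tmp.Fd()), 1) // fd 1 now points at the temp file
        fmt.Println("captured, not printed")
        syscall.Dup2(saved, 1) // restore the real stdout

        tmp.Close()
        out, _ := ioutil.ReadFile(tmp.Name())
        os.Remove(tmp.Name())
        fmt.Printf("got: %q\n", string(out))
    }
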
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
deleted file mode 100644
index 40c79033..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// +build windows
-
-package remote
-
-import (
- "errors"
- "os"
-)
-
-func NewOutputInterceptor() OutputInterceptor {
- return &outputInterceptor{}
-}
-
-type outputInterceptor struct {
- intercepting bool
-}
-
-func (interceptor *outputInterceptor) StartInterceptingOutput() error {
- if interceptor.intercepting {
- return errors.New("Already intercepting output!")
- }
- interceptor.intercepting = true
-
- // not working on windows...
-
- return nil
-}
-
-func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
- // not working on windows...
- interceptor.intercepting = false
-
- return "", nil
-}
-
-func (interceptor *outputInterceptor) StreamTo(*os.File) {}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go
deleted file mode 100644
index 367c54da..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/server.go
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
-
-The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
-This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
-
-*/
-
-package remote
-
-import (
- "encoding/json"
- "io/ioutil"
- "net"
- "net/http"
- "sync"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-)
-
-/*
-Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
-It then forwards that communication to attached reporters.
-*/
-type Server struct {
- listener net.Listener
- reporters []reporters.Reporter
- alives []func() bool
- lock *sync.Mutex
- beforeSuiteData types.RemoteBeforeSuiteData
- parallelTotal int
- counter int
-}
-
-//Create a new server, automatically selecting a port
-func NewServer(parallelTotal int) (*Server, error) {
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- return nil, err
- }
- return &Server{
- listener: listener,
- lock: &sync.Mutex{},
- alives: make([]func() bool, parallelTotal),
- beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
- parallelTotal: parallelTotal,
- }, nil
-}
-
-//Start the server. You don't need to `go s.Start()`, just `s.Start()`
-func (server *Server) Start() {
- httpServer := &http.Server{}
- mux := http.NewServeMux()
- httpServer.Handler = mux
-
- //streaming endpoints
- mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
- mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
- mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
- mux.HandleFunc("/SpecWillRun", server.specWillRun)
- mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
- mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
-
- //synchronization endpoints
- mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
- mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
- mux.HandleFunc("/counter", server.handleCounter)
- mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
-
- go httpServer.Serve(server.listener)
-}
-
-//Stop the server
-func (server *Server) Close() {
- server.listener.Close()
-}
-
-//The address at which the server can be reached. Pass this into the `ForwardingReporter`.
-func (server *Server) Address() string {
- return "http://" + server.listener.Addr().String()
-}
-
-//
-// Streaming Endpoints
-//
-
-func (server *Server) readAll(request *http.Request) []byte {
-	defer request.Body.Close()
-	body, _ := ioutil.ReadAll(request.Body)
-	return body
-}
-
-//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
-func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
- server.reporters = reporters
-}
-
-func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
-
- var data struct {
- Config config.GinkgoConfigType `json:"config"`
- Summary *types.SuiteSummary `json:"suite-summary"`
- }
-
- json.Unmarshal(body, &data)
-
- for _, reporter := range server.reporters {
- reporter.SpecSuiteWillBegin(data.Config, data.Summary)
- }
-}
-
-func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var setupSummary *types.SetupSummary
- json.Unmarshal(body, &setupSummary)
-
- for _, reporter := range server.reporters {
- reporter.BeforeSuiteDidRun(setupSummary)
- }
-}
-
-func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var setupSummary *types.SetupSummary
- json.Unmarshal(body, &setupSummary)
-
- for _, reporter := range server.reporters {
- reporter.AfterSuiteDidRun(setupSummary)
- }
-}
-
-func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var specSummary *types.SpecSummary
- json.Unmarshal(body, &specSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecWillRun(specSummary)
- }
-}
-
-func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var specSummary *types.SpecSummary
- json.Unmarshal(body, &specSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecDidComplete(specSummary)
- }
-}
-
-func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var suiteSummary *types.SuiteSummary
- json.Unmarshal(body, &suiteSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecSuiteDidEnd(suiteSummary)
- }
-}
-
-//
-// Synchronization Endpoints
-//
-
-func (server *Server) RegisterAlive(node int, alive func() bool) {
- server.lock.Lock()
- defer server.lock.Unlock()
- server.alives[node-1] = alive
-}
-
-func (server *Server) nodeIsAlive(node int) bool {
- server.lock.Lock()
- defer server.lock.Unlock()
- alive := server.alives[node-1]
- if alive == nil {
- return true
- }
- return alive()
-}
-
-func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
- if request.Method == "POST" {
- dec := json.NewDecoder(request.Body)
- dec.Decode(&(server.beforeSuiteData))
- } else {
- beforeSuiteData := server.beforeSuiteData
- if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
- beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
- }
- enc := json.NewEncoder(writer)
- enc.Encode(beforeSuiteData)
- }
-}
-
-func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
- afterSuiteData := types.RemoteAfterSuiteData{
- CanRun: true,
- }
- for i := 2; i <= server.parallelTotal; i++ {
- afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
- }
-
- enc := json.NewEncoder(writer)
- enc.Encode(afterSuiteData)
-}
-
-func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
- c := spec_iterator.Counter{}
- server.lock.Lock()
- c.Index = server.counter
- server.counter = server.counter + 1
- server.lock.Unlock()
-
- json.NewEncoder(writer).Encode(c)
-}
-
-func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
- writer.Write([]byte(""))
-}
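
The /counter endpoint is the heart of parallel scheduling: each GET atomically hands out the next spec index, so any number of nodes can pull work from one queue with no other coordination. A self-contained sketch of that contract using httptest (the handler below is a stand-in, not the vendored server):

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "net/http/httptest"
        "sync"
    )

    // counter mirrors spec_iterator.Counter.
    type counter struct {
        Index int `json:"index"`
    }

    func main() {
        var mu sync.Mutex
        next := 0
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            mu.Lock()
            c := counter{Index: next}
            next++
            mu.Unlock()
            json.NewEncoder(w).Encode(c)
        }))
        defer srv.Close()

        for i := 0; i < 3; i++ {
            resp, _ := http.Get(srv.URL)
            var c counter
            json.NewDecoder(resp.Body).Decode(&c)
            resp.Body.Close()
            fmt.Println("got index", c.Index) // 0, then 1, then 2
        }
    }
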
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
deleted file mode 100644
index 9550d37b..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux,arm64
-
-package remote
-
-import "syscall"
-
-// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
-// use the nearly identical syscall.Dup3 instead
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup3(oldfd, newfd, 0)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
deleted file mode 100644
index 75ef7fb7..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build solaris
-
-package remote
-
-import "golang.org/x/sys/unix"
-
-func syscallDup(oldfd int, newfd int) (err error) {
- return unix.Dup2(oldfd, newfd)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
deleted file mode 100644
index ef625596..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !linux !arm64
-// +build !windows
-// +build !solaris
-
-package remote
-
-import "syscall"
-
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup2(oldfd, newfd)
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
deleted file mode 100644
index 7fd68ee8..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package spec
-
-import (
- "fmt"
- "io"
- "time"
-
- "sync"
-
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
-)
-
-type Spec struct {
- subject leafnodes.SubjectNode
- focused bool
- announceProgress bool
-
- containers []*containernode.ContainerNode
-
- state types.SpecState
- runTime time.Duration
- startTime time.Time
- failure types.SpecFailure
- previousFailures bool
-
- stateMutex *sync.Mutex
-}
-
-func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
- spec := &Spec{
- subject: subject,
- containers: containers,
- focused: subject.Flag() == types.FlagTypeFocused,
- announceProgress: announceProgress,
- stateMutex: &sync.Mutex{},
- }
-
- spec.processFlag(subject.Flag())
- for i := len(containers) - 1; i >= 0; i-- {
- spec.processFlag(containers[i].Flag())
- }
-
- return spec
-}
-
-func (spec *Spec) processFlag(flag types.FlagType) {
- if flag == types.FlagTypeFocused {
- spec.focused = true
- } else if flag == types.FlagTypePending {
- spec.setState(types.SpecStatePending)
- }
-}
-
-func (spec *Spec) Skip() {
- spec.setState(types.SpecStateSkipped)
-}
-
-func (spec *Spec) Failed() bool {
- return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
-}
-
-func (spec *Spec) Passed() bool {
- return spec.getState() == types.SpecStatePassed
-}
-
-func (spec *Spec) Flaked() bool {
- return spec.getState() == types.SpecStatePassed && spec.previousFailures
-}
-
-func (spec *Spec) Pending() bool {
- return spec.getState() == types.SpecStatePending
-}
-
-func (spec *Spec) Skipped() bool {
- return spec.getState() == types.SpecStateSkipped
-}
-
-func (spec *Spec) Focused() bool {
- return spec.focused
-}
-
-func (spec *Spec) IsMeasurement() bool {
- return spec.subject.Type() == types.SpecComponentTypeMeasure
-}
-
-func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
- componentTexts := make([]string, len(spec.containers)+1)
- componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
-
- for i, container := range spec.containers {
- componentTexts[i] = container.Text()
- componentCodeLocations[i] = container.CodeLocation()
- }
-
- componentTexts[len(spec.containers)] = spec.subject.Text()
- componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
-
- runTime := spec.runTime
- if runTime == 0 && !spec.startTime.IsZero() {
- runTime = time.Since(spec.startTime)
- }
-
- return &types.SpecSummary{
- IsMeasurement: spec.IsMeasurement(),
- NumberOfSamples: spec.subject.Samples(),
- ComponentTexts: componentTexts,
- ComponentCodeLocations: componentCodeLocations,
- State: spec.getState(),
- RunTime: runTime,
- Failure: spec.failure,
- Measurements: spec.measurementsReport(),
- SuiteID: suiteID,
- }
-}
-
-func (spec *Spec) ConcatenatedString() string {
- s := ""
- for _, container := range spec.containers {
- s += container.Text() + " "
- }
-
- return s + spec.subject.Text()
-}
-
-func (spec *Spec) Run(writer io.Writer) {
- if spec.getState() == types.SpecStateFailed {
- spec.previousFailures = true
- }
-
- spec.startTime = time.Now()
- defer func() {
- spec.runTime = time.Since(spec.startTime)
- }()
-
- for sample := 0; sample < spec.subject.Samples(); sample++ {
- spec.runSample(sample, writer)
-
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
-}
-
-func (spec *Spec) getState() types.SpecState {
- spec.stateMutex.Lock()
- defer spec.stateMutex.Unlock()
- return spec.state
-}
-
-func (spec *Spec) setState(state types.SpecState) {
- spec.stateMutex.Lock()
- defer spec.stateMutex.Unlock()
- spec.state = state
-}
-
-func (spec *Spec) runSample(sample int, writer io.Writer) {
- spec.setState(types.SpecStatePassed)
- spec.failure = types.SpecFailure{}
- innerMostContainerIndexToUnwind := -1
-
- defer func() {
- for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
- container := spec.containers[i]
- for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
- spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
- justAfterEachState, justAfterEachFailure := justAfterEach.Run()
- if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
- spec.state = justAfterEachState
- spec.failure = justAfterEachFailure
- }
- }
- }
-
- for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
- container := spec.containers[i]
- for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
- spec.announceSetupNode(writer, "AfterEach", container, afterEach)
- afterEachState, afterEachFailure := afterEach.Run()
- if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
- spec.setState(afterEachState)
- spec.failure = afterEachFailure
- }
- }
- }
- }()
-
- for i, container := range spec.containers {
- innerMostContainerIndexToUnwind = i
- for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
- spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
- s, f := beforeEach.Run()
- spec.failure = f
- spec.setState(s)
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
- }
-
- for _, container := range spec.containers {
- for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
- spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
- s, f := justBeforeEach.Run()
- spec.failure = f
- spec.setState(s)
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
- }
-
- spec.announceSubject(writer, spec.subject)
- s, f := spec.subject.Run()
- spec.failure = f
- spec.setState(s)
-}
-
-func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
- if spec.announceProgress {
- s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
- writer.Write([]byte(s))
- }
-}
-
-func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
- if spec.announceProgress {
- nodeType := ""
- switch subject.Type() {
- case types.SpecComponentTypeIt:
- nodeType = "It"
- case types.SpecComponentTypeMeasure:
- nodeType = "Measure"
- }
- s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
- writer.Write([]byte(s))
- }
-}
-
-func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
- if !spec.IsMeasurement() || spec.Failed() {
- return map[string]*types.SpecMeasurement{}
- }
-
- return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
-}
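
runSample relies on a deferred closure plus the innerMostContainerIndexToUnwind marker to guarantee that only containers whose BeforeEach actually started get their AfterEach, and in reverse (innermost-first) order. A sketch of just that unwinding shape:

    package main

    import "fmt"

    func main() {
        containers := []string{"outer", "middle", "inner"}
        innerMost := -1

        // Runs on the way out, walking back over exactly the containers
        // that were entered, innermost first.
        defer func() {
            for i := innerMost; i >= 0; i-- {
                fmt.Println("AfterEach:", containers[i])
            }
        }()

        for i, c := range containers {
            innerMost = i
            fmt.Println("BeforeEach:", c)
        }
        fmt.Println("run subject")
        // Output: BeforeEach outer/middle/inner, run subject,
        // then AfterEach inner/middle/outer.
    }
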
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
deleted file mode 100644
index 006185ab..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package spec
-
-import (
- "math/rand"
- "regexp"
- "sort"
-)
-
-type Specs struct {
- specs []*Spec
- hasProgrammaticFocus bool
- RegexScansFilePath bool
-}
-
-func NewSpecs(specs []*Spec) *Specs {
- return &Specs{
- specs: specs,
- }
-}
-
-func (e *Specs) Specs() []*Spec {
- return e.specs
-}
-
-func (e *Specs) HasProgrammaticFocus() bool {
- return e.hasProgrammaticFocus
-}
-
-func (e *Specs) Shuffle(r *rand.Rand) {
- sort.Sort(e)
- permutation := r.Perm(len(e.specs))
- shuffledSpecs := make([]*Spec, len(e.specs))
- for i, j := range permutation {
- shuffledSpecs[i] = e.specs[j]
- }
- e.specs = shuffledSpecs
-}
-
-func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
- if focusString == "" && skipString == "" {
- e.applyProgrammaticFocus()
- } else {
- e.applyRegExpFocusAndSkip(description, focusString, skipString)
- }
-}
-
-func (e *Specs) applyProgrammaticFocus() {
- e.hasProgrammaticFocus = false
- for _, spec := range e.specs {
- if spec.Focused() && !spec.Pending() {
- e.hasProgrammaticFocus = true
- break
- }
- }
-
- if e.hasProgrammaticFocus {
- for _, spec := range e.specs {
- if !spec.Focused() {
- spec.Skip()
- }
- }
- }
-}
-
-// toMatch returns a []byte to be used by regex matchers. When adding new behaviours to the matching function,
-// this is the place to which we append.
-func (e *Specs) toMatch(description string, spec *Spec) []byte {
- if e.RegexScansFilePath {
- return []byte(
- description + " " +
- spec.ConcatenatedString() + " " +
- spec.subject.CodeLocation().FileName)
- } else {
- return []byte(
- description + " " +
- spec.ConcatenatedString())
- }
-}
-
-func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) {
- for _, spec := range e.specs {
- matchesFocus := true
- matchesSkip := false
-
- toMatch := e.toMatch(description, spec)
-
- if focusString != "" {
- focusFilter := regexp.MustCompile(focusString)
- matchesFocus = focusFilter.Match([]byte(toMatch))
- }
-
- if skipString != "" {
- skipFilter := regexp.MustCompile(skipString)
- matchesSkip = skipFilter.Match([]byte(toMatch))
- }
-
- if !matchesFocus || matchesSkip {
- spec.Skip()
- }
- }
-}
-
-func (e *Specs) SkipMeasurements() {
- for _, spec := range e.specs {
- if spec.IsMeasurement() {
- spec.Skip()
- }
- }
-}
-
-//sort.Interface
-
-func (e *Specs) Len() int {
- return len(e.specs)
-}
-
-func (e *Specs) Less(i, j int) bool {
- return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
-}
-
-func (e *Specs) Swap(i, j int) {
- e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
-}
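
Focus and skip are independent regex gates: a spec runs only if it matches the focus pattern (when one is set) and does not match the skip pattern. A small sketch of that predicate (shouldRun is a name invented for the example):

    package main

    import (
        "fmt"
        "regexp"
    )

    func shouldRun(text, focus, skip string) bool {
        matchesFocus := true
        matchesSkip := false
        if focus != "" {
            matchesFocus = regexp.MustCompile(focus).MatchString(text)
        }
        if skip != "" {
            matchesSkip = regexp.MustCompile(skip).MatchString(text)
        }
        return matchesFocus && !matchesSkip
    }

    func main() {
        fmt.Println(shouldRun("Server handles requests", "Server", ""))         // true
        fmt.Println(shouldRun("Server handles requests", "Server", "requests")) // false
        fmt.Println(shouldRun("Client retries", "Server", ""))                  // false
    }
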
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
deleted file mode 100644
index 82272554..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package spec_iterator
-
-func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
- if length == 0 {
- return 0, 0
- }
-
- // We have more nodes than tests. Trivial case.
- if parallelTotal >= length {
- if parallelNode > length {
- return 0, 0
- } else {
- return parallelNode - 1, 1
- }
- }
-
-	// This is the minimum number of tests that a node will be required to run
- minTestsPerNode := length / parallelTotal
-
-	// This is the maximum number of tests that a node will be required to run.
-	// The algorithm guarantees that this is at least the minimum number
-	// and at most one more.
- maxTestsPerNode := minTestsPerNode
- if length%parallelTotal != 0 {
- maxTestsPerNode++
- }
-
-	// Number of nodes that will have to run the maximum number of tests per node
- numMaxLoadNodes := length % parallelTotal
-
-	// Number of nodes that precede the current node and will have to run the maximum number of tests per node
- var numPrecedingMaxLoadNodes int
- if parallelNode > numMaxLoadNodes {
- numPrecedingMaxLoadNodes = numMaxLoadNodes
- } else {
- numPrecedingMaxLoadNodes = parallelNode - 1
- }
-
-	// Number of nodes that precede the current node and will have to run the minimum number of tests per node
- var numPrecedingMinLoadNodes int
- if parallelNode <= numMaxLoadNodes {
- numPrecedingMinLoadNodes = 0
- } else {
- numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
- }
-
- // Evaluate the test start index and number of tests to run
- startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
- if parallelNode > numMaxLoadNodes {
- count = minTestsPerNode
- } else {
- count = maxTestsPerNode
- }
- return
-}
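
The arithmetic above splits length specs into contiguous ranges whose sizes differ by at most one, with the larger ranges assigned to the lowest-numbered nodes. A worked example for 10 specs on 3 nodes, computed by simple accumulation rather than the closed form:

    package main

    import "fmt"

    func main() {
        length, total := 10, 3
        min := length / total // 3: every node runs at least this many
        max := min
        if length%total != 0 {
            max++ // 4: some nodes run one extra
        }
        numMaxLoad := length % total // 1: how many nodes carry the max

        start := 0
        for node := 1; node <= total; node++ {
            count := min
            if node <= numMaxLoad {
                count = max
            }
            fmt.Printf("node %d: specs [%d, %d)\n", node, start, start+count)
            start += count
        }
        // node 1: specs [0, 4)
        // node 2: specs [4, 7)
        // node 3: specs [7, 10)
    }
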
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
deleted file mode 100644
index 99f548bc..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package spec_iterator
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-type ParallelIterator struct {
- specs []*spec.Spec
- host string
- client *http.Client
-}
-
-func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
- return &ParallelIterator{
- specs: specs,
- host: host,
- client: &http.Client{},
- }
-}
-
-func (s *ParallelIterator) Next() (*spec.Spec, error) {
- resp, err := s.client.Get(s.host + "/counter")
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
- }
-
- var counter Counter
- err = json.NewDecoder(resp.Body).Decode(&counter)
- if err != nil {
- return nil, err
- }
-
- if counter.Index >= len(s.specs) {
- return nil, ErrClosed
- }
-
- return s.specs[counter.Index], nil
-}
-
-func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return -1, false
-}
-
-func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- return -1, false
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
deleted file mode 100644
index a51c93b8..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package spec_iterator
-
-import (
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-type SerialIterator struct {
- specs []*spec.Spec
- index int
-}
-
-func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
- return &SerialIterator{
- specs: specs,
- index: 0,
- }
-}
-
-func (s *SerialIterator) Next() (*spec.Spec, error) {
- if s.index >= len(s.specs) {
- return nil, ErrClosed
- }
-
- spec := s.specs[s.index]
- s.index += 1
- return spec, nil
-}
-
-func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return len(s.specs), true
-}
-
-func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- count := 0
- for _, s := range s.specs {
- if !s.Skipped() && !s.Pending() {
- count += 1
- }
- }
- return count, true
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
deleted file mode 100644
index ad4a3ea3..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package spec_iterator
-
-import "github.com/onsi/ginkgo/internal/spec"
-
-type ShardedParallelIterator struct {
- specs []*spec.Spec
- index int
- maxIndex int
-}
-
-func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
- startIndex, count := ParallelizedIndexRange(len(specs), total, node)
-
- return &ShardedParallelIterator{
- specs: specs,
- index: startIndex,
- maxIndex: startIndex + count,
- }
-}
-
-func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
- if s.index >= s.maxIndex {
- return nil, ErrClosed
- }
-
- spec := s.specs[s.index]
- s.index += 1
- return spec, nil
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return s.maxIndex - s.index, true
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- count := 0
- for i := s.index; i < s.maxIndex; i += 1 {
- if !s.specs[i].Skipped() && !s.specs[i].Pending() {
- count += 1
- }
- }
- return count, true
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
deleted file mode 100644
index 74bffad6..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package spec_iterator
-
-import (
- "errors"
-
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-var ErrClosed = errors.New("no more specs to run")
-
-type SpecIterator interface {
- Next() (*spec.Spec, error)
- NumberOfSpecsPriorToIteration() int
- NumberOfSpecsToProcessIfKnown() (int, bool)
- NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
-}
-
-type Counter struct {
- Index int `json:"index"`
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
deleted file mode 100644
index a0b8b62d..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package specrunner
-
-import (
- "crypto/rand"
- "fmt"
-)
-
-func randomID() string {
- b := make([]byte, 8)
- _, err := rand.Read(b)
- if err != nil {
- return ""
- }
- return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
deleted file mode 100644
index 2c683cb8..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
+++ /dev/null
@@ -1,411 +0,0 @@
-package specrunner
-
-import (
- "fmt"
- "os"
- "os/signal"
- "sync"
- "syscall"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- Writer "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-
- "time"
-)
-
-type SpecRunner struct {
- description string
- beforeSuiteNode leafnodes.SuiteNode
- iterator spec_iterator.SpecIterator
- afterSuiteNode leafnodes.SuiteNode
- reporters []reporters.Reporter
- startTime time.Time
- suiteID string
- runningSpec *spec.Spec
- writer Writer.WriterInterface
- config config.GinkgoConfigType
- interrupted bool
- processedSpecs []*spec.Spec
- lock *sync.Mutex
-}
-
-func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
- return &SpecRunner{
- description: description,
- beforeSuiteNode: beforeSuiteNode,
- iterator: iterator,
- afterSuiteNode: afterSuiteNode,
- reporters: reporters,
- writer: writer,
- config: config,
- suiteID: randomID(),
- lock: &sync.Mutex{},
- }
-}
-
-func (runner *SpecRunner) Run() bool {
- if runner.config.DryRun {
- runner.performDryRun()
- return true
- }
-
- runner.reportSuiteWillBegin()
- signalRegistered := make(chan struct{})
- go runner.registerForInterrupts(signalRegistered)
- <-signalRegistered
-
- suitePassed := runner.runBeforeSuite()
-
- if suitePassed {
- suitePassed = runner.runSpecs()
- }
-
- runner.blockForeverIfInterrupted()
-
- suitePassed = runner.runAfterSuite() && suitePassed
-
- runner.reportSuiteDidEnd(suitePassed)
-
- return suitePassed
-}
-
-func (runner *SpecRunner) performDryRun() {
- runner.reportSuiteWillBegin()
-
- if runner.beforeSuiteNode != nil {
- summary := runner.beforeSuiteNode.Summary()
- summary.State = types.SpecStatePassed
- runner.reportBeforeSuite(summary)
- }
-
- for {
- spec, err := runner.iterator.Next()
- if err == spec_iterator.ErrClosed {
- break
- }
- if err != nil {
- fmt.Println("failed to iterate over tests:\n" + err.Error())
- break
- }
-
- runner.processedSpecs = append(runner.processedSpecs, spec)
-
- summary := spec.Summary(runner.suiteID)
- runner.reportSpecWillRun(summary)
- if summary.State == types.SpecStateInvalid {
- summary.State = types.SpecStatePassed
- }
- runner.reportSpecDidComplete(summary, false)
- }
-
- if runner.afterSuiteNode != nil {
- summary := runner.afterSuiteNode.Summary()
- summary.State = types.SpecStatePassed
- runner.reportAfterSuite(summary)
- }
-
- runner.reportSuiteDidEnd(true)
-}
-
-func (runner *SpecRunner) runBeforeSuite() bool {
- if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
- return true
- }
-
- runner.writer.Truncate()
- conf := runner.config
- passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
- if !passed {
- runner.writer.DumpOut()
- }
- runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
- return passed
-}
-
-func (runner *SpecRunner) runAfterSuite() bool {
- if runner.afterSuiteNode == nil {
- return true
- }
-
- runner.writer.Truncate()
- conf := runner.config
- passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
- if !passed {
- runner.writer.DumpOut()
- }
- runner.reportAfterSuite(runner.afterSuiteNode.Summary())
- return passed
-}
-
-func (runner *SpecRunner) runSpecs() bool {
- suiteFailed := false
- skipRemainingSpecs := false
- for {
- spec, err := runner.iterator.Next()
- if err == spec_iterator.ErrClosed {
- break
- }
- if err != nil {
- fmt.Println("failed to iterate over tests:\n" + err.Error())
- suiteFailed = true
- break
- }
-
- runner.processedSpecs = append(runner.processedSpecs, spec)
-
- if runner.wasInterrupted() {
- break
- }
- if skipRemainingSpecs {
- spec.Skip()
- }
-
- if !spec.Skipped() && !spec.Pending() {
- if passed := runner.runSpec(spec); !passed {
- suiteFailed = true
- }
- } else if spec.Pending() && runner.config.FailOnPending {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- suiteFailed = true
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- } else {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- }
-
- if spec.Failed() && runner.config.FailFast {
- skipRemainingSpecs = true
- }
- }
-
- return !suiteFailed
-}
-
-func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
- maxAttempts := 1
- if runner.config.FlakeAttempts > 0 {
- // uninitialized configs count as 1
- maxAttempts = runner.config.FlakeAttempts
- }
-
- for i := 0; i < maxAttempts; i++ {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- runner.runningSpec = spec
- spec.Run(runner.writer)
- runner.runningSpec = nil
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- if !spec.Failed() {
- return true
- }
- }
- return false
-}
-
-func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
- if runner.runningSpec == nil {
- return nil, false
- }
-
- return runner.runningSpec.Summary(runner.suiteID), true
-}
-
-func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM)
- close(signalRegistered)
-
- <-c
- signal.Stop(c)
- runner.markInterrupted()
- go runner.registerForHardInterrupts()
- runner.writer.DumpOutWithHeader(`
-Received interrupt. Emitting contents of GinkgoWriter...
----------------------------------------------------------
-`)
- if runner.afterSuiteNode != nil {
- fmt.Fprint(os.Stderr, `
----------------------------------------------------------
-Received interrupt. Running AfterSuite...
-^C again to terminate immediately
-`)
- runner.runAfterSuite()
- }
- runner.reportSuiteDidEnd(false)
- os.Exit(1)
-}
-
-func (runner *SpecRunner) registerForHardInterrupts() {
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM)
-
- <-c
- fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
- os.Exit(1)
-}
-
-func (runner *SpecRunner) blockForeverIfInterrupted() {
- runner.lock.Lock()
- interrupted := runner.interrupted
- runner.lock.Unlock()
-
- if interrupted {
- select {}
- }
-}
-
-func (runner *SpecRunner) markInterrupted() {
- runner.lock.Lock()
- defer runner.lock.Unlock()
- runner.interrupted = true
-}
-
-func (runner *SpecRunner) wasInterrupted() bool {
- runner.lock.Lock()
- defer runner.lock.Unlock()
- return runner.interrupted
-}
-
-func (runner *SpecRunner) reportSuiteWillBegin() {
- runner.startTime = time.Now()
- summary := runner.suiteWillBeginSummary()
- for _, reporter := range runner.reporters {
- reporter.SpecSuiteWillBegin(runner.config, summary)
- }
-}
-
-func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
- for _, reporter := range runner.reporters {
- reporter.BeforeSuiteDidRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
- for _, reporter := range runner.reporters {
- reporter.AfterSuiteDidRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
- runner.writer.Truncate()
-
- for _, reporter := range runner.reporters {
- reporter.SpecWillRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
- if failed && len(summary.CapturedOutput) == 0 {
- summary.CapturedOutput = string(runner.writer.Bytes())
- }
- for i := len(runner.reporters) - 1; i >= 1; i-- {
- runner.reporters[i].SpecDidComplete(summary)
- }
-
- if failed {
- runner.writer.DumpOut()
- }
-
- runner.reporters[0].SpecDidComplete(summary)
-}
-
-func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
- summary := runner.suiteDidEndSummary(success)
- summary.RunTime = time.Since(runner.startTime)
- for _, reporter := range runner.reporters {
- reporter.SpecSuiteDidEnd(summary)
- }
-}
-
-func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
- count = 0
-
- for _, spec := range runner.processedSpecs {
- if filter(spec) {
- count++
- }
- }
-
- return count
-}
-
-func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
- numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return !ex.Skipped() && !ex.Pending()
- })
-
- numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Pending()
- })
-
- numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Skipped()
- })
-
- numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Passed()
- })
-
- numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Flaked()
- })
-
- numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Failed()
- })
-
- if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
- var known bool
- numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
- if !known {
- numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
- }
- numberOfFailedSpecs = numberOfSpecsThatWillBeRun
- }
-
- return &types.SuiteSummary{
- SuiteDescription: runner.description,
- SuiteSucceeded: success,
- SuiteID: runner.suiteID,
-
- NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
- NumberOfTotalSpecs: len(runner.processedSpecs),
- NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
- NumberOfPendingSpecs: numberOfPendingSpecs,
- NumberOfSkippedSpecs: numberOfSkippedSpecs,
- NumberOfPassedSpecs: numberOfPassedSpecs,
- NumberOfFailedSpecs: numberOfFailedSpecs,
- NumberOfFlakedSpecs: numberOfFlakedSpecs,
- }
-}
-
-func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
- numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
- if !known {
- numTotal = -1
- }
-
- numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
- if !known {
- numToRun = -1
- }
-
- return &types.SuiteSummary{
- SuiteDescription: runner.description,
- SuiteID: runner.suiteID,
-
- NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
- NumberOfTotalSpecs: numTotal,
- NumberOfSpecsThatWillBeRun: numToRun,
- NumberOfPendingSpecs: -1,
- NumberOfSkippedSpecs: -1,
- NumberOfPassedSpecs: -1,
- NumberOfFailedSpecs: -1,
- NumberOfFlakedSpecs: -1,
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
deleted file mode 100644
index 3104bbc8..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package suite
-
-import (
- "math/rand"
- "net/http"
- "time"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- "github.com/onsi/ginkgo/internal/specrunner"
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-)
-
-type ginkgoTestingT interface {
- Fail()
-}
-
-type Suite struct {
- topLevelContainer *containernode.ContainerNode
- currentContainer *containernode.ContainerNode
- containerIndex int
- beforeSuiteNode leafnodes.SuiteNode
- afterSuiteNode leafnodes.SuiteNode
- runner *specrunner.SpecRunner
- failer *failer.Failer
- running bool
-}
-
-func New(failer *failer.Failer) *Suite {
- topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
-
- return &Suite{
- topLevelContainer: topLevelContainer,
- currentContainer: topLevelContainer,
- failer: failer,
- containerIndex: 1,
- }
-}
-
-func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
- if config.ParallelTotal < 1 {
- panic("ginkgo.parallel.total must be >= 1")
- }
-
- if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
- panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
- }
-
- r := rand.New(rand.NewSource(config.RandomSeed))
- suite.topLevelContainer.Shuffle(r)
- iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
- suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)
-
- suite.running = true
- success := suite.runner.Run()
- if !success {
- t.Fail()
- }
- return success, hasProgrammaticFocus
-}
-
-func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
- specsSlice := []*spec.Spec{}
- suite.topLevelContainer.BackPropagateProgrammaticFocus()
- for _, collatedNodes := range suite.topLevelContainer.Collate() {
- specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
- }
-
- specs := spec.NewSpecs(specsSlice)
- specs.RegexScansFilePath = config.RegexScansFilePath
-
- if config.RandomizeAllSpecs {
- specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
- }
-
- specs.ApplyFocus(description, config.FocusString, config.SkipString)
-
- if config.SkipMeasurements {
- specs.SkipMeasurements()
- }
-
- var iterator spec_iterator.SpecIterator
-
- if config.ParallelTotal > 1 {
- iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
- resp, err := http.Get(config.SyncHost + "/has-counter")
- if err != nil || resp.StatusCode != http.StatusOK {
- iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
- }
- } else {
- iterator = spec_iterator.NewSerialIterator(specs.Specs())
- }
-
- return iterator, specs.HasProgrammaticFocus()
-}
-
-func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
- return suite.runner.CurrentSpecSummary()
-}
-
-func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.beforeSuiteNode != nil {
- panic("You may only call BeforeSuite once!")
- }
- suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.afterSuiteNode != nil {
- panic("You may only call AfterSuite once!")
- }
- suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.beforeSuiteNode != nil {
- panic("You may only call BeforeSuite once!")
- }
- suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.afterSuiteNode != nil {
- panic("You may only call AfterSuite once!")
- }
- suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
- container := containernode.New(text, flag, codeLocation)
- suite.currentContainer.PushContainerNode(container)
-
- previousContainer := suite.currentContainer
- suite.currentContainer = container
- suite.containerIndex++
-
- body()
-
- suite.containerIndex--
- suite.currentContainer = previousContainer
-}
-
-func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
- if suite.running {
- suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
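
`generateSpecsIterator` above picks its parallel strategy by probing the sync server's `/has-counter` endpoint and falling back to static sharding when the probe fails. A minimal sketch of that probe-and-fallback decision (the URL and strategy labels are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
)

// chooseStrategy probes a coordination endpoint and falls back to static
// sharding when it is unavailable, mirroring generateSpecsIterator above.
func chooseStrategy(syncHost string) string {
	resp, err := http.Get(syncHost + "/has-counter")
	if err != nil || resp.StatusCode != http.StatusOK {
		return "sharded" // no shared counter: partition specs up front
	}
	resp.Body.Close()
	return "parallel" // shared counter available: pull specs on demand
}

func main() {
	fmt.Println(chooseStrategy("http://127.0.0.1:0")) // unreachable -> sharded
}
```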
diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
deleted file mode 100644
index 090445d0..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package testingtproxy
-
-import (
- "fmt"
- "io"
-)
-
-type failFunc func(message string, callerSkip ...int)
-
-func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
- return &ginkgoTestingTProxy{
- fail: fail,
- offset: offset,
- writer: writer,
- }
-}
-
-type ginkgoTestingTProxy struct {
- fail failFunc
- offset int
- writer io.Writer
-}
-
-func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
- t.fail(fmt.Sprintln(args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
- t.fail(fmt.Sprintf(format, args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Fail() {
- t.fail("failed", t.offset)
-}
-
-func (t *ginkgoTestingTProxy) FailNow() {
- t.fail("failed", t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
- t.fail(fmt.Sprintln(args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
- t.fail(fmt.Sprintf(format, args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
- fmt.Fprintln(t.writer, args...)
-}
-
-func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
- t.Log(fmt.Sprintf(format, args...))
-}
-
-func (t *ginkgoTestingTProxy) Failed() bool {
- return false
-}
-
-func (t *ginkgoTestingTProxy) Parallel() {
-}
-
-func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
- fmt.Println(args...)
-}
-
-func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
- t.Skip(fmt.Sprintf(format, args...))
-}
-
-func (t *ginkgoTestingTProxy) SkipNow() {
-}
-
-func (t *ginkgoTestingTProxy) Skipped() bool {
- return false
-}
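
The proxy adapts Ginkgo's internal fail callback to the method set assertion libraries expect from a `*testing.T`. A self-contained sketch of the same adapter shape (the type and variable names here are invented for illustration):

```go
package main

import (
	"fmt"
	"os"
)

// tProxy adapts a fail callback to the Errorf-style methods assertion
// libraries expect, in the spirit of ginkgoTestingTProxy above.
type tProxy struct {
	fail   func(message string, callerSkip ...int)
	offset int
}

func (t *tProxy) Errorf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func main() {
	proxy := &tProxy{
		fail:   func(msg string, _ ...int) { fmt.Fprint(os.Stderr, "FAIL: "+msg) },
		offset: 1,
	}
	proxy.Errorf("expected %d, got %d\n", 4, 5)
}
```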
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
deleted file mode 100644
index 6739c3f6..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package writer
-
-type FakeGinkgoWriter struct {
- EventStream []string
-}
-
-func NewFake() *FakeGinkgoWriter {
- return &FakeGinkgoWriter{
- EventStream: []string{},
- }
-}
-
-func (writer *FakeGinkgoWriter) AddEvent(event string) {
- writer.EventStream = append(writer.EventStream, event)
-}
-
-func (writer *FakeGinkgoWriter) Truncate() {
- writer.EventStream = append(writer.EventStream, "TRUNCATE")
-}
-
-func (writer *FakeGinkgoWriter) DumpOut() {
- writer.EventStream = append(writer.EventStream, "DUMP")
-}
-
-func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
- writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
-}
-
-func (writer *FakeGinkgoWriter) Bytes() []byte {
- writer.EventStream = append(writer.EventStream, "BYTES")
- return nil
-}
-
-func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
- return 0, nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
deleted file mode 100644
index 98eca3bd..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package writer
-
-import (
- "bytes"
- "io"
- "sync"
-)
-
-type WriterInterface interface {
- io.Writer
-
- Truncate()
- DumpOut()
- DumpOutWithHeader(header string)
- Bytes() []byte
-}
-
-type Writer struct {
- buffer *bytes.Buffer
- outWriter io.Writer
- lock *sync.Mutex
- stream bool
- redirector io.Writer
-}
-
-func New(outWriter io.Writer) *Writer {
- return &Writer{
- buffer: &bytes.Buffer{},
- lock: &sync.Mutex{},
- outWriter: outWriter,
- stream: true,
- }
-}
-
-func (w *Writer) AndRedirectTo(writer io.Writer) {
- w.redirector = writer
-}
-
-func (w *Writer) SetStream(stream bool) {
- w.lock.Lock()
- defer w.lock.Unlock()
- w.stream = stream
-}
-
-func (w *Writer) Write(b []byte) (n int, err error) {
- w.lock.Lock()
- defer w.lock.Unlock()
-
- n, err = w.buffer.Write(b)
- if w.redirector != nil {
- w.redirector.Write(b)
- }
- if w.stream {
- return w.outWriter.Write(b)
- }
- return n, err
-}
-
-func (w *Writer) Truncate() {
- w.lock.Lock()
- defer w.lock.Unlock()
- w.buffer.Reset()
-}
-
-func (w *Writer) DumpOut() {
- w.lock.Lock()
- defer w.lock.Unlock()
- if !w.stream {
- w.buffer.WriteTo(w.outWriter)
- }
-}
-
-func (w *Writer) Bytes() []byte {
- w.lock.Lock()
- defer w.lock.Unlock()
- b := w.buffer.Bytes()
- copied := make([]byte, len(b))
- copy(copied, b)
- return copied
-}
-
-func (w *Writer) DumpOutWithHeader(header string) {
- w.lock.Lock()
- defer w.lock.Unlock()
- if !w.stream && w.buffer.Len() > 0 {
- w.outWriter.Write([]byte(header))
- w.buffer.WriteTo(w.outWriter)
- }
-}
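
`Writer.Write` above is a conditional tee: every byte lands in the buffer, optionally mirrors to a redirector, and streams to `outWriter` only while streaming is on; otherwise `DumpOut`/`DumpOutWithHeader` flush later. A minimal sketch of the capture-then-dump half (names are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

// capturingWriter buffers all writes and only emits them on Flush,
// the same capture-then-dump behavior Writer uses when stream is off.
type capturingWriter struct {
	buf bytes.Buffer
}

func (w *capturingWriter) Write(p []byte) (int, error) { return w.buf.Write(p) }

func (w *capturingWriter) Flush(header string) {
	if w.buf.Len() > 0 {
		fmt.Print(header)
		w.buf.WriteTo(os.Stdout)
		w.buf.Reset()
	}
}

func main() {
	w := &capturingWriter{}
	fmt.Fprintln(w, "captured while the spec ran")
	w.Flush("--- spec failed, dumping output ---\n")
}
```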
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
deleted file mode 100644
index ac58dd5f..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
-Ginkgo's Default Reporter
-
-A number of command line flags are available to tweak Ginkgo's default output.
-
-These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
-*/
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters/stenographer"
- "github.com/onsi/ginkgo/types"
-)
-
-type DefaultReporter struct {
- config config.DefaultReporterConfigType
- stenographer stenographer.Stenographer
- specSummaries []*types.SpecSummary
-}
-
-func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
- return &DefaultReporter{
- config: config,
- stenographer: stenographer,
- }
-}
-
-func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
- if config.ParallelTotal > 1 {
- reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
- } else {
- reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
- }
-}
-
-func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-}
-
-func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-}
-
-func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
- reporter.stenographer.AnnounceSpecWillRun(specSummary)
- }
-}
-
-func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- switch specSummary.State {
- case types.SpecStatePassed:
- if specSummary.IsMeasurement {
- reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
- } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
- reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
- } else {
- reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
- }
- case types.SpecStatePending:
- reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
- case types.SpecStateSkipped:
- reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
- case types.SpecStateTimedOut:
- reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- case types.SpecStatePanicked:
- reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- case types.SpecStateFailed:
- reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-
- reporter.specSummaries = append(reporter.specSummaries, specSummary)
-}
-
-func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.stenographer.SummarizeFailures(reporter.specSummaries)
- reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
deleted file mode 100644
index 27db4794..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-// FakeReporter is useful for testing purposes.
-type FakeReporter struct {
- Config config.GinkgoConfigType
-
- BeginSummary *types.SuiteSummary
- BeforeSuiteSummary *types.SetupSummary
- SpecWillRunSummaries []*types.SpecSummary
- SpecSummaries []*types.SpecSummary
- AfterSuiteSummary *types.SetupSummary
- EndSummary *types.SuiteSummary
-
- SpecWillRunStub func(specSummary *types.SpecSummary)
- SpecDidCompleteStub func(specSummary *types.SpecSummary)
-}
-
-func NewFakeReporter() *FakeReporter {
- return &FakeReporter{
- SpecWillRunSummaries: make([]*types.SpecSummary, 0),
- SpecSummaries: make([]*types.SpecSummary, 0),
- }
-}
-
-func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- fakeR.Config = config
- fakeR.BeginSummary = summary
-}
-
-func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- fakeR.BeforeSuiteSummary = setupSummary
-}
-
-func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if fakeR.SpecWillRunStub != nil {
- fakeR.SpecWillRunStub(specSummary)
- }
- fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
-}
-
-func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- if fakeR.SpecDidCompleteStub != nil {
- fakeR.SpecDidCompleteStub(specSummary)
- }
- fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
-}
-
-func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- fakeR.AfterSuiteSummary = setupSummary
-}
-
-func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- fakeR.EndSummary = summary
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
deleted file mode 100644
index 2c9f3c79..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
-
-JUnit XML Reporter for Ginkgo
-
-For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
-
-*/
-
-package reporters
-
-import (
- "encoding/xml"
- "fmt"
- "math"
- "os"
- "strings"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-type JUnitTestSuite struct {
- XMLName xml.Name `xml:"testsuite"`
- TestCases []JUnitTestCase `xml:"testcase"`
- Name string `xml:"name,attr"`
- Tests int `xml:"tests,attr"`
- Failures int `xml:"failures,attr"`
- Errors int `xml:"errors,attr"`
- Time float64 `xml:"time,attr"`
-}
-
-type JUnitTestCase struct {
- Name string `xml:"name,attr"`
- ClassName string `xml:"classname,attr"`
- FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
- Skipped *JUnitSkipped `xml:"skipped,omitempty"`
- Time float64 `xml:"time,attr"`
- SystemOut string `xml:"system-out,omitempty"`
-}
-
-type JUnitFailureMessage struct {
- Type string `xml:"type,attr"`
- Message string `xml:",chardata"`
-}
-
-type JUnitSkipped struct {
- XMLName xml.Name `xml:"skipped"`
-}
-
-type JUnitReporter struct {
- suite JUnitTestSuite
- filename string
- testSuiteName string
-}
-
-// NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed-in filename.
-func NewJUnitReporter(filename string) *JUnitReporter {
- return &JUnitReporter{
- filename: filename,
- }
-}
-
-func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.suite = JUnitTestSuite{
- Name: summary.SuiteDescription,
- TestCases: []JUnitTestCase{},
- }
- reporter.testSuiteName = summary.SuiteDescription
-}
-
-func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
-}
-
-func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("BeforeSuite", setupSummary)
-}
-
-func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("AfterSuite", setupSummary)
-}
-
-func failureMessage(failure types.SpecFailure) string {
- return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
-}
-
-func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- testCase := JUnitTestCase{
- Name: name,
- ClassName: reporter.testSuiteName,
- }
-
- testCase.FailureMessage = &JUnitFailureMessage{
- Type: reporter.failureTypeForState(setupSummary.State),
- Message: failureMessage(setupSummary.Failure),
- }
- testCase.SystemOut = setupSummary.CapturedOutput
- testCase.Time = setupSummary.RunTime.Seconds()
- reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
- }
-}
-
-func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- testCase := JUnitTestCase{
- Name: strings.Join(specSummary.ComponentTexts[1:], " "),
- ClassName: reporter.testSuiteName,
- }
- if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
- testCase.FailureMessage = &JUnitFailureMessage{
- Type: reporter.failureTypeForState(specSummary.State),
- Message: failureMessage(specSummary.Failure),
- }
- testCase.SystemOut = specSummary.CapturedOutput
- }
- if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
- testCase.Skipped = &JUnitSkipped{}
- }
- testCase.Time = specSummary.RunTime.Seconds()
- reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
-}
-
-func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
- reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
- reporter.suite.Failures = summary.NumberOfFailedSpecs
- reporter.suite.Errors = 0
- file, err := os.Create(reporter.filename)
- if err != nil {
- fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
- }
- defer file.Close()
- file.WriteString(xml.Header)
- encoder := xml.NewEncoder(file)
- encoder.Indent(" ", " ")
- err = encoder.Encode(reporter.suite)
- if err != nil {
- fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
- }
-}
-
-func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
- switch state {
- case types.SpecStateFailed:
- return "Failure"
- case types.SpecStateTimedOut:
- return "Timeout"
- case types.SpecStatePanicked:
- return "Panic"
- default:
- return ""
- }
-}
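
`SpecSuiteDidEnd` above writes the XML header and then an indented `xml.Encoder`; note it only logs a failed `os.Create` and continues, so the subsequent writes quietly fail on a nil file handle. A self-contained sketch of the same encode sequence using stand-in structs:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

// Minimal stand-ins for the JUnitTestSuite/JUnitTestCase structs above.
type testSuite struct {
	XMLName   xml.Name   `xml:"testsuite"`
	Tests     int        `xml:"tests,attr"`
	Failures  int        `xml:"failures,attr"`
	TestCases []testCase `xml:"testcase"`
}

type testCase struct {
	Name      string  `xml:"name,attr"`
	ClassName string  `xml:"classname,attr"`
	Time      float64 `xml:"time,attr"`
}

func main() {
	suite := testSuite{
		Tests: 1,
		TestCases: []testCase{
			{Name: "adds numbers", ClassName: "Calculator Suite", Time: 0.003},
		},
	}
	// Same emit sequence as SpecSuiteDidEnd: XML header, then an indented encoder.
	fmt.Print(xml.Header)
	enc := xml.NewEncoder(os.Stdout)
	enc.Indent("  ", "  ")
	if err := enc.Encode(suite); err != nil {
		fmt.Fprintf(os.Stderr, "failed to generate report: %s\n", err)
	}
}
```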
diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go
deleted file mode 100644
index 348b9dfc..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/reporter.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-type Reporter interface {
- SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
- BeforeSuiteDidRun(setupSummary *types.SetupSummary)
- SpecWillRun(specSummary *types.SpecSummary)
- SpecDidComplete(specSummary *types.SpecSummary)
- AfterSuiteDidRun(setupSummary *types.SetupSummary)
- SpecSuiteDidEnd(summary *types.SuiteSummary)
-}
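
Any type satisfying these six methods can be handed to Ginkgo v1 (e.g. via `RunSpecsWithCustomReporters`). A minimal sketch that only counts failures, with a compile-time assertion that it satisfies the interface (the type name is illustrative):

```go
package custom

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/types"
)

// failureCounter is a Reporter that no-ops everywhere except the
// per-spec completion hook, where it tallies failures.
type failureCounter struct {
	failed int
}

var _ reporters.Reporter = (*failureCounter)(nil) // compile-time interface check

func (r *failureCounter) SpecSuiteWillBegin(config.GinkgoConfigType, *types.SuiteSummary) {}
func (r *failureCounter) BeforeSuiteDidRun(*types.SetupSummary)                           {}
func (r *failureCounter) SpecWillRun(*types.SpecSummary)                                  {}
func (r *failureCounter) AfterSuiteDidRun(*types.SetupSummary)                            {}

func (r *failureCounter) SpecDidComplete(summary *types.SpecSummary) {
	if summary.HasFailureState() {
		r.failed++
	}
}

func (r *failureCounter) SpecSuiteDidEnd(*types.SuiteSummary) {
	fmt.Printf("%d specs failed\n", r.failed)
}
```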
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
deleted file mode 100644
index 45b8f886..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package stenographer
-
-import (
- "fmt"
- "strings"
-)
-
-func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
- var out string
-
- if len(args) > 0 {
- out = fmt.Sprintf(format, args...)
- } else {
- out = format
- }
-
- if s.color {
- return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
- } else {
- return out
- }
-}
-
-func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
- fmt.Fprintln(s.w, text)
- fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text)))
-}
-
-func (s *consoleStenographer) printNewLine() {
- fmt.Fprintln(s.w, "")
-}
-
-func (s *consoleStenographer) printDelimiter() {
- fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
-}
-
-func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
- fmt.Fprint(s.w, s.indent(indentation, format, args...))
-}
-
-func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
- fmt.Fprintln(s.w, s.indent(indentation, format, args...))
-}
-
-func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
- var text string
-
- if len(args) > 0 {
- text = fmt.Sprintf(format, args...)
- } else {
- text = format
- }
-
- stringArray := strings.Split(text, "\n")
- padding := ""
- if indentation >= 0 {
- padding = strings.Repeat(" ", indentation)
- }
- for i, s := range stringArray {
- stringArray[i] = fmt.Sprintf("%s%s", padding, s)
- }
-
- return strings.Join(stringArray, "\n")
-}
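
`colorize` above is plain ANSI escape handling: prepend a color code, append the reset sequence, and skip both when color is off. The same trick stand-alone:

```go
package main

import "fmt"

const (
	greenColor   = "\x1b[32m"
	redColor     = "\x1b[91m"
	defaultStyle = "\x1b[0m" // reset, so later output keeps the terminal default
)

// colorize wraps formatted text in an ANSI color code, as the stenographer does.
func colorize(code, format string, args ...interface{}) string {
	return code + fmt.Sprintf(format, args...) + defaultStyle
}

func main() {
	fmt.Println(colorize(greenColor, "%d Passed", 12))
	fmt.Println(colorize(redColor, "%d Failed", 1))
}
```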
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
deleted file mode 100644
index 98854e7d..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package stenographer
-
-import (
- "sync"
-
- "github.com/onsi/ginkgo/types"
-)
-
-func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
- return FakeStenographerCall{
- Method: method,
- Args: args,
- }
-}
-
-type FakeStenographer struct {
- calls []FakeStenographerCall
- lock *sync.Mutex
-}
-
-type FakeStenographerCall struct {
- Method string
- Args []interface{}
-}
-
-func NewFakeStenographer() *FakeStenographer {
- stenographer := &FakeStenographer{
- lock: &sync.Mutex{},
- }
- stenographer.Reset()
- return stenographer
-}
-
-func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- return stenographer.calls
-}
-
-func (stenographer *FakeStenographer) Reset() {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- stenographer.calls = make([]FakeStenographerCall, 0)
-}
-
-func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- results := make([]FakeStenographerCall, 0)
- for _, call := range stenographer.calls {
- if call.Method == method {
- results = append(results, call)
- }
- }
-
- return results
-}
-
-func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
-}
-
-func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
- stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
- stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
- stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
- stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
- stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
- stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
- stenographer.registerCall("AnnounceSpecWillRun", spec)
-}
-
-func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
-}
-func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
- stenographer.registerCall("AnnounceCapturedOutput", output)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
- stenographer.registerCall("AnnounceSuccesfulSpec", spec)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
- stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
-}
-
-func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
- stenographer.registerCall("SummarizeFailures", summaries)
-}
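
The fake records every call as a `(Method, Args)` pair so tests can assert on interactions rather than rendered output. A sketch of how a test alongside this file might use it (the test body and values are illustrative):

```go
package stenographer

import "testing"

// TestAnnounceSuiteRecorded shows the interaction-testing style the fake
// enables; the suite description and seed here are made up.
func TestAnnounceSuiteRecorded(t *testing.T) {
	fake := NewFakeStenographer()
	fake.AnnounceSuite("My Suite", 1138, false, false)

	calls := fake.CallsTo("AnnounceSuite")
	if len(calls) != 1 {
		t.Fatalf("expected 1 AnnounceSuite call, got %d", len(calls))
	}
	if calls[0].Args[0] != "My Suite" {
		t.Fatalf("unexpected description: %v", calls[0].Args[0])
	}
}
```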
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
deleted file mode 100644
index 601c74d6..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
-The stenographer is used by Ginkgo's reporters to generate output.
-
-Move along, nothing to see here.
-*/
-
-package stenographer
-
-import (
- "fmt"
- "io"
- "runtime"
- "strings"
-
- "github.com/onsi/ginkgo/types"
-)
-
-const defaultStyle = "\x1b[0m"
-const boldStyle = "\x1b[1m"
-const redColor = "\x1b[91m"
-const greenColor = "\x1b[32m"
-const yellowColor = "\x1b[33m"
-const cyanColor = "\x1b[36m"
-const grayColor = "\x1b[90m"
-const lightGrayColor = "\x1b[37m"
-
-type cursorStateType int
-
-const (
- cursorStateTop cursorStateType = iota
- cursorStateStreaming
- cursorStateMidBlock
- cursorStateEndBlock
-)
-
-type Stenographer interface {
- AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
- AnnounceAggregatedParallelRun(nodes int, succinct bool)
- AnnounceParallelRun(node int, nodes int, succinct bool)
- AnnounceTotalNumberOfSpecs(total int, succinct bool)
- AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
- AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
-
- AnnounceSpecWillRun(spec *types.SpecSummary)
- AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
- AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
-
- AnnounceCapturedOutput(output string)
-
- AnnounceSuccesfulSpec(spec *types.SpecSummary)
- AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
- AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
-
- AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
- AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
-
- AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
- AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
- AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
-
- SummarizeFailures(summaries []*types.SpecSummary)
-}
-
-func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
- denoter := "•"
- if runtime.GOOS == "windows" {
- denoter = "+"
- }
- return &consoleStenographer{
- color: color,
- denoter: denoter,
- cursorState: cursorStateTop,
- enableFlakes: enableFlakes,
- w: writer,
- }
-}
-
-type consoleStenographer struct {
- color bool
- denoter string
- cursorState cursorStateType
- enableFlakes bool
- w io.Writer
-}
-
-var alternatingColors = []string{defaultStyle, grayColor}
-
-func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
- if succinct {
- s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
- return
- }
- s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
- s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
- if randomizingAll {
- s.print(0, " - Will randomize all specs")
- }
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
- if succinct {
- s.print(0, "- node #%d ", node)
- return
- }
- s.println(0,
- "Parallel test node %s/%s.",
- s.colorize(boldStyle, "%d", node),
- s.colorize(boldStyle, "%d", nodes),
- )
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
- if succinct {
- s.print(0, "- %d nodes ", nodes)
- return
- }
- s.println(0,
- "Running in parallel across %s nodes",
- s.colorize(boldStyle, "%d", nodes),
- )
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
- if succinct {
- s.print(0, "- %d/%d specs ", specsToRun, total)
- s.stream()
- return
- }
- s.println(0,
- "Will run %s of %s specs",
- s.colorize(boldStyle, "%d", specsToRun),
- s.colorize(boldStyle, "%d", total),
- )
-
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
- if succinct {
- s.print(0, "- %d specs ", total)
- s.stream()
- return
- }
- s.println(0,
- "Will run %s specs",
- s.colorize(boldStyle, "%d", total),
- )
-
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
- if succinct && summary.SuiteSucceeded {
- s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
- return
- }
- s.printNewLine()
- color := greenColor
- if !summary.SuiteSucceeded {
- color = redColor
- }
- s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
-
- status := ""
- if summary.SuiteSucceeded {
- status = s.colorize(boldStyle+greenColor, "SUCCESS!")
- } else {
- status = s.colorize(boldStyle+redColor, "FAIL!")
- }
-
- flakes := ""
- if s.enableFlakes {
- flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs)
- }
-
- s.print(0,
- "%s -- %s | %s | %s | %s\n",
- status,
- s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
- s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
- s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
- s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
- )
-}
-
-func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
- s.startBlock()
- for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
- s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
- }
-
- indentation := 0
- if len(spec.ComponentTexts) > 2 {
- indentation = 1
- s.printNewLine()
- }
- index := len(spec.ComponentTexts) - 1
- s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
- s.printNewLine()
- s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
- s.printNewLine()
- s.midBlock()
-}
-
-func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.startBlock()
- var message string
- switch summary.State {
- case types.SpecStateFailed:
- message = "Failure"
- case types.SpecStatePanicked:
- message = "Panic"
- case types.SpecStateTimedOut:
- message = "Timeout"
- }
-
- s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
-
- s.printNewLine()
- s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
-
- s.endBlock()
-}
-
-func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
- if output == "" {
- return
- }
-
- s.startBlock()
- s.println(0, output)
- s.midBlock()
-}
-
-func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
- s.print(0, s.colorize(greenColor, s.denoter))
- s.stream()
-}
-
-func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
- s.printBlockWithMessage(
- s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
- "",
- spec,
- succinct,
- )
-}
-
-func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
- s.printBlockWithMessage(
- s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
- s.measurementReport(spec, succinct),
- spec,
- succinct,
- )
-}
-
-func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
- if noisy {
- s.printBlockWithMessage(
- s.colorize(yellowColor, "P [PENDING]"),
- "",
- spec,
- false,
- )
- } else {
- s.print(0, s.colorize(yellowColor, "P"))
- s.stream()
- }
-}
-
-func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- // Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
- if succinct || spec.Failure == (types.SpecFailure{}) {
- s.print(0, s.colorize(cyanColor, "S"))
- s.stream()
- } else {
- s.startBlock()
- s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
-
- s.printNewLine()
- s.printSkip(indentation, spec.Failure)
- s.endBlock()
- }
-}
-
-func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
- failingSpecs := []*types.SpecSummary{}
-
- for _, summary := range summaries {
- if summary.HasFailureState() {
- failingSpecs = append(failingSpecs, summary)
- }
- }
-
- if len(failingSpecs) == 0 {
- return
- }
-
- s.printNewLine()
- s.printNewLine()
- plural := "s"
- if len(failingSpecs) == 1 {
- plural = ""
- }
- s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
- for _, summary := range failingSpecs {
- s.printNewLine()
- if summary.HasFailureState() {
- if summary.TimedOut() {
- s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
- } else if summary.Panicked() {
- s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
- } else if summary.Failed() {
- s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
- }
- s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
- s.printNewLine()
- s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
- }
- }
-}
-
-func (s *consoleStenographer) startBlock() {
- if s.cursorState == cursorStateStreaming {
- s.printNewLine()
- s.printDelimiter()
- } else if s.cursorState == cursorStateMidBlock {
- s.printNewLine()
- }
-}
-
-func (s *consoleStenographer) midBlock() {
- s.cursorState = cursorStateMidBlock
-}
-
-func (s *consoleStenographer) endBlock() {
- s.printDelimiter()
- s.cursorState = cursorStateEndBlock
-}
-
-func (s *consoleStenographer) stream() {
- s.cursorState = cursorStateStreaming
-}
-
-func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
- s.startBlock()
- s.println(0, header)
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
-
- if message != "" {
- s.printNewLine()
- s.println(indentation, message)
- }
-
- s.endBlock()
-}
-
-func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.startBlock()
- s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
-
- s.printNewLine()
- s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
- s.endBlock()
-}
-
-func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
- switch failedComponentType {
- case types.SpecComponentTypeBeforeSuite:
- return " in Suite Setup (BeforeSuite)"
- case types.SpecComponentTypeAfterSuite:
- return " in Suite Teardown (AfterSuite)"
- case types.SpecComponentTypeBeforeEach:
- return " in Spec Setup (BeforeEach)"
- case types.SpecComponentTypeJustBeforeEach:
- return " in Spec Setup (JustBeforeEach)"
- case types.SpecComponentTypeAfterEach:
- return " in Spec Teardown (AfterEach)"
- }
-
- return ""
-}
-
-func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
- s.println(indentation, s.colorize(cyanColor, spec.Message))
- s.printNewLine()
- s.println(indentation, spec.Location.String())
-}
-
-func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
- if state == types.SpecStatePanicked {
- s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
- s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
- s.println(indentation, failure.Location.String())
- s.printNewLine()
- s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
- s.println(indentation, failure.Location.FullStackTrace)
- } else {
- s.println(indentation, s.colorize(redColor, failure.Message))
- s.printNewLine()
- s.println(indentation, failure.Location.String())
- if fullTrace {
- s.printNewLine()
- s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
- s.println(indentation, failure.Location.FullStackTrace)
- }
- }
-}
-
-func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
- startIndex := 1
- indentation := 0
-
- if len(componentTexts) == 1 {
- startIndex = 0
- }
-
- for i := startIndex; i < len(componentTexts); i++ {
- if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
- color := redColor
- if state == types.SpecStateSkipped {
- color = cyanColor
- }
- blockType := ""
- switch failedComponentType {
- case types.SpecComponentTypeBeforeSuite:
- blockType = "BeforeSuite"
- case types.SpecComponentTypeAfterSuite:
- blockType = "AfterSuite"
- case types.SpecComponentTypeBeforeEach:
- blockType = "BeforeEach"
- case types.SpecComponentTypeJustBeforeEach:
- blockType = "JustBeforeEach"
- case types.SpecComponentTypeAfterEach:
- blockType = "AfterEach"
- case types.SpecComponentTypeIt:
- blockType = "It"
- case types.SpecComponentTypeMeasure:
- blockType = "Measurement"
- }
- if succinct {
- s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
- } else {
- s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
- s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
- }
- } else {
- if succinct {
- s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
- } else {
- s.println(indentation, componentTexts[i])
- s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
- }
- }
- indentation++
- }
-
- return indentation
-}
-
-func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
- indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
-
- if succinct {
- if len(componentTexts) > 0 {
- s.printNewLine()
- s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
- }
- s.printNewLine()
- indentation = 1
- } else {
- indentation--
- }
-
- return indentation
-}
-
-func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
- orderedKeys := make([]string, len(measurements))
- for key, measurement := range measurements {
- orderedKeys[measurement.Order] = key
- }
- return orderedKeys
-}
-
-func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
- if len(spec.Measurements) == 0 {
- return "Found no measurements"
- }
-
- message := []string{}
- orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
-
- if succinct {
- message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
- for _, key := range orderedKeys {
- measurement := spec.Measurements[key]
- message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
- s.colorize(boldStyle, "%s", measurement.Name),
- measurement.SmallestLabel,
- s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
- measurement.Units,
- measurement.AverageLabel,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
- measurement.Units,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
- measurement.Units,
- measurement.LargestLabel,
- s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
- measurement.Units,
- ))
- }
- } else {
- message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
- for _, key := range orderedKeys {
- measurement := spec.Measurements[key]
- info := ""
- if measurement.Info != nil {
- message = append(message, fmt.Sprintf("%v", measurement.Info))
- }
-
- message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
- s.colorize(boldStyle, "%s", measurement.Name),
- info,
- measurement.SmallestLabel,
- s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
- measurement.Units,
- measurement.LargestLabel,
- s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
- measurement.Units,
- measurement.AverageLabel,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
- measurement.Units,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
- measurement.Units,
- ))
- }
- }
-
- return strings.Join(message, "\n")
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
deleted file mode 100644
index 91b5cef3..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Yasuhiro Matsumoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
deleted file mode 100644
index e84226a7..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# go-colorable
-
-Colorable writer for Windows.
-
-For example, most logger packages don't show colors on Windows. (I know this can be done with ansicon, but I'd rather not require it.)
-This package makes it possible to handle ANSI color escape sequences on Windows.
-
-## Too Bad!
-
-![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
-
-
-## So Good!
-
-![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
-
-## Usage
-
-```go
-logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
-logrus.SetOutput(colorable.NewColorableStdout())
-
-logrus.Info("succeeded")
-logrus.Warn("not correct")
-logrus.Error("something error")
-logrus.Fatal("panic")
-```
-
-You can compile the above code on non-Windows OSs as well.
-
-## Installation
-
-```
-$ go get github.com/mattn/go-colorable
-```
-
-# License
-
-MIT
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
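
The writer isn't tied to logrus; anything emitting raw ANSI sequences works the same way. A minimal example against the upstream `github.com/mattn/go-colorable` package:

```go
package main

import (
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	// NewColorableStdout returns os.Stdout unchanged on POSIX systems and an
	// escape-sequence-translating writer on Windows consoles.
	out := colorable.NewColorableStdout()
	fmt.Fprintln(out, "\x1b[31mred\x1b[0m and \x1b[32mgreen\x1b[0m")
}
```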
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
deleted file mode 100644
index 52d6653b..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build !windows
-
-package colorable
-
-import (
- "io"
- "os"
-)
-
-func NewColorable(file *os.File) io.Writer {
- if file == nil {
- panic("nil passed instead of *os.File to NewColorable()")
- }
-
- return file
-}
-
-func NewColorableStdout() io.Writer {
- return os.Stdout
-}
-
-func NewColorableStderr() io.Writer {
- return os.Stderr
-}
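
The `// +build !windows` line above is the standard pattern for platform splits: two files declare the same functions and the toolchain compiles exactly one per target (a `_windows` filename suffix constrains the other file without needing a tag). In outline, with hypothetical file and package names:

```go
// file: color_others.go — a hypothetical pair to a color_windows.go
// sibling; the build tag (or a _windows filename suffix) picks one
// implementation per platform, as the two colorable files here do.

// +build !windows

package color

import (
	"io"
	"os"
)

// Stdout returns stdout unwrapped; POSIX terminals interpret ANSI escape
// sequences natively, so no translation layer is needed.
func Stdout() io.Writer { return os.Stdout }
```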
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
deleted file mode 100644
index 10880092..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
+++ /dev/null
@@ -1,783 +0,0 @@
-package colorable
-
-import (
- "bytes"
- "fmt"
- "io"
- "math"
- "os"
- "strconv"
- "strings"
- "syscall"
- "unsafe"
-
- "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty"
-)
-
-const (
- foregroundBlue = 0x1
- foregroundGreen = 0x2
- foregroundRed = 0x4
- foregroundIntensity = 0x8
- foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
- backgroundBlue = 0x10
- backgroundGreen = 0x20
- backgroundRed = 0x40
- backgroundIntensity = 0x80
- backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
-)
-
-type wchar uint16
-type short int16
-type dword uint32
-type word uint16
-
-type coord struct {
- x short
- y short
-}
-
-type smallRect struct {
- left short
- top short
- right short
- bottom short
-}
-
-type consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
- maximumWindowSize coord
-}
-
-var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
- procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
- procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
- procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
- procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
-)
-
-type Writer struct {
- out io.Writer
- handle syscall.Handle
- lastbuf bytes.Buffer
- oldattr word
-}
-
-func NewColorable(file *os.File) io.Writer {
- if file == nil {
- panic("nil passed instead of *os.File to NewColorable()")
- }
-
- if isatty.IsTerminal(file.Fd()) {
- var csbi consoleScreenBufferInfo
- handle := syscall.Handle(file.Fd())
- procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
- return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
- } else {
- return file
- }
-}
-
-func NewColorableStdout() io.Writer {
- return NewColorable(os.Stdout)
-}
-
-func NewColorableStderr() io.Writer {
- return NewColorable(os.Stderr)
-}
-
-var color256 = map[int]int{
- 0: 0x000000,
- 1: 0x800000,
- 2: 0x008000,
- 3: 0x808000,
- 4: 0x000080,
- 5: 0x800080,
- 6: 0x008080,
- 7: 0xc0c0c0,
- 8: 0x808080,
- 9: 0xff0000,
- 10: 0x00ff00,
- 11: 0xffff00,
- 12: 0x0000ff,
- 13: 0xff00ff,
- 14: 0x00ffff,
- 15: 0xffffff,
- 16: 0x000000,
- 17: 0x00005f,
- 18: 0x000087,
- 19: 0x0000af,
- 20: 0x0000d7,
- 21: 0x0000ff,
- 22: 0x005f00,
- 23: 0x005f5f,
- 24: 0x005f87,
- 25: 0x005faf,
- 26: 0x005fd7,
- 27: 0x005fff,
- 28: 0x008700,
- 29: 0x00875f,
- 30: 0x008787,
- 31: 0x0087af,
- 32: 0x0087d7,
- 33: 0x0087ff,
- 34: 0x00af00,
- 35: 0x00af5f,
- 36: 0x00af87,
- 37: 0x00afaf,
- 38: 0x00afd7,
- 39: 0x00afff,
- 40: 0x00d700,
- 41: 0x00d75f,
- 42: 0x00d787,
- 43: 0x00d7af,
- 44: 0x00d7d7,
- 45: 0x00d7ff,
- 46: 0x00ff00,
- 47: 0x00ff5f,
- 48: 0x00ff87,
- 49: 0x00ffaf,
- 50: 0x00ffd7,
- 51: 0x00ffff,
- 52: 0x5f0000,
- 53: 0x5f005f,
- 54: 0x5f0087,
- 55: 0x5f00af,
- 56: 0x5f00d7,
- 57: 0x5f00ff,
- 58: 0x5f5f00,
- 59: 0x5f5f5f,
- 60: 0x5f5f87,
- 61: 0x5f5faf,
- 62: 0x5f5fd7,
- 63: 0x5f5fff,
- 64: 0x5f8700,
- 65: 0x5f875f,
- 66: 0x5f8787,
- 67: 0x5f87af,
- 68: 0x5f87d7,
- 69: 0x5f87ff,
- 70: 0x5faf00,
- 71: 0x5faf5f,
- 72: 0x5faf87,
- 73: 0x5fafaf,
- 74: 0x5fafd7,
- 75: 0x5fafff,
- 76: 0x5fd700,
- 77: 0x5fd75f,
- 78: 0x5fd787,
- 79: 0x5fd7af,
- 80: 0x5fd7d7,
- 81: 0x5fd7ff,
- 82: 0x5fff00,
- 83: 0x5fff5f,
- 84: 0x5fff87,
- 85: 0x5fffaf,
- 86: 0x5fffd7,
- 87: 0x5fffff,
- 88: 0x870000,
- 89: 0x87005f,
- 90: 0x870087,
- 91: 0x8700af,
- 92: 0x8700d7,
- 93: 0x8700ff,
- 94: 0x875f00,
- 95: 0x875f5f,
- 96: 0x875f87,
- 97: 0x875faf,
- 98: 0x875fd7,
- 99: 0x875fff,
- 100: 0x878700,
- 101: 0x87875f,
- 102: 0x878787,
- 103: 0x8787af,
- 104: 0x8787d7,
- 105: 0x8787ff,
- 106: 0x87af00,
- 107: 0x87af5f,
- 108: 0x87af87,
- 109: 0x87afaf,
- 110: 0x87afd7,
- 111: 0x87afff,
- 112: 0x87d700,
- 113: 0x87d75f,
- 114: 0x87d787,
- 115: 0x87d7af,
- 116: 0x87d7d7,
- 117: 0x87d7ff,
- 118: 0x87ff00,
- 119: 0x87ff5f,
- 120: 0x87ff87,
- 121: 0x87ffaf,
- 122: 0x87ffd7,
- 123: 0x87ffff,
- 124: 0xaf0000,
- 125: 0xaf005f,
- 126: 0xaf0087,
- 127: 0xaf00af,
- 128: 0xaf00d7,
- 129: 0xaf00ff,
- 130: 0xaf5f00,
- 131: 0xaf5f5f,
- 132: 0xaf5f87,
- 133: 0xaf5faf,
- 134: 0xaf5fd7,
- 135: 0xaf5fff,
- 136: 0xaf8700,
- 137: 0xaf875f,
- 138: 0xaf8787,
- 139: 0xaf87af,
- 140: 0xaf87d7,
- 141: 0xaf87ff,
- 142: 0xafaf00,
- 143: 0xafaf5f,
- 144: 0xafaf87,
- 145: 0xafafaf,
- 146: 0xafafd7,
- 147: 0xafafff,
- 148: 0xafd700,
- 149: 0xafd75f,
- 150: 0xafd787,
- 151: 0xafd7af,
- 152: 0xafd7d7,
- 153: 0xafd7ff,
- 154: 0xafff00,
- 155: 0xafff5f,
- 156: 0xafff87,
- 157: 0xafffaf,
- 158: 0xafffd7,
- 159: 0xafffff,
- 160: 0xd70000,
- 161: 0xd7005f,
- 162: 0xd70087,
- 163: 0xd700af,
- 164: 0xd700d7,
- 165: 0xd700ff,
- 166: 0xd75f00,
- 167: 0xd75f5f,
- 168: 0xd75f87,
- 169: 0xd75faf,
- 170: 0xd75fd7,
- 171: 0xd75fff,
- 172: 0xd78700,
- 173: 0xd7875f,
- 174: 0xd78787,
- 175: 0xd787af,
- 176: 0xd787d7,
- 177: 0xd787ff,
- 178: 0xd7af00,
- 179: 0xd7af5f,
- 180: 0xd7af87,
- 181: 0xd7afaf,
- 182: 0xd7afd7,
- 183: 0xd7afff,
- 184: 0xd7d700,
- 185: 0xd7d75f,
- 186: 0xd7d787,
- 187: 0xd7d7af,
- 188: 0xd7d7d7,
- 189: 0xd7d7ff,
- 190: 0xd7ff00,
- 191: 0xd7ff5f,
- 192: 0xd7ff87,
- 193: 0xd7ffaf,
- 194: 0xd7ffd7,
- 195: 0xd7ffff,
- 196: 0xff0000,
- 197: 0xff005f,
- 198: 0xff0087,
- 199: 0xff00af,
- 200: 0xff00d7,
- 201: 0xff00ff,
- 202: 0xff5f00,
- 203: 0xff5f5f,
- 204: 0xff5f87,
- 205: 0xff5faf,
- 206: 0xff5fd7,
- 207: 0xff5fff,
- 208: 0xff8700,
- 209: 0xff875f,
- 210: 0xff8787,
- 211: 0xff87af,
- 212: 0xff87d7,
- 213: 0xff87ff,
- 214: 0xffaf00,
- 215: 0xffaf5f,
- 216: 0xffaf87,
- 217: 0xffafaf,
- 218: 0xffafd7,
- 219: 0xffafff,
- 220: 0xffd700,
- 221: 0xffd75f,
- 222: 0xffd787,
- 223: 0xffd7af,
- 224: 0xffd7d7,
- 225: 0xffd7ff,
- 226: 0xffff00,
- 227: 0xffff5f,
- 228: 0xffff87,
- 229: 0xffffaf,
- 230: 0xffffd7,
- 231: 0xffffff,
- 232: 0x080808,
- 233: 0x121212,
- 234: 0x1c1c1c,
- 235: 0x262626,
- 236: 0x303030,
- 237: 0x3a3a3a,
- 238: 0x444444,
- 239: 0x4e4e4e,
- 240: 0x585858,
- 241: 0x626262,
- 242: 0x6c6c6c,
- 243: 0x767676,
- 244: 0x808080,
- 245: 0x8a8a8a,
- 246: 0x949494,
- 247: 0x9e9e9e,
- 248: 0xa8a8a8,
- 249: 0xb2b2b2,
- 250: 0xbcbcbc,
- 251: 0xc6c6c6,
- 252: 0xd0d0d0,
- 253: 0xdadada,
- 254: 0xe4e4e4,
- 255: 0xeeeeee,
-}
-
-func (w *Writer) Write(data []byte) (n int, err error) {
- var csbi consoleScreenBufferInfo
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
-
- er := bytes.NewBuffer(data)
-loop:
- for {
- r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- if r1 == 0 {
- break loop
- }
-
- c1, _, err := er.ReadRune()
- if err != nil {
- break loop
- }
- if c1 != 0x1b {
- fmt.Fprint(w.out, string(c1))
- continue
- }
- c2, _, err := er.ReadRune()
- if err != nil {
- w.lastbuf.WriteRune(c1)
- break loop
- }
- if c2 != 0x5b {
- w.lastbuf.WriteRune(c1)
- w.lastbuf.WriteRune(c2)
- continue
- }
-
- var buf bytes.Buffer
- var m rune
- for {
- c, _, err := er.ReadRune()
- if err != nil {
- w.lastbuf.WriteRune(c1)
- w.lastbuf.WriteRune(c2)
- w.lastbuf.Write(buf.Bytes())
- break loop
- }
- if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
- m = c
- break
- }
- buf.Write([]byte(string(c)))
- }
-
- var csbi consoleScreenBufferInfo
- switch m {
- case 'A':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.y -= short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'B':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.y += short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'C':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x -= short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'D':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
-			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
-			csbi.cursorPosition.x += short(n)
-			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'E':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x = 0
- csbi.cursorPosition.y += short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'F':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x = 0
- csbi.cursorPosition.y -= short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'G':
- n, err = strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
- csbi.cursorPosition.x = short(n)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'H':
- token := strings.Split(buf.String(), ";")
- if len(token) != 2 {
- continue
- }
- n1, err := strconv.Atoi(token[0])
- if err != nil {
- continue
- }
- n2, err := strconv.Atoi(token[1])
- if err != nil {
- continue
- }
- csbi.cursorPosition.x = short(n2)
-			csbi.cursorPosition.y = short(n1)
- procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
- case 'J':
- n, err := strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- var cursor coord
- switch n {
- case 0:
- cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
- case 1:
- cursor = coord{x: csbi.window.left, y: csbi.window.top}
- case 2:
- cursor = coord{x: csbi.window.left, y: csbi.window.top}
- }
- var count, written dword
- count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
- procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
- procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
- case 'K':
- n, err := strconv.Atoi(buf.String())
- if err != nil {
- continue
- }
- var cursor coord
- switch n {
- case 0:
- cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
- case 1:
- cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
- case 2:
- cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
- }
- var count, written dword
- count = dword(csbi.size.x - csbi.cursorPosition.x)
- procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
- procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
- case 'm':
- attr := csbi.attributes
- cs := buf.String()
- if cs == "" {
- procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
- continue
- }
- token := strings.Split(cs, ";")
- for i := 0; i < len(token); i += 1 {
- ns := token[i]
- if n, err = strconv.Atoi(ns); err == nil {
- switch {
- case n == 0 || n == 100:
- attr = w.oldattr
- case 1 <= n && n <= 5:
- attr |= foregroundIntensity
- case n == 7:
- attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
-					case n == 22 || n == 25:
- attr |= foregroundIntensity
- case n == 27:
- attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
- case 30 <= n && n <= 37:
- attr = (attr & backgroundMask)
- if (n-30)&1 != 0 {
- attr |= foregroundRed
- }
- if (n-30)&2 != 0 {
- attr |= foregroundGreen
- }
- if (n-30)&4 != 0 {
- attr |= foregroundBlue
- }
- case n == 38: // set foreground color.
- if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
- if n256, err := strconv.Atoi(token[i+2]); err == nil {
- if n256foreAttr == nil {
- n256setup()
- }
- attr &= backgroundMask
- attr |= n256foreAttr[n256]
- i += 2
- }
- } else {
- attr = attr & (w.oldattr & backgroundMask)
- }
- case n == 39: // reset foreground color.
- attr &= backgroundMask
- attr |= w.oldattr & foregroundMask
- case 40 <= n && n <= 47:
- attr = (attr & foregroundMask)
- if (n-40)&1 != 0 {
- attr |= backgroundRed
- }
- if (n-40)&2 != 0 {
- attr |= backgroundGreen
- }
- if (n-40)&4 != 0 {
- attr |= backgroundBlue
- }
- case n == 48: // set background color.
- if i < len(token)-2 && token[i+1] == "5" {
- if n256, err := strconv.Atoi(token[i+2]); err == nil {
- if n256backAttr == nil {
- n256setup()
- }
- attr &= foregroundMask
- attr |= n256backAttr[n256]
- i += 2
- }
- } else {
- attr = attr & (w.oldattr & foregroundMask)
- }
-					case n == 49: // reset background color.
- attr &= foregroundMask
- attr |= w.oldattr & backgroundMask
- case 90 <= n && n <= 97:
- attr = (attr & backgroundMask)
- attr |= foregroundIntensity
- if (n-90)&1 != 0 {
- attr |= foregroundRed
- }
- if (n-90)&2 != 0 {
- attr |= foregroundGreen
- }
- if (n-90)&4 != 0 {
- attr |= foregroundBlue
- }
- case 100 <= n && n <= 107:
- attr = (attr & foregroundMask)
- attr |= backgroundIntensity
- if (n-100)&1 != 0 {
- attr |= backgroundRed
- }
- if (n-100)&2 != 0 {
- attr |= backgroundGreen
- }
- if (n-100)&4 != 0 {
- attr |= backgroundBlue
- }
- }
- procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
- }
- }
- }
- }
- return len(data) - w.lastbuf.Len(), nil
-}
-
-type consoleColor struct {
- rgb int
- red bool
- green bool
- blue bool
- intensity bool
-}
-
-func (c consoleColor) foregroundAttr() (attr word) {
- if c.red {
- attr |= foregroundRed
- }
- if c.green {
- attr |= foregroundGreen
- }
- if c.blue {
- attr |= foregroundBlue
- }
- if c.intensity {
- attr |= foregroundIntensity
- }
- return
-}
-
-func (c consoleColor) backgroundAttr() (attr word) {
- if c.red {
- attr |= backgroundRed
- }
- if c.green {
- attr |= backgroundGreen
- }
- if c.blue {
- attr |= backgroundBlue
- }
- if c.intensity {
- attr |= backgroundIntensity
- }
- return
-}
-
-var color16 = []consoleColor{
- consoleColor{0x000000, false, false, false, false},
- consoleColor{0x000080, false, false, true, false},
- consoleColor{0x008000, false, true, false, false},
- consoleColor{0x008080, false, true, true, false},
- consoleColor{0x800000, true, false, false, false},
- consoleColor{0x800080, true, false, true, false},
- consoleColor{0x808000, true, true, false, false},
- consoleColor{0xc0c0c0, true, true, true, false},
- consoleColor{0x808080, false, false, false, true},
- consoleColor{0x0000ff, false, false, true, true},
- consoleColor{0x00ff00, false, true, false, true},
- consoleColor{0x00ffff, false, true, true, true},
- consoleColor{0xff0000, true, false, false, true},
- consoleColor{0xff00ff, true, false, true, true},
- consoleColor{0xffff00, true, true, false, true},
- consoleColor{0xffffff, true, true, true, true},
-}
-
-type hsv struct {
- h, s, v float32
-}
-
-func (a hsv) dist(b hsv) float32 {
- dh := a.h - b.h
- switch {
- case dh > 0.5:
- dh = 1 - dh
- case dh < -0.5:
- dh = -1 - dh
- }
- ds := a.s - b.s
- dv := a.v - b.v
- return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
-}
-
-func toHSV(rgb int) hsv {
- r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
- float32((rgb&0x00FF00)>>8)/256.0,
- float32(rgb&0x0000FF)/256.0
- min, max := minmax3f(r, g, b)
- h := max - min
- if h > 0 {
- if max == r {
- h = (g - b) / h
- if h < 0 {
- h += 6
- }
- } else if max == g {
- h = 2 + (b-r)/h
- } else {
- h = 4 + (r-g)/h
- }
- }
- h /= 6.0
- s := max - min
- if max != 0 {
- s /= max
- }
- v := max
- return hsv{h: h, s: s, v: v}
-}
-
-type hsvTable []hsv
-
-func toHSVTable(rgbTable []consoleColor) hsvTable {
- t := make(hsvTable, len(rgbTable))
- for i, c := range rgbTable {
- t[i] = toHSV(c.rgb)
- }
- return t
-}
-
-func (t hsvTable) find(rgb int) consoleColor {
- hsv := toHSV(rgb)
- n := 7
- l := float32(5.0)
- for i, p := range t {
- d := hsv.dist(p)
- if d < l {
- l, n = d, i
- }
- }
- return color16[n]
-}
-
-func minmax3f(a, b, c float32) (min, max float32) {
- if a < b {
- if b < c {
- return a, c
- } else if a < c {
- return a, b
- } else {
- return c, b
- }
- } else {
- if a < c {
- return b, c
- } else if b < c {
- return b, a
- } else {
- return c, a
- }
- }
-}
-
-var n256foreAttr []word
-var n256backAttr []word
-
-func n256setup() {
- n256foreAttr = make([]word, 256)
- n256backAttr = make([]word, 256)
- t := toHSVTable(color16)
- for i, rgb := range color256 {
- c := t.find(rgb)
- n256foreAttr[i] = c.foregroundAttr()
- n256backAttr[i] = c.backgroundAttr()
- }
-}
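
The removed `n256setup` maps each xterm-256 palette entry to the nearest of the 16 Windows console colors by comparing HSV distances, treating hue as circular. A self-contained sketch of that search, using a handful of illustrative palette values rather than the full vendored tables:

```go
// Sketch of the nearest-color search behind n256setup: convert both
// palettes to HSV and pick the console entry with the smallest distance.
package main

import (
	"fmt"
	"math"
)

func toHSV(rgb int) (h, s, v float64) {
	r := float64((rgb>>16)&0xFF) / 255.0
	g := float64((rgb>>8)&0xFF) / 255.0
	b := float64(rgb&0xFF) / 255.0
	max := math.Max(r, math.Max(g, b))
	min := math.Min(r, math.Min(g, b))
	d := max - min
	if d > 0 {
		switch max {
		case r:
			h = math.Mod((g-b)/d+6, 6)
		case g:
			h = (b-r)/d + 2
		default:
			h = (r-g)/d + 4
		}
		h /= 6
	}
	if max > 0 {
		s = d / max
	}
	return h, s, max
}

func dist(h1, s1, v1, h2, s2, v2 float64) float64 {
	dh := math.Abs(h1 - h2)
	if dh > 0.5 { // hue wraps around the color wheel
		dh = 1 - dh
	}
	return math.Sqrt(dh*dh + (s1-s2)*(s1-s2) + (v1-v2)*(v1-v2))
}

func main() {
	console := []int{0x000000, 0x000080, 0x008000, 0x800000, 0xc0c0c0, 0xffffff}
	target := 0x5faf87 // xterm-256 color 72, a mid seafoam green
	th, ts, tv := toHSV(target)
	best, bestD := 0, math.Inf(1)
	for i, c := range console {
		h, s, v := toHSV(c)
		if d := dist(th, ts, tv, h, s, v); d < bestD {
			best, bestD = i, d
		}
	}
	fmt.Printf("nearest console color to %#06x is %#06x\n", target, console[best])
}
```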
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
deleted file mode 100644
index fb976dbd..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package colorable
-
-import (
- "bytes"
- "fmt"
- "io"
-)
-
-type NonColorable struct {
- out io.Writer
- lastbuf bytes.Buffer
-}
-
-func NewNonColorable(w io.Writer) io.Writer {
- return &NonColorable{out: w}
-}
-
-func (w *NonColorable) Write(data []byte) (n int, err error) {
- er := bytes.NewBuffer(data)
-loop:
- for {
- c1, _, err := er.ReadRune()
- if err != nil {
- break loop
- }
- if c1 != 0x1b {
- fmt.Fprint(w.out, string(c1))
- continue
- }
- c2, _, err := er.ReadRune()
- if err != nil {
- w.lastbuf.WriteRune(c1)
- break loop
- }
- if c2 != 0x5b {
- w.lastbuf.WriteRune(c1)
- w.lastbuf.WriteRune(c2)
- continue
- }
-
- var buf bytes.Buffer
- for {
- c, _, err := er.ReadRune()
- if err != nil {
- w.lastbuf.WriteRune(c1)
- w.lastbuf.WriteRune(c2)
- w.lastbuf.Write(buf.Bytes())
- break loop
- }
- if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
- break
- }
- buf.Write([]byte(string(c)))
- }
- }
- return len(data) - w.lastbuf.Len(), nil
-}
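
A short usage sketch for the non-colorable path, again assuming the upstream `github.com/mattn/go-colorable` import path: the parser loop above consumes CSI sequences, so only plain text reaches the wrapped writer.

```go
// Usage sketch: wrap a writer so ANSI escape sequences are silently dropped.
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-colorable"
)

func main() {
	w := colorable.NewNonColorable(os.Stdout)
	// Prints "plain text": the \x1b[...m sequences are eaten by the parser.
	fmt.Fprintln(w, "\x1b[1;31mplain\x1b[0m text")
}
```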
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
deleted file mode 100644
index 65dc692b..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) Yasuhiro MATSUMOTO
-
-MIT License (Expat)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
deleted file mode 100644
index 74845de4..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# go-isatty
-
-isatty for golang
-
-## Usage
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/mattn/go-isatty"
- "os"
-)
-
-func main() {
- if isatty.IsTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Terminal")
- } else {
- fmt.Println("Is Not Terminal")
- }
-}
-```
-
-## Installation
-
-```
-$ go get github.com/mattn/go-isatty
-```
-
-# License
-
-MIT
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
deleted file mode 100644
index 17d4f90e..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package isatty implements an interface to isatty.
-package isatty
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
deleted file mode 100644
index 83c58877..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build appengine
-
-package isatty
-
-// IsTerminal returns true if the file descriptor is a terminal, which
-// is always false on appengine classic, a sandboxed PaaS.
-func IsTerminal(fd uintptr) bool {
- return false
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
deleted file mode 100644
index 98ffe86a..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build darwin freebsd openbsd netbsd
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-// IsTerminal returns true if the file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
deleted file mode 100644
index 9d24bac1..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build linux
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal returns true if the file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
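
The per-GOOS ioctl probes above (TIOCGETA on the BSDs, TCGETS on Linux) are the same checks `golang.org/x/term` performs internally; newer code typically calls that package instead of hand-rolling the syscall. A sketch of the modern equivalent:

```go
// Sketch using golang.org/x/term, which wraps the same termios ioctl
// probe behind a portable API.
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fmt.Println("stdout is a terminal:", term.IsTerminal(int(os.Stdout.Fd())))
}
```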
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
deleted file mode 100644
index 1f0c6bf5..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build solaris
-// +build !appengine
-
-package isatty
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func IsTerminal(fd uintptr) bool {
- var termio unix.Termio
- err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
- return err == nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
deleted file mode 100644
index 83c398b1..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build windows
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-
-// IsTerminal returns true if the file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
deleted file mode 100644
index 36ee2a60..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-
-TeamCity Reporter for Ginkgo
-
-Makes use of TeamCity's support for Service Messages
-http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
-*/
-
-package reporters
-
-import (
- "fmt"
- "io"
- "strings"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-const (
- messageId = "##teamcity"
-)
-
-type TeamCityReporter struct {
- writer io.Writer
- testSuiteName string
-}
-
-func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
- return &TeamCityReporter{
- writer: writer,
- }
-}
-
-func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.testSuiteName = escape(summary.SuiteDescription)
- fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName)
-}
-
-func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("BeforeSuite", setupSummary)
-}
-
-func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("AfterSuite", setupSummary)
-}
-
-func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- testName := escape(name)
- fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
- message := escape(setupSummary.Failure.ComponentCodeLocation.String())
- details := escape(setupSummary.Failure.Message)
- fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
- durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
- fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
- }
-}
-
-func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
- testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
- fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
-}
-
-func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
-
- if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
- message := escape(specSummary.Failure.ComponentCodeLocation.String())
- details := escape(specSummary.Failure.Message)
- fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
- }
- if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
- fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName)
- }
-
- durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
- fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
-}
-
-func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName)
-}
-
-func escape(output string) string {
- output = strings.Replace(output, "|", "||", -1)
- output = strings.Replace(output, "'", "|'", -1)
- output = strings.Replace(output, "\n", "|n", -1)
- output = strings.Replace(output, "\r", "|r", -1)
- output = strings.Replace(output, "[", "|[", -1)
- output = strings.Replace(output, "]", "|]", -1)
- return output
-}
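
The reporter's `escape` helper implements TeamCity's vertical-bar escaping, where `|` itself must be doubled before any other substitution. A compact sketch using a single-pass `strings.Replacer`, which sidesteps that ordering hazard:

```go
// Sketch of TeamCity service-message escaping. strings.Replacer performs
// all substitutions in one pass, so doubling '|' cannot re-escape the
// '|'-prefixed sequences the way sequential strings.Replace calls could.
package main

import (
	"fmt"
	"strings"
)

var tcEscaper = strings.NewReplacer(
	"|", "||", "'", "|'", "\n", "|n", "\r", "|r", "[", "|[", "]", "|]",
)

func main() {
	name := "parses [weird] input\nwith 'quotes'"
	fmt.Printf("##teamcity[testStarted name='%s']\n", tcEscaper.Replace(name))
}
```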
diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go
deleted file mode 100644
index 935a89e1..00000000
--- a/vendor/github.com/onsi/ginkgo/types/code_location.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package types
-
-import (
- "fmt"
-)
-
-type CodeLocation struct {
- FileName string
- LineNumber int
- FullStackTrace string
-}
-
-func (codeLocation CodeLocation) String() string {
- return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
-}
diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go
deleted file mode 100644
index fdd6ed5b..00000000
--- a/vendor/github.com/onsi/ginkgo/types/synchronization.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package types
-
-import (
- "encoding/json"
-)
-
-type RemoteBeforeSuiteState int
-
-const (
- RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota
-
- RemoteBeforeSuiteStatePending
- RemoteBeforeSuiteStatePassed
- RemoteBeforeSuiteStateFailed
- RemoteBeforeSuiteStateDisappeared
-)
-
-type RemoteBeforeSuiteData struct {
- Data []byte
- State RemoteBeforeSuiteState
-}
-
-func (r RemoteBeforeSuiteData) ToJSON() []byte {
- data, _ := json.Marshal(r)
- return data
-}
-
-type RemoteAfterSuiteData struct {
- CanRun bool
-}
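
`ToJSON` above defines the wire format the parallel runner exchanges between nodes; note that a Go `[]byte` field marshals as base64. A small standalone illustration, with the struct copied locally for the sketch:

```go
// Standalone sketch of the JSON shape produced by ToJSON above.
package main

import (
	"encoding/json"
	"fmt"
)

type RemoteBeforeSuiteData struct {
	Data  []byte
	State int // RemoteBeforeSuiteStatePassed == 2 per the iota above
}

func main() {
	b, _ := json.Marshal(RemoteBeforeSuiteData{Data: []byte("setup"), State: 2})
	fmt.Println(string(b)) // {"Data":"c2V0dXA=","State":2}
}
```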
diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go
deleted file mode 100644
index 0e89521b..00000000
--- a/vendor/github.com/onsi/ginkgo/types/types.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package types
-
-import (
- "strconv"
- "time"
-)
-
-const GINKGO_FOCUS_EXIT_CODE = 197
-
-/*
-SuiteSummary represents a summary of the test suite and is passed to both
-Reporter.SpecSuiteWillBegin
-Reporter.SpecSuiteDidEnd
-
-This is unfortunate, as these two methods should receive different objects. When running in parallel
-each node does not deterministically know how many specs it will end up running.
-
-Unfortunately, making such a change would break backward compatibility.
-
-Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields
-with -1.
-*/
-type SuiteSummary struct {
- SuiteDescription string
- SuiteSucceeded bool
- SuiteID string
-
- NumberOfSpecsBeforeParallelization int
- NumberOfTotalSpecs int
- NumberOfSpecsThatWillBeRun int
- NumberOfPendingSpecs int
- NumberOfSkippedSpecs int
- NumberOfPassedSpecs int
- NumberOfFailedSpecs int
- // Flaked specs are those that failed initially, but then passed on a
- // subsequent try.
- NumberOfFlakedSpecs int
- RunTime time.Duration
-}
-
-type SpecSummary struct {
- ComponentTexts []string
- ComponentCodeLocations []CodeLocation
-
- State SpecState
- RunTime time.Duration
- Failure SpecFailure
- IsMeasurement bool
- NumberOfSamples int
- Measurements map[string]*SpecMeasurement
-
- CapturedOutput string
- SuiteID string
-}
-
-func (s SpecSummary) HasFailureState() bool {
- return s.State.IsFailure()
-}
-
-func (s SpecSummary) TimedOut() bool {
- return s.State == SpecStateTimedOut
-}
-
-func (s SpecSummary) Panicked() bool {
- return s.State == SpecStatePanicked
-}
-
-func (s SpecSummary) Failed() bool {
- return s.State == SpecStateFailed
-}
-
-func (s SpecSummary) Passed() bool {
- return s.State == SpecStatePassed
-}
-
-func (s SpecSummary) Skipped() bool {
- return s.State == SpecStateSkipped
-}
-
-func (s SpecSummary) Pending() bool {
- return s.State == SpecStatePending
-}
-
-type SetupSummary struct {
- ComponentType SpecComponentType
- CodeLocation CodeLocation
-
- State SpecState
- RunTime time.Duration
- Failure SpecFailure
-
- CapturedOutput string
- SuiteID string
-}
-
-type SpecFailure struct {
- Message string
- Location CodeLocation
- ForwardedPanic string
-
- ComponentIndex int
- ComponentType SpecComponentType
- ComponentCodeLocation CodeLocation
-}
-
-type SpecMeasurement struct {
- Name string
- Info interface{}
- Order int
-
- Results []float64
-
- Smallest float64
- Largest float64
- Average float64
- StdDeviation float64
-
- SmallestLabel string
- LargestLabel string
- AverageLabel string
- Units string
- Precision int
-}
-
-func (s SpecMeasurement) PrecisionFmt() string {
- if s.Precision == 0 {
- return "%f"
- }
-
- str := strconv.Itoa(s.Precision)
-
- return "%." + str + "f"
-}
-
-type SpecState uint
-
-const (
- SpecStateInvalid SpecState = iota
-
- SpecStatePending
- SpecStateSkipped
- SpecStatePassed
- SpecStateFailed
- SpecStatePanicked
- SpecStateTimedOut
-)
-
-func (state SpecState) IsFailure() bool {
- return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
-}
-
-type SpecComponentType uint
-
-const (
- SpecComponentTypeInvalid SpecComponentType = iota
-
- SpecComponentTypeContainer
- SpecComponentTypeBeforeSuite
- SpecComponentTypeAfterSuite
- SpecComponentTypeBeforeEach
- SpecComponentTypeJustBeforeEach
- SpecComponentTypeJustAfterEach
- SpecComponentTypeAfterEach
- SpecComponentTypeIt
- SpecComponentTypeMeasure
-)
-
-type FlagType uint
-
-const (
- FlagTypeNone FlagType = iota
- FlagTypeFocused
- FlagTypePending
-)
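
`PrecisionFmt` above builds a printf verb from the measurement's configured precision. A two-line illustration of the resulting formatting, with the helper inlined for the sketch:

```go
// Sketch of how PrecisionFmt's output drives fmt: precision 3 yields "%.3f".
package main

import (
	"fmt"
	"strconv"
)

func precisionFmt(p int) string { // mirrors SpecMeasurement.PrecisionFmt
	if p == 0 {
		return "%f"
	}
	return "%." + strconv.Itoa(p) + "f"
}

func main() {
	fmt.Printf(precisionFmt(3)+"\n", 3.14159) // prints 3.142
}
```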
diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore
deleted file mode 100644
index 720c13cb..00000000
--- a/vendor/github.com/onsi/gomega/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-.DS_Store
-*.test
-.
-.idea
-gomega.iml
diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml
deleted file mode 100644
index 4d71367f..00000000
--- a/vendor/github.com/onsi/gomega/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: go
-
-go:
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
- - 1.10.x
- - 1.11.x
-
-env:
- - GO111MODULE=on
-
-install:
- - go get -v ./...
- - go build ./...
- - go get github.com/onsi/ginkgo
- - go install github.com/onsi/ginkgo/ginkgo
-
-script: |
- $HOME/gopath/bin/ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race &&
- go vet &&
- [ -z "`gofmt -l -e -s -w .`" ]
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
deleted file mode 100644
index 9153294f..00000000
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ /dev/null
@@ -1,125 +0,0 @@
-## 1.4.3
-
-### Fixes:
-
-- ensure file name and line numbers are correctly reported for XUnit [6fff58f]
-- Fixed matcher for content-type (#305) [69d9b43]
-
-## 1.4.2
-
-### Fixes:
-
-- Add go.mod and go.sum files to define the gomega go module [f3de367, a085d30]
-- Work around go vet issue with Go v1.11 (#300) [40dd6ad]
-- Better output when using with go XUnit-style tests, fixes #255 (#297) [29a4b97]
-- Fix MatchJSON fail to parse json.RawMessage (#298) [ae19f1b]
-- show threshold in failure message of BeNumericallyMatcher (#293) [4bbecc8]
-
-## 1.4.1
-
-### Fixes:
-
-- Update documentation formatting and examples (#289) [9be8410]
-- allow 'Receive' matcher to be used with concrete types (#286) [41673fd]
-- Fix data race in ghttp server (#283) [7ac6b01]
-- Travis badge should only show master [cc102ab]
-
-## 1.4.0
-
-### Features
-- Make string pretty diff user configurable (#273) [eb112ce, 649b44d]
-
-### Fixes
-- Use httputil.DumpRequest to pretty-print unhandled requests (#278) [a4ff0fc, b7d1a52]
-- fix typo floa32 > float32 (#272) [041ae3b, 6e33911]
-- Fix link to documentation on adding your own matchers (#270) [bb2c830, fcebc62]
-- Use setters and getters to avoid race condition (#262) [13057c3, a9c79f1]
-- Avoid sending a signal if the process is not alive (#259) [b8043e5, 4fc1762]
-- Improve message from AssignableToTypeOf when expected value is nil (#281) [9c1fb20]
-
-## 1.3.0
-
-Improvements:
-
-- The `Equal` matcher matches byte slices more performantly.
-- Improved how `MatchError` matches error strings.
-- `MatchXML` ignores the order of xml node attributes.
-- Improve support for XUnit style golang tests. ([#254](https://github.com/onsi/gomega/issues/254))
-
-Bug Fixes:
-
-- Diff generation now handles multi-byte sequences correctly.
-- Multiple goroutines can now call `gexec.Build` concurrently.
-
-## 1.2.0
-
-Improvements:
-
-- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
-- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
-- `Receive()` no longer errors when passed a closed channel. It's perfectly fine to attempt to read from a closed channel, so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
-- Added `HavePrefix` and `HaveSuffix` matchers.
-- `ghttp` can now handle concurrent requests.
-- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
-- Improved `ghttp`'s behavior around failing assertions and panics:
- - If a registered handler makes a failing assertion `ghttp` will return `500`.
- - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break; such code is almost certainly incorrect and creates a false positive.
-- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
-- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
-- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
-- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that time out if the underlying Closer/Reader/Writer does not return within the allotted time.
-- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer.
-
-Bug Fixes:
-- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
-- `ContainElement` no longer bails if a passed-in matcher errors.
-
-## 1.0 (8/2/2014)
-
-No changes. Dropping "beta" from the version number.
-
-## 1.0.0-beta (7/8/2014)
-Breaking Changes:
-
-- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
-- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher
-
-New Test-Support Features:
-
-- `ghttp`: supports testing http clients
- - Provides a flexible fake http server
- - Provides a collection of chainable http handlers that perform assertions.
-- `gbytes`: supports making ordered assertions against streams of data
- - Provides a `gbytes.Buffer`
- - Provides a `Say` matcher to perform ordered assertions against output data
-- `gexec`: supports testing external processes
- - Provides support for building Go binaries
- - Wraps and starts `exec.Cmd` commands
- - Makes it easy to assert against stdout and stderr
- - Makes it easy to send signals and wait for processes to exit
- - Provides an `Exit` matcher to assert against exit code.
-
-DSL Changes:
-
-- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
-- The default timeouts for `Eventually` and `Consistently` are now configurable.
-
-New Matchers:
-
-- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
-- `BeTemporally`: like `BeNumerically` but for `time.Time`
-- `HaveKeyWithValue`: asserts a map has a given key with the given value.
-
-Updated Matchers:
-
-- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
-- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
-
-Misc:
-
-- Start using semantic versioning
-- Start maintaining changelog
-
-Major refactor:
-
-- Pull out Gomega's internal to `internal`
diff --git a/vendor/github.com/onsi/gomega/CONTRIBUTING.md b/vendor/github.com/onsi/gomega/CONTRIBUTING.md
deleted file mode 100644
index 0d7a0992..00000000
--- a/vendor/github.com/onsi/gomega/CONTRIBUTING.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Contributing to Gomega
-
-Your contributions to Gomega are essential for its long-term maintenance and improvement. To make a contribution:
-
-- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
-- Ensure adequate test coverage:
- - Make sure to add appropriate unit tests
- - Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR
- - Please run following linter locally `go vet ./...` and make sure output does not contain any warnings
-- Update the documentation. In addition to standard `godoc` comments Gomega has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR.
-
-If you're a committer, check out RELEASING.md to learn how to cut a release.
-
-Thanks for supporting Gomega!
diff --git a/vendor/github.com/onsi/gomega/LICENSE b/vendor/github.com/onsi/gomega/LICENSE
deleted file mode 100644
index 9415ee72..00000000
--- a/vendor/github.com/onsi/gomega/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2013-2014 Onsi Fakhouri
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md
deleted file mode 100644
index 76aa6b55..00000000
--- a/vendor/github.com/onsi/gomega/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png)
-
-[![Build Status](https://travis-ci.org/onsi/gomega.svg?branch=master)](https://travis-ci.org/onsi/gomega)
-
-Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).
-
-If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
-
-## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang
-
-Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/)
-
-## Community Matchers
-
-A collection of community matchers is available on the [wiki](https://github.com/onsi/gomega/wiki).
-
-## License
-
-Gomega is MIT-Licensed
-
-The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license.
diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md
deleted file mode 100644
index 998d64ee..00000000
--- a/vendor/github.com/onsi/gomega/RELEASING.md
+++ /dev/null
@@ -1,12 +0,0 @@
-A Gomega release is a tagged sha and a GitHub release. To cut a release:
-
-1. Ensure CHANGELOG.md is up to date.
- - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
- - Categorize the changes into
- - Breaking Changes (requires a major version)
- - New Features (minor version)
- - Fixes (fix version)
- - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
-2. Update GOMEGA_VERSION in `gomega_dsl.go`
-3. Push a commit with the version number as the commit message (e.g. `v1.3.0`)
-4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
deleted file mode 100644
index 6559525f..00000000
--- a/vendor/github.com/onsi/gomega/format/format.go
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
-Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information.
-*/
-package format
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
-var MaxDepth = uint(10)
-
-/*
-By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
-
-Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead.
-
-Note that GoString and String don't always have all the information you need to understand why a test failed!
-*/
-var UseStringerRepresentation = false
-
-/*
-Print the content of context objects. By default it will be suppressed.
-
-Set PrintContextObjects = true to enable printing of the context internals.
-*/
-var PrintContextObjects = false
-
-// TruncatedDiff chooses whether to display a truncated pretty diff or not.
-var TruncatedDiff = true
-
-// Ctx is defined here to keep backwards compatibility with Go < 1.7.
-// It matches the context.Context interface.
-type Ctx interface {
- Deadline() (deadline time.Time, ok bool)
- Done() <-chan struct{}
- Err() error
- Value(key interface{}) interface{}
-}
-
-var contextType = reflect.TypeOf((*Ctx)(nil)).Elem()
-var timeType = reflect.TypeOf(time.Time{})
-
-// The default indentation string emitted by the format package.
-var Indent = " "
-
-var longFormThreshold = 20
-
-/*
-Generates a formatted matcher success/failure message of the form:
-
-	Expected
-		<pretty printed actual>
-	<message>
-		<pretty printed expected>
-
-If expected is omitted, then the message looks like:
-
-	Expected
-		<pretty printed actual>
-	<message>
-*/
-func Message(actual interface{}, message string, expected ...interface{}) string {
- if len(expected) == 0 {
- return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
- }
- return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
-}
-
-/*
-
-Generates a nicely formatted matcher success / failure message
-
-Much like Message(...), but it attempts to pretty print diffs in strings
-
-Expected
-    <string>: "...aaaaabaaaaa..."
-to equal               |
-    <string>: "...aaaaazaaaaa..."
-
-*/
-
-func MessageWithDiff(actual, message, expected string) string {
- if TruncatedDiff && len(actual) >= truncateThreshold && len(expected) >= truncateThreshold {
- diffPoint := findFirstMismatch(actual, expected)
- formattedActual := truncateAndFormat(actual, diffPoint)
- formattedExpected := truncateAndFormat(expected, diffPoint)
-
- spacesBeforeFormattedMismatch := findFirstMismatch(formattedActual, formattedExpected)
-
- tabLength := 4
- spaceFromMessageToActual := tabLength + len(": ") - len(message)
- padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|"
- return Message(formattedActual, message+padding, formattedExpected)
- }
- return Message(actual, message, expected)
-}
-
-func truncateAndFormat(str string, index int) string {
- leftPadding := `...`
- rightPadding := `...`
-
- start := index - charactersAroundMismatchToInclude
- if start < 0 {
- start = 0
- leftPadding = ""
- }
-
- // slice index must include the mis-matched character
- lengthOfMismatchedCharacter := 1
- end := index + charactersAroundMismatchToInclude + lengthOfMismatchedCharacter
- if end > len(str) {
- end = len(str)
- rightPadding = ""
-
- }
- return fmt.Sprintf("\"%s\"", leftPadding+str[start:end]+rightPadding)
-}
-
-func findFirstMismatch(a, b string) int {
- aSlice := strings.Split(a, "")
- bSlice := strings.Split(b, "")
-
- for index, str := range aSlice {
- if index > len(bSlice)-1 {
- return index
- }
- if str != bSlice[index] {
- return index
- }
- }
-
- if len(b) > len(a) {
- return len(a) + 1
- }
-
- return 0
-}
-
-const (
- truncateThreshold = 50
- charactersAroundMismatchToInclude = 5
-)
-
-/*
-Pretty prints the passed in object at the passed in indentation level.
-
-Object recurses into deeply nested objects emitting pretty-printed representations of their components.
-
-Modify format.MaxDepth to control how deep the recursion is allowed to go
-Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
-recursing into the object.
-
-Set PrintContextObjects to true to print the content of objects implementing context.Context
-*/
-func Object(object interface{}, indentation uint) string {
- indent := strings.Repeat(Indent, int(indentation))
- value := reflect.ValueOf(object)
- return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
-}
-
-/*
-IndentString takes a string and indents each line by the specified amount.
-*/
-func IndentString(s string, indentation uint) string {
- components := strings.Split(s, "\n")
- result := ""
- indent := strings.Repeat(Indent, int(indentation))
- for i, component := range components {
- result += indent + component
- if i < len(components)-1 {
- result += "\n"
- }
- }
-
- return result
-}
-
-func formatType(object interface{}) string {
- t := reflect.TypeOf(object)
- if t == nil {
- return "nil"
- }
- switch t.Kind() {
- case reflect.Chan:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
- case reflect.Ptr:
- return fmt.Sprintf("%T | %p", object, object)
- case reflect.Slice:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
- case reflect.Map:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d", object, v.Len())
- default:
- return fmt.Sprintf("%T", object)
- }
-}
-
-func formatValue(value reflect.Value, indentation uint) string {
- if indentation > MaxDepth {
- return "..."
- }
-
- if isNilValue(value) {
- return "nil"
- }
-
- if UseStringerRepresentation {
- if value.CanInterface() {
- obj := value.Interface()
- switch x := obj.(type) {
- case fmt.GoStringer:
- return x.GoString()
- case fmt.Stringer:
- return x.String()
- }
- }
- }
-
- if !PrintContextObjects {
- if value.Type().Implements(contextType) && indentation > 1 {
- return ""
- }
- }
-
- switch value.Kind() {
- case reflect.Bool:
- return fmt.Sprintf("%v", value.Bool())
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return fmt.Sprintf("%v", value.Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return fmt.Sprintf("%v", value.Uint())
- case reflect.Uintptr:
- return fmt.Sprintf("0x%x", value.Uint())
- case reflect.Float32, reflect.Float64:
- return fmt.Sprintf("%v", value.Float())
- case reflect.Complex64, reflect.Complex128:
- return fmt.Sprintf("%v", value.Complex())
- case reflect.Chan:
- return fmt.Sprintf("0x%x", value.Pointer())
- case reflect.Func:
- return fmt.Sprintf("0x%x", value.Pointer())
- case reflect.Ptr:
- return formatValue(value.Elem(), indentation)
- case reflect.Slice:
- return formatSlice(value, indentation)
- case reflect.String:
- return formatString(value.String(), indentation)
- case reflect.Array:
- return formatSlice(value, indentation)
- case reflect.Map:
- return formatMap(value, indentation)
- case reflect.Struct:
- if value.Type() == timeType && value.CanInterface() {
- t, _ := value.Interface().(time.Time)
- return t.Format(time.RFC3339Nano)
- }
- return formatStruct(value, indentation)
- case reflect.Interface:
- return formatValue(value.Elem(), indentation)
- default:
- if value.CanInterface() {
- return fmt.Sprintf("%#v", value.Interface())
- }
- return fmt.Sprintf("%#v", value)
- }
-}
-
-func formatString(object interface{}, indentation uint) string {
- if indentation == 1 {
- s := fmt.Sprintf("%s", object)
- components := strings.Split(s, "\n")
- result := ""
- for i, component := range components {
- if i == 0 {
- result += component
- } else {
- result += Indent + component
- }
- if i < len(components)-1 {
- result += "\n"
- }
- }
-
- return fmt.Sprintf("%s", result)
- } else {
- return fmt.Sprintf("%q", object)
- }
-}
-
-func formatSlice(v reflect.Value, indentation uint) string {
- if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) {
- return formatString(v.Bytes(), indentation)
- }
-
- l := v.Len()
- result := make([]string, l)
- longest := 0
- for i := 0; i < l; i++ {
- result[i] = formatValue(v.Index(i), indentation+1)
- if len(result[i]) > longest {
- longest = len(result[i])
- }
- }
-
- if longest > longFormThreshold {
- indenter := strings.Repeat(Indent, int(indentation))
- return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
- }
- return fmt.Sprintf("[%s]", strings.Join(result, ", "))
-}
-
-func formatMap(v reflect.Value, indentation uint) string {
- l := v.Len()
- result := make([]string, l)
-
- longest := 0
- for i, key := range v.MapKeys() {
- value := v.MapIndex(key)
- result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1))
- if len(result[i]) > longest {
- longest = len(result[i])
- }
- }
-
- if longest > longFormThreshold {
- indenter := strings.Repeat(Indent, int(indentation))
- return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
- }
- return fmt.Sprintf("{%s}", strings.Join(result, ", "))
-}
-
-func formatStruct(v reflect.Value, indentation uint) string {
- t := v.Type()
-
- l := v.NumField()
- result := []string{}
- longest := 0
- for i := 0; i < l; i++ {
- structField := t.Field(i)
- fieldEntry := v.Field(i)
- representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1))
- result = append(result, representation)
- if len(representation) > longest {
- longest = len(representation)
- }
- }
- if longest > longFormThreshold {
- indenter := strings.Repeat(Indent, int(indentation))
- return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
- }
- return fmt.Sprintf("{%s}", strings.Join(result, ", "))
-}
-
-func isNilValue(a reflect.Value) bool {
- switch a.Kind() {
- case reflect.Invalid:
- return true
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return a.IsNil()
- }
-
- return false
-}
-
-/*
-Returns true when the string is entirely made of printable runes, false otherwise.
-*/
-func isPrintableString(str string) bool {
- for _, runeValue := range str {
- if !strconv.IsPrint(runeValue) {
- return false
- }
- }
- return true
-}
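
For reference, a usage sketch of what the removed format helpers produce, assuming the upstream `github.com/onsi/gomega/format` import path rather than this vendored copy:

```go
// Usage sketch for the format package, assuming the upstream module path.
package main

import (
	"fmt"
	"strings"

	"github.com/onsi/gomega/format"
)

func main() {
	// Object prints "<type>: value" at the given indentation level.
	fmt.Println(format.Object(map[string]int{"a": 1}, 1))

	// MessageWithDiff marks the first mismatched rune with a '|' once both
	// strings exceed the 50-character truncation threshold.
	actual := strings.Repeat("a", 30) + "b" + strings.Repeat("a", 30)
	expected := strings.Repeat("a", 30) + "z" + strings.Repeat("a", 30)
	fmt.Println(format.MessageWithDiff(actual, "to equal", expected))
}
```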
diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod
deleted file mode 100644
index 65eedf69..00000000
--- a/vendor/github.com/onsi/gomega/go.mod
+++ /dev/null
@@ -1,15 +0,0 @@
-module github.com/onsi/gomega
-
-require (
- github.com/fsnotify/fsnotify v1.4.7 // indirect
- github.com/golang/protobuf v1.2.0
- github.com/hpcloud/tail v1.0.0 // indirect
- github.com/onsi/ginkgo v1.6.0
- golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
- golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
- golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e // indirect
- golang.org/x/text v0.3.0 // indirect
- gopkg.in/fsnotify.v1 v1.4.7 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.2.1
-)
diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum
deleted file mode 100644
index b23f6ef0..00000000
--- a/vendor/github.com/onsi/gomega/go.sum
+++ /dev/null
@@ -1,24 +0,0 @@
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
deleted file mode 100644
index 471f691a..00000000
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
-Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
-
-The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
-
-Gomega on Github: http://github.com/onsi/gomega
-
-Learn more about Ginkgo online: http://onsi.github.io/ginkgo
-
-Ginkgo on Github: http://github.com/onsi/ginkgo
-
-Gomega is MIT-Licensed
-*/
-package gomega
-
-import (
- "fmt"
- "reflect"
- "time"
-
- "github.com/onsi/gomega/internal/assertion"
- "github.com/onsi/gomega/internal/asyncassertion"
- "github.com/onsi/gomega/internal/testingtsupport"
- "github.com/onsi/gomega/types"
-)
-
-const GOMEGA_VERSION = "1.4.3"
-
-const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
-If you're using Ginkgo then you probably forgot to put your assertion in an It().
-Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
-Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghttp, gexec, ...) from different locations.
-`
-
-var globalFailWrapper *types.GomegaFailWrapper
-
-var defaultEventuallyTimeout = time.Second
-var defaultEventuallyPollingInterval = 10 * time.Millisecond
-var defaultConsistentlyDuration = 100 * time.Millisecond
-var defaultConsistentlyPollingInterval = 10 * time.Millisecond
-
-//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
-//the fail handler passed into RegisterFailHandler is called.
-func RegisterFailHandler(handler types.GomegaFailHandler) {
- if handler == nil {
- globalFailWrapper = nil
- return
- }
-
- globalFailWrapper = &types.GomegaFailWrapper{
- Fail: handler,
- TWithHelper: testingtsupport.EmptyTWithHelper{},
- }
-}
-
-func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) {
- if handler == nil {
- globalFailWrapper = nil
- return
- }
-
- globalFailWrapper = &types.GomegaFailWrapper{
- Fail: handler,
- TWithHelper: t,
- }
-}
-
-//RegisterTestingT connects Gomega to Go's XUnit-style
-//testing.T tests. It is now deprecated and you should use NewGomegaWithT() instead.
-//
-//Legacy Documentation:
-//
-//You'll need to call this at the top of each XUnit style test:
-//
-// func TestFarmHasCow(t *testing.T) {
-// RegisterTestingT(t)
-//
-// f := farm.New([]string{"Cow", "Horse"})
-// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
-// }
-//
-// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
-// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
-// in parallel as the global fail handler cannot point to more than one testing.T at a time.
-//
-// NewGomegaWithT() does not have this limitation
-//
-// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
-func RegisterTestingT(t types.GomegaTestingT) {
- tWithHelper, hasHelper := t.(types.TWithHelper)
- if !hasHelper {
- RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail)
- return
- }
- RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail)
-}
-
-//InterceptGomegaFailures runs a given callback and returns an array of
-//failure messages generated by any Gomega assertions within the callback.
-//
-//This is accomplished by temporarily replacing the *global* fail handler
-//with a fail handler that simply annotates failures. The original fail handler
-//is reset when InterceptGomegaFailures returns.
-//
-//This is most useful when testing custom matchers, but can also be used to check
-//on a value using a Gomega assertion without causing a test failure.
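-//
-//For example, a sketch of checking a custom matcher's failure output
-//(BeDivisibleBy is a hypothetical matcher, not part of Gomega):
-//
-// failures := InterceptGomegaFailures(func() {
-// Expect(7).To(BeDivisibleBy(3))
-// })
-// Expect(failures).To(HaveLen(1))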
-func InterceptGomegaFailures(f func()) []string {
- originalHandler := globalFailWrapper.Fail
- failures := []string{}
- RegisterFailHandler(func(message string, callerSkip ...int) {
- failures = append(failures, message)
- })
- f()
- RegisterFailHandler(originalHandler)
- return failures
-}
-
-//Ω wraps an actual value allowing assertions to be made on it:
-// Ω("foo").Should(Equal("foo"))
-//
-//If Ω is passed more than one argument it will pass the *first* argument to the matcher.
-//All subsequent arguments will be required to be nil/zero.
-//
-//This is convenient if you want to make an assertion on a method/function that returns
-//a value and an error - a common pattern in Go.
-//
-//For example, given a function with signature:
-// func MyAmazingThing() (int, error)
-//
-//Then:
-// Ω(MyAmazingThing()).Should(Equal(3))
-//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
-//
-//Ω and Expect are identical
-func Ω(actual interface{}, extra ...interface{}) GomegaAssertion {
- return ExpectWithOffset(0, actual, extra...)
-}
-
-//Expect wraps an actual value allowing assertions to be made on it:
-// Expect("foo").To(Equal("foo"))
-//
-//If Expect is passed more than one argument it will pass the *first* argument to the matcher.
-//All subsequent arguments will be required to be nil/zero.
-//
-//This is convenient if you want to make an assertion on a method/function that returns
-//a value and an error - a common pattern in Go.
-//
-//For example, given a function with signature:
-// func MyAmazingThing() (int, error)
-//
-//Then:
-// Expect(MyAmazingThing()).Should(Equal(3))
-//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
-//
-//Expect and Ω are identical
-func Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
- return ExpectWithOffset(0, actual, extra...)
-}
-
-//ExpectWithOffset wraps an actual value allowing assertions to be made on it:
-// ExpectWithOffset(1, "foo").To(Equal("foo"))
-//
-//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
-//this is used to modify the call-stack offset when computing line numbers.
-//
-//This is most useful in helper functions that make assertions. If you want Gomega's
-//error message to refer to the calling line in the test (as opposed to the line in the helper function)
-//set the first argument of `ExpectWithOffset` appropriately.
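-//
-//For example, a sketch of a helper whose failures point at its caller:
-//
-// func ExpectNoError(err error) {
-// ExpectWithOffset(1, err).ShouldNot(HaveOccurred())
-// }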
-func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion {
- if globalFailWrapper == nil {
- panic(nilFailHandlerPanic)
- }
- return assertion.New(actual, globalFailWrapper, offset, extra...)
-}
-
-//Eventually wraps an actual value allowing assertions to be made on it.
-//The assertion is tried periodically until it passes or a timeout occurs.
-//
-//Both the timeout and polling interval are configurable as optional arguments:
-//The first optional argument is the timeout
-//The second optional argument is the polling interval
-//
-//Both intervals can be specified as a time.Duration, a parsable duration string, or a
-//float/integer, in which case the value is interpreted as seconds.
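-//
-//For example (a sketch; fetchStatus is a hypothetical no-argument function):
-// Eventually(fetchStatus, "2s", "100ms").Should(Equal("ready")) // 2s timeout, 100ms polls
-// Eventually(fetchStatus, 2).Should(Equal("ready")) // numeric intervals mean seconds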
-//
-//If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
-//then Eventually will call the function periodically and try the matcher against the function's first return value.
-//
-//Example:
-//
-// Eventually(func() int {
-// return thingImPolling.Count()
-// }).Should(BeNumerically(">=", 17))
-//
-//Note that this example could be rewritten:
-//
-// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
-//
-//If the function returns more than one value, then Eventually will pass the first value to the matcher and
-//assert that all other values are nil/zero.
-//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
-//
-//For example, consider a method that returns a value and an error:
-// func FetchFromDB() (string, error)
-//
-//Then
-// Eventually(FetchFromDB).Should(Equal("hasselhoff"))
-//
-//Will pass only if the returned error is nil and the returned string passes the matcher.
-//
-//Eventually's default timeout is 1 second, and its default polling interval is 10ms
-func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
- return EventuallyWithOffset(0, actual, intervals...)
-}
-
-//EventuallyWithOffset operates like Eventually but takes an additional
-//initial argument to indicate an offset in the call stack. This is useful when building helper
-//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
-func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
- if globalFailWrapper == nil {
- panic(nilFailHandlerPanic)
- }
- timeoutInterval := defaultEventuallyTimeout
- pollingInterval := defaultEventuallyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset)
-}
-
-//Consistently wraps an actual value allowing assertions to be made on it.
-//The assertion is tried periodically and is required to pass for a period of time.
-//
-//Both the total time and polling interval are configurable as optional arguments:
-//The first optional argument is the duration that Consistently will run for
-//The second optional argument is the polling interval
-//
-//Both intervals can be specified as a time.Duration, a parsable duration string, or a
-//float/integer, in which case the value is interpreted as seconds.
-//
-//If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
-//then Consistently will call the function periodically and try the matcher against the function's first return value.
-//
-//If the function returns more than one value, then Consistently will pass the first value to the matcher and
-//assert that all other values are nil/zero.
-//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
-//
-//Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
-//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
-//
-// Consistently(channel).ShouldNot(Receive())
-//
-//Consistently's default duration is 100ms, and its default polling interval is 10ms
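-//
-//The duration and polling interval can be overridden just as with Eventually, e.g. (a sketch):
-// Consistently(channel, "500ms", "20ms").ShouldNot(Receive())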
-func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
- return ConsistentlyWithOffset(0, actual, intervals...)
-}
-
-//ConsistentlyWithOffset operates like Consistently but takes an additional
-//initial argument to indicate an offset in the call stack. This is useful when building helper
-//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
-func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
- if globalFailWrapper == nil {
- panic(nilFailHandlerPanic)
- }
- timeoutInterval := defaultConsistentlyDuration
- pollingInterval := defaultConsistentlyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset)
-}
-
-//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
-func SetDefaultEventuallyTimeout(t time.Duration) {
- defaultEventuallyTimeout = t
-}
-
-//Set the default polling interval for Eventually.
-func SetDefaultEventuallyPollingInterval(t time.Duration) {
- defaultEventuallyPollingInterval = t
-}
-
-//Set the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
-func SetDefaultConsistentlyDuration(t time.Duration) {
- defaultConsistentlyDuration = t
-}
-
-//Set the default polling interval for Consistently.
-func SetDefaultConsistentlyPollingInterval(t time.Duration) {
- defaultConsistentlyPollingInterval = t
-}
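-
-//For example, these defaults are typically tuned once during suite setup (a sketch):
-// func init() {
-// SetDefaultEventuallyTimeout(5 * time.Second)
-// SetDefaultEventuallyPollingInterval(50 * time.Millisecond)
-// }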
-
-//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
-//the matcher passed to the Should and ShouldNot methods.
-//
-//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
-//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
-//descriptive.
-//
-//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
-//
-//Example:
-//
-// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
-// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
-type GomegaAsyncAssertion interface {
- Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
-}
-
-//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher
-//passed to the Should/ShouldNot and To/ToNot/NotTo methods.
-//
-//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
-//though this is not enforced.
-//
-//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
-//and is used to annotate failure messages.
-//
-//All methods return a bool that is true if the assertion passed and false if it failed.
-//
-//Example:
-//
-// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
-type GomegaAssertion interface {
- Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
-
- To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
- ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
- NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
-}
-
-//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
-type OmegaMatcher types.GomegaMatcher
-
-//GomegaWithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
-//Gomega's rich ecosystem of matchers in standard `testing` test suites.
-//
-//Use `NewGomegaWithT` to instantiate a `GomegaWithT`
-type GomegaWithT struct {
- t types.GomegaTestingT
-}
-
-//NewGomegaWithT takes a *testing.T and returns a `GomegaWithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
-//Gomega's rich ecosystem of matchers in standard `testing` test suites.
-//
-// func TestFarmHasCow(t *testing.T) {
-// g := NewGomegaWithT(t)
-//
-// f := farm.New([]string{"Cow", "Horse"})
-// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
-// }
-func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT {
- return &GomegaWithT{
- t: t,
- }
-}
-
-//See documentation for Expect
-func (g *GomegaWithT) Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
- return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...)
-}
-
-//See documentation for Eventually
-func (g *GomegaWithT) Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
- timeoutInterval := defaultEventuallyTimeout
- pollingInterval := defaultEventuallyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0)
-}
-
-//See documentation for Consistently
-func (g *GomegaWithT) Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
- timeoutInterval := defaultConsistentlyDuration
- pollingInterval := defaultConsistentlyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0)
-}
-
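-//toDuration normalizes Gomega's polymorphic interval arguments. For example (a sketch):
-// toDuration(time.Second) // time.Duration values pass through unchanged
-// toDuration(2) // numbers are interpreted as seconds -> 2s
-// toDuration("150ms") // strings are parsed with time.ParseDuration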
-func toDuration(input interface{}) time.Duration {
- duration, ok := input.(time.Duration)
- if ok {
- return duration
- }
-
- value := reflect.ValueOf(input)
- kind := reflect.TypeOf(input).Kind()
-
- if reflect.Int <= kind && kind <= reflect.Int64 {
- return time.Duration(value.Int()) * time.Second
- } else if reflect.Uint <= kind && kind <= reflect.Uint64 {
- return time.Duration(value.Uint()) * time.Second
- } else if reflect.Float32 <= kind && kind <= reflect.Float64 {
- return time.Duration(value.Float() * float64(time.Second))
- } else if reflect.String == kind {
- duration, err := time.ParseDuration(value.String())
- if err != nil {
- panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
- }
- return duration
- }
-
- panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input))
-}
diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go
deleted file mode 100644
index 00197b67..00000000
--- a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package assertion
-
-import (
- "fmt"
- "reflect"
-
- "github.com/onsi/gomega/types"
-)
-
-type Assertion struct {
- actualInput interface{}
- failWrapper *types.GomegaFailWrapper
- offset int
- extra []interface{}
-}
-
-func New(actualInput interface{}, failWrapper *types.GomegaFailWrapper, offset int, extra ...interface{}) *Assertion {
- return &Assertion{
- actualInput: actualInput,
- failWrapper: failWrapper,
- offset: offset,
- extra: extra,
- }
-}
-
-func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
-}
-
-func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
-}
-
-func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
-}
-
-func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
-}
-
-func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
-}
-
-func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
- switch len(optionalDescription) {
- case 0:
- return ""
- default:
- return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
- }
-}
-
-func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
- matches, err := matcher.Match(assertion.actualInput)
- description := assertion.buildDescription(optionalDescription...)
- assertion.failWrapper.TWithHelper.Helper()
- if err != nil {
- assertion.failWrapper.Fail(description+err.Error(), 2+assertion.offset)
- return false
- }
- if matches != desiredMatch {
- var message string
- if desiredMatch {
- message = matcher.FailureMessage(assertion.actualInput)
- } else {
- message = matcher.NegatedFailureMessage(assertion.actualInput)
- }
- assertion.failWrapper.Fail(description+message, 2+assertion.offset)
- return false
- }
-
- return true
-}
-
-func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
- success, message := vetExtras(assertion.extra)
- if success {
- return true
- }
-
- description := assertion.buildDescription(optionalDescription...)
- assertion.failWrapper.TWithHelper.Helper()
- assertion.failWrapper.Fail(description+message, 2+assertion.offset)
- return false
-}
-
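-//vetExtras reports whether every extra value is nil or its type's zero value.
-//For example (a sketch): vetExtras([]interface{}{nil, 0, ""}) returns (true, ""),
-//while vetExtras([]interface{}{errors.New("boom")}) returns false plus a message
-//identifying the offending argument.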
-func vetExtras(extras []interface{}) (bool, string) {
- for i, extra := range extras {
- if extra != nil {
- zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
- if !reflect.DeepEqual(zeroValue, extra) {
- message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
- return false, message
- }
- }
- }
- return true, ""
-}
diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
deleted file mode 100644
index cdab233e..00000000
--- a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package asyncassertion
-
-import (
- "errors"
- "fmt"
- "reflect"
- "time"
-
- "github.com/onsi/gomega/internal/oraclematcher"
- "github.com/onsi/gomega/types"
-)
-
-type AsyncAssertionType uint
-
-const (
- AsyncAssertionTypeEventually AsyncAssertionType = iota
- AsyncAssertionTypeConsistently
-)
-
-type AsyncAssertion struct {
- asyncType AsyncAssertionType
- actualInput interface{}
- timeoutInterval time.Duration
- pollingInterval time.Duration
- failWrapper *types.GomegaFailWrapper
- offset int
-}
-
-func New(asyncType AsyncAssertionType, actualInput interface{}, failWrapper *types.GomegaFailWrapper, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion {
- actualType := reflect.TypeOf(actualInput)
- if actualType.Kind() == reflect.Func {
- if actualType.NumIn() != 0 || actualType.NumOut() == 0 {
- panic("Expected a function with no arguments and one or more return values.")
- }
- }
-
- return &AsyncAssertion{
- asyncType: asyncType,
- actualInput: actualInput,
- failWrapper: failWrapper,
- timeoutInterval: timeoutInterval,
- pollingInterval: pollingInterval,
- offset: offset,
- }
-}
-
-func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.match(matcher, true, optionalDescription...)
-}
-
-func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.match(matcher, false, optionalDescription...)
-}
-
-func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
- switch len(optionalDescription) {
- case 0:
- return ""
- default:
- return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
- }
-}
-
-func (assertion *AsyncAssertion) actualInputIsAFunction() bool {
- actualType := reflect.TypeOf(assertion.actualInput)
- return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0
-}
-
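-//pollActual resolves the value under test: plain values are returned as-is, while
-//no-argument functions are invoked, their first return value is used, and all
-//additional return values are required to be nil/zero.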
-func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
- if assertion.actualInputIsAFunction() {
- values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{})
-
- extras := []interface{}{}
- for _, value := range values[1:] {
- extras = append(extras, value.Interface())
- }
-
- success, message := vetExtras(extras)
-
- if !success {
- return nil, errors.New(message)
- }
-
- return values[0].Interface(), nil
- }
-
- return assertion.actualInput, nil
-}
-
-func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool {
- if assertion.actualInputIsAFunction() {
- return true
- }
-
- return oraclematcher.MatchMayChangeInTheFuture(matcher, value)
-}
-
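-//match drives the polling loop. Eventually succeeds as soon as the matcher agrees
-//and fails on timeout (or earlier, when no future change is possible); Consistently
-//fails on the first disagreement and succeeds once the timeout elapses (or earlier,
-//when no future change is possible).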
-func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
- timer := time.Now()
- timeout := time.After(assertion.timeoutInterval)
-
- description := assertion.buildDescription(optionalDescription...)
-
- var matches bool
- var err error
- mayChange := true
- value, err := assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
- }
-
- assertion.failWrapper.TWithHelper.Helper()
-
- fail := func(preamble string) {
- errMsg := ""
- message := ""
- if err != nil {
- errMsg = "Error: " + err.Error()
- } else {
- if desiredMatch {
- message = matcher.FailureMessage(value)
- } else {
- message = matcher.NegatedFailureMessage(value)
- }
- }
- assertion.failWrapper.TWithHelper.Helper()
- assertion.failWrapper.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
- }
-
- if assertion.asyncType == AsyncAssertionTypeEventually {
- for {
- if err == nil && matches == desiredMatch {
- return true
- }
-
- if !mayChange {
- fail("No future change is possible. Bailing out early")
- return false
- }
-
- select {
- case <-time.After(assertion.pollingInterval):
- value, err = assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
- }
- case <-timeout:
- fail("Timed out")
- return false
- }
- }
- } else if assertion.asyncType == AsyncAssertionTypeConsistently {
- for {
- if !(err == nil && matches == desiredMatch) {
- fail("Failed")
- return false
- }
-
- if !mayChange {
- return true
- }
-
- select {
- case <-time.After(assertion.pollingInterval):
- value, err = assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
- }
- case <-timeout:
- return true
- }
- }
- }
-
- return false
-}
-
-func vetExtras(extras []interface{}) (bool, string) {
- for i, extra := range extras {
- if extra != nil {
- zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
- if !reflect.DeepEqual(zeroValue, extra) {
- message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
- return false, message
- }
- }
- }
- return true, ""
-}
diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
deleted file mode 100644
index 66cad88a..00000000
--- a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package oraclematcher
-
-import "github.com/onsi/gomega/types"
-
-/*
-GomegaMatchers that also match the OracleMatcher interface can convey information about
-whether or not their result will change upon future attempts.
-
-This allows `Eventually` and `Consistently` to short circuit if success becomes impossible.
-
-For example, a process' exit code can never change. So, gexec's Exit matcher returns `true`
-for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
-*/
-type OracleMatcher interface {
- MatchMayChangeInTheFuture(actual interface{}) bool
-}
-
-func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool {
- oracleMatcher, ok := matcher.(OracleMatcher)
- if !ok {
- return true
- }
-
- return oracleMatcher.MatchMayChangeInTheFuture(value)
-}
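-
-//For example, gexec's Exit matcher (mentioned above) might implement the oracle
-//along these lines (a sketch, not the actual implementation):
-// func (m *ExitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
-// return m.actualExitCode == -1 // hypothetical field; -1 means "still running"
-// }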
diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
deleted file mode 100644
index bb27032f..00000000
--- a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package testingtsupport
-
-import (
- "regexp"
- "runtime/debug"
- "strings"
-
- "github.com/onsi/gomega/types"
-)
-
-var StackTracePruneRE = regexp.MustCompile(`\/gomega\/|\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
-
-type EmptyTWithHelper struct{}
-
-func (e EmptyTWithHelper) Helper() {}
-
-type gomegaTestingT interface {
- Fatalf(format string, args ...interface{})
-}
-
-func BuildTestingTGomegaFailWrapper(t gomegaTestingT) *types.GomegaFailWrapper {
- tWithHelper, hasHelper := t.(types.TWithHelper)
- if !hasHelper {
- tWithHelper = EmptyTWithHelper{}
- }
-
- fail := func(message string, callerSkip ...int) {
- if hasHelper {
- tWithHelper.Helper()
- t.Fatalf("\n%s", message)
- } else {
- skip := 2
- if len(callerSkip) > 0 {
- skip += callerSkip[0]
- }
- stackTrace := pruneStack(string(debug.Stack()), skip)
- t.Fatalf("\n%s\n%s\n", stackTrace, message)
- }
- }
-
- return &types.GomegaFailWrapper{
- Fail: fail,
- TWithHelper: tWithHelper,
- }
-}
-
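-//pruneStack drops the goroutine header plus `skip` call frames (each frame spans
-//two lines) and then filters out frames originating in gomega, ginkgo, the testing
-//package, or the runtime, keeping failure traces focused on the test's own code.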
-func pruneStack(fullStackTrace string, skip int) string {
- stack := strings.Split(fullStackTrace, "\n")[1:]
- if len(stack) > 2*skip {
- stack = stack[2*skip:]
- }
- prunedStack := []string{}
- for i := 0; i < len(stack)/2; i++ {
- if !StackTracePruneRE.Match([]byte(stack[i*2])) {
- prunedStack = append(prunedStack, stack[i*2])
- prunedStack = append(prunedStack, stack[i*2+1])
- }
- }
- return strings.Join(prunedStack, "\n")
-}
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
deleted file mode 100644
index c3a326dd..00000000
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ /dev/null
@@ -1,427 +0,0 @@
-package gomega
-
-import (
- "time"
-
- "github.com/onsi/gomega/matchers"
- "github.com/onsi/gomega/types"
-)
-
-//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
-//types when performing comparisons.
-//It is an error for both actual and expected to be nil. Use BeNil() instead.
-func Equal(expected interface{}) types.GomegaMatcher {
- return &matchers.EqualMatcher{
- Expected: expected,
- }
-}
-
-//BeEquivalentTo is more lax than Equal, allowing equality between different types.
-//This is done by converting actual to have the type of expected before
-//attempting equality with reflect.DeepEqual.
-//It is an error for actual and expected to be nil. Use BeNil() instead.
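-//
-//For example (a sketch of the conversion semantics):
-// Expect(uint32(5)).Should(BeEquivalentTo(5)) // passes: uint32(5) converts to int 5
-// Expect(5.1).Should(BeEquivalentTo(5)) // passes: float64(5.1) converts to int 5
-// Expect(5).Should(BeEquivalentTo(5.1)) // fails: int 5 converts to float64 5.0, not 5.1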
-func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
- return &matchers.BeEquivalentToMatcher{
- Expected: expected,
- }
-}
-
-//BeIdenticalTo uses the == operator to compare actual with expected.
-//BeIdenticalTo is strict about types when performing comparisons.
-//It is an error for both actual and expected to be nil. Use BeNil() instead.
-func BeIdenticalTo(expected interface{}) types.GomegaMatcher {
- return &matchers.BeIdenticalToMatcher{
- Expected: expected,
- }
-}
-
-//BeNil succeeds if actual is nil
-func BeNil() types.GomegaMatcher {
- return &matchers.BeNilMatcher{}
-}
-
-//BeTrue succeeds if actual is true
-func BeTrue() types.GomegaMatcher {
- return &matchers.BeTrueMatcher{}
-}
-
-//BeFalse succeeds if actual is false
-func BeFalse() types.GomegaMatcher {
- return &matchers.BeFalseMatcher{}
-}
-
-//HaveOccurred succeeds if actual is a non-nil error
-//The typical Go error checking pattern looks like:
-// err := SomethingThatMightFail()
-// Expect(err).ShouldNot(HaveOccurred())
-func HaveOccurred() types.GomegaMatcher {
- return &matchers.HaveOccurredMatcher{}
-}
-
-//Succeed passes if actual is a nil error
-//Succeed is intended to be used with functions that return a single error value. Instead of
-// err := SomethingThatMightFail()
-// Expect(err).ShouldNot(HaveOccurred())
-//
-//You can write:
-// Expect(SomethingThatMightFail()).Should(Succeed())
-//
-//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
-//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
-//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
-func Succeed() types.GomegaMatcher {
- return &matchers.SucceedMatcher{}
-}
-
-//MatchError succeeds if actual is a non-nil error that matches the passed in string/error.
-//
-//These are valid use-cases:
-// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
-// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
-//
-//It is an error for err to be nil or an object that does not implement the Error interface
-func MatchError(expected interface{}) types.GomegaMatcher {
- return &matchers.MatchErrorMatcher{
- Expected: expected,
- }
-}
-
-//BeClosed succeeds if actual is a closed channel.
-//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil
-//
-//In order to check whether or not the channel is closed, Gomega must try to read from the channel
-//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
-//values coming down the channel.
-//
-//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
-//asserting that it is closed (it is not possible to detect that a buffered channel has been closed until all its buffered values are read).
-//
-//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
-func BeClosed() types.GomegaMatcher {
- return &matchers.BeClosedMatcher{}
-}
-
-//Receive succeeds if there is a value to be received on actual.
-//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
-//
-//Receive returns immediately and never blocks:
-//
-//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
-//
-//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
-//
-//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
-//
-//If you have a go-routine running in the background that will write to channel `c` you can:
-// Eventually(c).Should(Receive())
-//
-//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
-//
-//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
-// Consistently(c).ShouldNot(Receive())
-//
-//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
-// Expect(c).Should(Receive(Equal("foo")))
-//
-//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
-//
-//Passing Receive a matcher is especially useful when paired with Eventually:
-//
-// Eventually(c).Should(Receive(ContainSubstring("bar")))
-//
-//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
-//
-//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
-// var myThing thing
-// Eventually(thingChan).Should(Receive(&myThing))
-// Expect(myThing.Sprocket).Should(Equal("foo"))
-// Expect(myThing.IsValid()).Should(BeTrue())
-func Receive(args ...interface{}) types.GomegaMatcher {
- var arg interface{}
- if len(args) > 0 {
- arg = args[0]
- }
-
- return &matchers.ReceiveMatcher{
- Arg: arg,
- }
-}
-
-//BeSent succeeds if a value can be sent to actual.
-//Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
-//In addition, actual must not be closed.
-//
-//BeSent never blocks:
-//
-//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
-//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
-//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
-//
-//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
-//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
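-//
-//For example (a sketch; the consumer goroutine is hypothetical):
-// c := make(chan string) // unbuffered
-// go func() { handleOrder(<-c) }()
-// Eventually(c).Should(BeSent("order-1")) // fails only if nothing receives before the timeout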
-func BeSent(arg interface{}) types.GomegaMatcher {
- return &matchers.BeSentMatcher{
- Arg: arg,
- }
-}
-
-//MatchRegexp succeeds if actual is a string or stringer that matches the
-//passed-in regexp. Optional arguments can be provided to construct a regexp
-//via fmt.Sprintf().
-func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
- return &matchers.MatchRegexpMatcher{
- Regexp: regexp,
- Args: args,
- }
-}
-
-//ContainSubstring succeeds if actual is a string or stringer that contains the
-//passed-in substring. Optional arguments can be provided to construct the substring
-//via fmt.Sprintf().
-func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
- return &matchers.ContainSubstringMatcher{
- Substr: substr,
- Args: args,
- }
-}
-
-//HavePrefix succeeds if actual is a string or stringer that contains the
-//passed-in string as a prefix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
-func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
- return &matchers.HavePrefixMatcher{
- Prefix: prefix,
- Args: args,
- }
-}
-
-//HaveSuffix succeeds if actual is a string or stringer that contains the
-//passed-in string as a suffix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
-func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
- return &matchers.HaveSuffixMatcher{
- Suffix: suffix,
- Args: args,
- }
-}
-
-//MatchJSON succeeds if actual is a string or stringer of JSON that matches
-//the expected JSON. The JSONs are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
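-//
-//For example (a sketch):
-// Expect(`{"a": 1, "b": 2}`).Should(MatchJSON(`{"b": 2, "a": 1}`)) // key order is ignored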
-func MatchJSON(json interface{}) types.GomegaMatcher {
- return &matchers.MatchJSONMatcher{
- JSONToMatch: json,
- }
-}
-
-//MatchXML succeeds if actual is a string or stringer of XML that matches
-//the expected XML. The XMLs are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like whitespaces shouldn't matter.
-func MatchXML(xml interface{}) types.GomegaMatcher {
- return &matchers.MatchXMLMatcher{
- XMLToMatch: xml,
- }
-}
-
-//MatchYAML succeeds if actual is a string or stringer of YAML that matches
-//the expected YAML. The YAML's are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
-func MatchYAML(yaml interface{}) types.GomegaMatcher {
- return &matchers.MatchYAMLMatcher{
- YAMLToMatch: yaml,
- }
-}
-
-//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
-func BeEmpty() types.GomegaMatcher {
- return &matchers.BeEmptyMatcher{}
-}
-
-//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
-func HaveLen(count int) types.GomegaMatcher {
- return &matchers.HaveLenMatcher{
- Count: count,
- }
-}
-
-//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
-func HaveCap(count int) types.GomegaMatcher {
- return &matchers.HaveCapMatcher{
- Count: count,
- }
-}
-
-//BeZero succeeds if actual is the zero value for its type or if actual is nil.
-func BeZero() types.GomegaMatcher {
- return &matchers.BeZeroMatcher{}
-}
-
-//ContainElement succeeds if actual contains the passed in element.
-//By default ContainElement() uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
-//
-//Actual must be an array, slice or map.
-//For maps, ContainElement searches through the map's values.
-func ContainElement(element interface{}) types.GomegaMatcher {
- return &matchers.ContainElementMatcher{
- Element: element,
- }
-}
-
-//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
-//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
-//
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
-//
-//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
-//
-//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you may do so,
-//provided that it is the only element passed in to ConsistOf:
-//
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
-//
-//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
-func ConsistOf(elements ...interface{}) types.GomegaMatcher {
- return &matchers.ConsistOfMatcher{
- Elements: elements,
- }
-}
-
-//HaveKey succeeds if actual is a map with the passed in key.
-//By default HaveKey uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
-func HaveKey(key interface{}) types.GomegaMatcher {
- return &matchers.HaveKeyMatcher{
- Key: key,
- }
-}
-
-//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
-//By default HaveKeyWithValue uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
-func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
- return &matchers.HaveKeyWithValueMatcher{
- Key: key,
- Value: value,
- }
-}
-
-//BeNumerically performs numerical assertions in a type-agnostic way.
-//Actual and expected should be numbers, though the specific type of
-//number is irrelevant (float32, float64, uint8, etc...).
-//
-//There are six, self-explanatory, supported comparators:
-// Expect(1.0).Should(BeNumerically("==", 1))
-// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
-// Expect(1.0).Should(BeNumerically(">", 0.9))
-// Expect(1.0).Should(BeNumerically(">=", 1.0))
-// Expect(1.0).Should(BeNumerically("<", 3))
-// Expect(1.0).Should(BeNumerically("<=", 1.0))
-func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
- return &matchers.BeNumericallyMatcher{
- Comparator: comparator,
- CompareTo: compareTo,
- }
-}
-
-//BeTemporally compares time.Time's like BeNumerically
-//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
-// Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
-// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
-func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
- return &matchers.BeTemporallyMatcher{
- Comparator: comparator,
- CompareTo: compareTo,
- Threshold: threshold,
- }
-}
-
-//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
-//It will return an error when one of the values is nil.
-// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values
-// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
-// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
-// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
-func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
- return &matchers.AssignableToTypeOfMatcher{
- Expected: expected,
- }
-}
-
-//Panic succeeds if actual is a function that, when invoked, panics.
-//Actual must be a function that takes no arguments and returns no results.
-func Panic() types.GomegaMatcher {
- return &matchers.PanicMatcher{}
-}
-
-//BeAnExistingFile succeeds if a file exists.
-//Actual must be a string representing the abs path to the file being checked.
-func BeAnExistingFile() types.GomegaMatcher {
- return &matchers.BeAnExistingFileMatcher{}
-}
-
-//BeARegularFile succeeds if a file exists and is a regular file.
-//Actual must be a string representing the abs path to the file being checked.
-func BeARegularFile() types.GomegaMatcher {
- return &matchers.BeARegularFileMatcher{}
-}
-
-//BeADirectory succeeds if a file exists and is a directory.
-//Actual must be a string representing the abs path to the file being checked.
-func BeADirectory() types.GomegaMatcher {
- return &matchers.BeADirectoryMatcher{}
-}
-
-//And succeeds only if all of the given matchers succeed.
-//The matchers are tried in order, and will fail-fast if one doesn't succeed.
-// Expect("hi").To(And(HaveLen(2), Equal("hi")))
-//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
-func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
- return &matchers.AndMatcher{Matchers: ms}
-}
-
-//SatisfyAll is an alias for And().
-// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
-func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
- return And(matchers...)
-}
-
-//Or succeeds if any of the given matchers succeed.
-//The matchers are tried in order and will return immediately upon the first successful match.
-// Expect("hi").To(Or(HaveLen(3), HaveLen(2)))
-//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
-func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
- return &matchers.OrMatcher{Matchers: ms}
-}
-
-//SatisfyAny is an alias for Or().
-// Expect("hi").Should(SatisfyAny(HaveLen(3), HaveLen(2)))
-func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
- return Or(matchers...)
-}
-
-//Not negates the given matcher; it succeeds if the given matcher fails.
-// Expect(1).To(Not(Equal(2)))
-//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
-func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
- return &matchers.NotMatcher{Matcher: matcher}
-}
-
-//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
-//The given transform must be a function of one parameter that returns one value.
-// var plus1 = func(i int) int { return i + 1 }
-// Expect(1).To(WithTransform(plus1, Equal(2)))
-//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
-func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
- return matchers.NewWithTransformMatcher(transform, matcher)
-}
diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go
deleted file mode 100644
index d83a2916..00000000
--- a/vendor/github.com/onsi/gomega/matchers/and.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package matchers
-
-import (
- "fmt"
-
- "github.com/onsi/gomega/format"
- "github.com/onsi/gomega/internal/oraclematcher"
- "github.com/onsi/gomega/types"
-)
-
-type AndMatcher struct {
- Matchers []types.GomegaMatcher
-
- // state
- firstFailedMatcher types.GomegaMatcher
-}
-
-func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
- m.firstFailedMatcher = nil
- for _, matcher := range m.Matchers {
- success, err := matcher.Match(actual)
- if !success || err != nil {
- m.firstFailedMatcher = matcher
- return false, err
- }
- }
- return true, nil
-}
-
-func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
- return m.firstFailedMatcher.FailureMessage(actual)
-}
-
-func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- // not the most beautiful list of matchers, but not bad either...
- return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
-}
-
-func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
- /*
- Example with 3 matchers: A, B, C
-
- Match evaluates them: T, F, > => F
- So match is currently F, what should MatchMayChangeInTheFuture() return?
- Seems like it only depends on B, since currently B MUST change to allow the result to become T
-
- Match eval: T, T, T => T
- So match is currently T, what should MatchMayChangeInTheFuture() return?
- Seems to depend on ANY of them being able to change to F.
- */
-
- if m.firstFailedMatcher == nil {
-		// so all matchers succeeded. Any one of them changing would change the result.
- for _, matcher := range m.Matchers {
- if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
- return true
- }
- }
-		return false // none of them was going to change
- }
- // one of the matchers failed.. it must be able to change in order to affect the result
- return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
-}
diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
deleted file mode 100644
index 51f8be6a..00000000
--- a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package matchers
-
-import (
- "fmt"
- "reflect"
-
- "github.com/onsi/gomega/format"
-)
-
-type AssignableToTypeOfMatcher struct {
- Expected interface{}
-}
-
-func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
- if actual == nil && matcher.Expected == nil {
-		return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
- } else if matcher.Expected == nil {
-		return false, fmt.Errorf("Refusing to compare type to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")