diff --git a/.dockerignore b/.dockerignore index 82a738e4..4672f358 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,3 +8,5 @@ README.md *.md .github/* .github_build/* +core +core-* diff --git a/Dockerfile b/Dockerfile index 533b5c34..409ee414 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,9 @@ -ARG GOLANG_IMAGE=golang:1.22-alpine3.19 -ARG BUILD_IMAGE=alpine:3.19 +ARG GOLANG_IMAGE=golang:1.22-alpine3.20 +ARG BUILD_IMAGE=alpine:3.20 # Cross-Compilation # https://www.docker.com/blog/faster-multi-platform-builds-dockerfile-cross-compilation-guide/ -FROM --platform=$BUILDPLATFORM $GOLANG_IMAGE as builder +FROM --platform=$BUILDPLATFORM $GOLANG_IMAGE AS builder ARG TARGETOS TARGETARCH TARGETVARIANT ENV GOOS=$TARGETOS GOARCH=$TARGETARCH GOARM=$TARGETVARIANT diff --git a/Dockerfile.bundle b/Dockerfile.bundle index 5e791c07..196dd33d 100644 --- a/Dockerfile.bundle +++ b/Dockerfile.bundle @@ -2,7 +2,7 @@ ARG CORE_IMAGE=core:dev ARG FFMPEG_IMAGE=datarhei/base:alpine-ffmpeg-latest -FROM $CORE_IMAGE as core +FROM $CORE_IMAGE AS core FROM $FFMPEG_IMAGE diff --git a/Makefile b/Makefile index 04a58fe9..9b3ab50b 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ init: ## build: Build core (default) build: - CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GOARM=${GOARM} go build -o core$(BINSUFFIX) -trimpath + CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GOARM=${GOARM} go build -o core$(BINSUFFIX) ## swagger: Update swagger API documentation (requires github.com/swaggo/swag) swagger: diff --git a/app/api/api.go b/app/api/api.go index be3097d6..00470ff0 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -28,8 +28,8 @@ import ( httpfs "github.com/datarhei/core/v16/http/fs" "github.com/datarhei/core/v16/http/router" "github.com/datarhei/core/v16/iam" - iamaccess "github.com/datarhei/core/v16/iam/access" iamidentity "github.com/datarhei/core/v16/iam/identity" + iampolicy "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/io/fs" 
"github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/math/rand" @@ -670,7 +670,7 @@ func (a *api) start(ctx context.Context) error { return err } - policyAdapter, err := iamaccess.NewJSONAdapter(rfs, "./policy.json", nil) + policyAdapter, err := iampolicy.NewJSONAdapter(rfs, "./policy.json", nil) if err != nil { return err } @@ -695,7 +695,7 @@ func (a *api) start(ctx context.Context) error { // Check if there are already file created by IAM. If not, create policies // and users based on the config in order to mimic the behaviour before IAM. if len(rfs.List("/", fs.ListOptions{Pattern: "/*.json"})) == 0 { - policies := []iamaccess.Policy{ + policies := []iampolicy.Policy{ { Name: "$anon", Domain: "$none", @@ -731,7 +731,7 @@ func (a *api) start(ctx context.Context) error { }, } - policies = append(policies, iamaccess.Policy{ + policies = append(policies, iampolicy.Policy{ Name: cfg.Storage.Memory.Auth.Username, Domain: "$none", Types: []string{"fs"}, @@ -757,7 +757,7 @@ func (a *api) start(ctx context.Context) error { users[s.Auth.Username] = user } - policies = append(policies, iamaccess.Policy{ + policies = append(policies, iampolicy.Policy{ Name: s.Auth.Username, Domain: "$none", Types: []string{"fs"}, @@ -768,7 +768,7 @@ func (a *api) start(ctx context.Context) error { } if cfg.RTMP.Enable && len(cfg.RTMP.Token) == 0 { - policies = append(policies, iamaccess.Policy{ + policies = append(policies, iampolicy.Policy{ Name: "$anon", Domain: "$none", Types: []string{"rtmp"}, @@ -778,7 +778,7 @@ func (a *api) start(ctx context.Context) error { } if cfg.SRT.Enable && len(cfg.SRT.Token) == 0 { - policies = append(policies, iamaccess.Policy{ + policies = append(policies, iampolicy.Policy{ Name: "$anon", Domain: "$none", Types: []string{"srt"}, @@ -1180,7 +1180,7 @@ func (a *api) start(ctx context.Context) error { var store restreamstore.Store = nil - { + if !cfg.Cluster.Enable { fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ Root: cfg.DB.Dir, }) 
@@ -1989,7 +1989,7 @@ func backupMemFS(target, source fs.Filesystem, patterns []string) error { continue } - target.WriteFileReader(name, file) + target.WriteFileReader(name, file, -1) file.Close() } diff --git a/app/import/import.go b/app/import/import.go index 0694fe28..f8d1daf2 100644 --- a/app/import/import.go +++ b/app/import/import.go @@ -575,11 +575,11 @@ func importV1(fs fs.Filesystem, path string, cfg importConfig) (store.Data, erro ID: "restreamer-ui:ingest:" + cfg.id, Reference: cfg.id, CreatedAt: time.Now().Unix(), - Order: "stop", + Order: app.NewOrder("stop"), } if v1data.Actions.Ingest == "start" { - process.Order = "start" + process.Order = app.NewOrder("start") } config := &app.Config{ @@ -1211,11 +1211,11 @@ func importV1(fs fs.Filesystem, path string, cfg importConfig) (store.Data, erro ID: "restreamer-ui:ingest:" + cfg.id + "_snapshot", Reference: cfg.id, CreatedAt: time.Now().Unix(), - Order: "stop", + Order: app.NewOrder("stop"), } if v1data.Actions.Ingest == "start" { - process.Order = "start" + process.Order = app.NewOrder("start") } snapshotConfig := &app.Config{ @@ -1292,11 +1292,11 @@ func importV1(fs fs.Filesystem, path string, cfg importConfig) (store.Data, erro ID: egressId, Reference: cfg.id, CreatedAt: time.Now().Unix(), - Order: "stop", + Order: app.NewOrder("stop"), } if v1data.Actions.Egress == "start" { - process.Order = "start" + process.Order = app.NewOrder("start") } egress := restreamerUIEgress{ diff --git a/app/import/main_test.go b/app/import/main_test.go index 305110f9..0939b79b 100644 --- a/app/import/main_test.go +++ b/app/import/main_test.go @@ -14,8 +14,8 @@ func TestImport(t *testing.T) { memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) - memfs.WriteFileReader("/mime.types", strings.NewReader("foobar")) - memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("foobar")) + memfs.WriteFileReader("/mime.types", strings.NewReader("foobar"), -1) + memfs.WriteFileReader("/bin/ffmpeg", 
strings.NewReader("foobar"), -1) configstore, err := store.NewJSON(memfs, "/config.json", nil) require.NoError(t, err) diff --git a/app/version.go b/app/version.go index 9c48d880..fb14c6d7 100644 --- a/app/version.go +++ b/app/version.go @@ -29,8 +29,8 @@ func (v versionInfo) MinorString() string { // Version of the app var Version = versionInfo{ Major: 16, - Minor: 19, - Patch: 1, + Minor: 20, + Patch: 0, } // Commit is the git commit the app is build from. It should be filled in during compilation diff --git a/cluster/about.go b/cluster/about.go index d5216bdc..3356faec 100644 --- a/cluster/about.go +++ b/cluster/about.go @@ -22,8 +22,11 @@ type ClusterNodeResources struct { NCPU float64 // Number of CPU on this node CPU float64 // Current CPU load, 0-100*ncpu CPULimit float64 // Defined CPU load limit, 0-100*ncpu + CPUCore float64 // Current CPU load of the core itself, 0-100*ncpu Mem uint64 // Currently used memory in bytes MemLimit uint64 // Defined memory limit in bytes + MemTotal uint64 // Total available memory in bytes + MemCore uint64 // Current used memory of the core itself in bytes Error error } @@ -145,8 +148,11 @@ func (c *cluster) About() (ClusterAbout, error) { NCPU: nodeAbout.Resources.NCPU, CPU: nodeAbout.Resources.CPU, CPULimit: nodeAbout.Resources.CPULimit, + CPUCore: nodeAbout.Resources.CPUCore, Mem: nodeAbout.Resources.Mem, MemLimit: nodeAbout.Resources.MemLimit, + MemTotal: nodeAbout.Resources.MemTotal, + MemCore: nodeAbout.Resources.MemCore, Error: nodeAbout.Resources.Error, }, } diff --git a/cluster/api.go b/cluster/api.go index 4a2328b4..de2f865b 100644 --- a/cluster/api.go +++ b/cluster/api.go @@ -113,6 +113,7 @@ func NewAPI(config APIConfig) (API, error) { a.router.GET("/v1/snaphot", a.Snapshot) a.router.POST("/v1/process", a.ProcessAdd) + a.router.GET("/v1/process/:id", a.ProcessGet) a.router.DELETE("/v1/process/:id", a.ProcessRemove) a.router.PUT("/v1/process/:id", a.ProcessUpdate) a.router.PUT("/v1/process/:id/command", 
a.ProcessSetCommand) @@ -186,8 +187,11 @@ func (a *api) About(c echo.Context) error { NCPU: resources.CPU.NCPU, CPU: (100 - resources.CPU.Idle) * resources.CPU.NCPU, CPULimit: resources.CPU.Limit * resources.CPU.NCPU, + CPUCore: resources.CPU.Core * resources.CPU.NCPU, Mem: resources.Mem.Total - resources.Mem.Available, - MemLimit: resources.Mem.Total, + MemLimit: resources.Mem.Limit, + MemTotal: resources.Mem.Total, + MemCore: resources.Mem.Core, }, } @@ -387,6 +391,45 @@ func (a *api) ProcessAdd(c echo.Context) error { return c.JSON(http.StatusOK, "OK") } +// ProcessGet gets a process from the cluster DB +// @Summary Get a process +// @Description Get a process from the cluster DB +// @Tags v1.0.0 +// @ID cluster-1-get-process +// @Produce json +// @Param id path string true "Process ID" +// @Param domain query string false "Domain to act on" +// @Param X-Cluster-Origin header string false "Origin ID of request" +// @Success 200 {string} string +// @Failure 404 {object} Error +// @Failure 500 {object} Error +// @Failure 508 {object} Error +// @Router /v1/process/{id} [get] +func (a *api) ProcessGet(c echo.Context) error { + id := util.PathParam(c, "id") + domain := util.DefaultQuery(c, "domain", "") + + origin := c.Request().Header.Get("X-Cluster-Origin") + + if origin == a.id { + return Err(http.StatusLoopDetected, "", "breaking circuit") + } + + pid := app.ProcessID{ID: id, Domain: domain} + + process, nodeid, err := a.cluster.Store().ProcessGet(pid) + if err != nil { + return ErrFromClusterError(err) + } + + res := client.GetProcessResponse{ + Process: process, + NodeID: nodeid, + } + + return c.JSON(http.StatusOK, res) +} + // ProcessRemove removes a process from the cluster DB // @Summary Remove a process // @Description Remove a process from the cluster DB diff --git a/cluster/client/client.go b/cluster/client/client.go index a5df0f49..84ab0230 100644 --- a/cluster/client/client.go +++ b/cluster/client/client.go @@ -9,11 +9,12 @@ import ( "strings" "time" + 
"github.com/datarhei/core/v16/cluster/store" "github.com/datarhei/core/v16/config" "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/ffmpeg/skills" - iamaccess "github.com/datarhei/core/v16/iam/access" iamidentity "github.com/datarhei/core/v16/iam/identity" + iampolicy "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/restream/app" ) @@ -26,6 +27,11 @@ type AddProcessRequest struct { Config app.Config `json:"config"` } +type GetProcessResponse struct { + Process store.Process `json:"process"` + NodeID string `json:"nodeid"` +} + type UpdateProcessRequest struct { Config app.Config `json:"config"` } @@ -51,7 +57,7 @@ type UpdateIdentityRequest struct { } type SetPoliciesRequest struct { - Policies []iamaccess.Policy `json:"policies"` + Policies []iampolicy.Policy `json:"policies"` } type LockRequest struct { @@ -82,8 +88,11 @@ type AboutResponseResources struct { NCPU float64 `json:"ncpu"` // Number of CPU on this node CPU float64 `json:"cpu"` // Current CPU load, 0-100*ncpu CPULimit float64 `json:"cpu_limit"` // Defined CPU load limit, 0-100*ncpu + CPUCore float64 `json:"cpu_core"` // Current CPU load of the core itself, 0-100*ncpu Mem uint64 `json:"memory_bytes"` // Currently used memory in bytes MemLimit uint64 `json:"memory_limit_bytes"` // Defined memory limit in bytes + MemTotal uint64 `json:"memory_total_bytes"` // Total available memory in bytes + MemCore uint64 `json:"memory_core_bytes"` // Current used memory of the core itself in bytes Error string `json:"error"` // Last error } diff --git a/cluster/client/proces.go b/cluster/client/process.go similarity index 79% rename from cluster/client/proces.go rename to cluster/client/process.go index aee4dd14..b76429cc 100644 --- a/cluster/client/proces.go +++ b/cluster/client/process.go @@ -5,6 +5,7 @@ import ( "net/http" "net/url" + "github.com/datarhei/core/v16/cluster/store" "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/restream/app" ) 
@@ -20,6 +21,22 @@ func (c *APIClient) ProcessAdd(origin string, r AddProcessRequest) error { return err } +func (c APIClient) ProcessGet(origin string, id app.ProcessID) (store.Process, string, error) { + res := GetProcessResponse{} + + data, err := c.call(http.MethodGet, "/v1/process/"+url.PathEscape(id.ID)+"?domain="+url.QueryEscape(id.Domain), "application/json", nil, origin) + if err != nil { + return store.Process{}, "", err + } + + err = json.Unmarshal(data, &res) + if err != nil { + return store.Process{}, "", err + } + + return res.Process, res.NodeID, nil +} + func (c *APIClient) ProcessRemove(origin string, id app.ProcessID) error { _, err := c.call(http.MethodDelete, "/v1/process/"+url.PathEscape(id.ID)+"?domain="+url.QueryEscape(id.Domain), "application/json", nil, origin) diff --git a/cluster/cluster.go b/cluster/cluster.go index bc367ccc..56d00a40 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -23,8 +23,8 @@ import ( "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/ffmpeg/skills" "github.com/datarhei/core/v16/iam" - iamaccess "github.com/datarhei/core/v16/iam/access" iamidentity "github.com/datarhei/core/v16/iam/identity" + iampolicy "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/resources" @@ -59,6 +59,7 @@ type Cluster interface { HasRaftLeader() bool ProcessAdd(origin string, config *app.Config) error + ProcessGet(origin string, id app.ProcessID, stale bool) (store.Process, string, error) ProcessRemove(origin string, id app.ProcessID) error ProcessUpdate(origin string, id app.ProcessID, config *app.Config) error ProcessSetCommand(origin string, id app.ProcessID, order string) error @@ -70,7 +71,7 @@ type Cluster interface { IAMIdentityAdd(origin string, identity iamidentity.User) error IAMIdentityUpdate(origin, name string, identity iamidentity.User) error IAMIdentityRemove(origin string, name string) error - 
IAMPoliciesSet(origin, name string, policies []iamaccess.Policy) error + IAMPoliciesSet(origin, name string, policies []iampolicy.Policy) error LockCreate(origin string, name string, validUntil time.Time) (*kvs.Lock, error) LockDelete(origin string, name string) error @@ -143,6 +144,7 @@ type cluster struct { shutdown bool shutdownCh chan struct{} shutdownLock sync.Mutex + shutdownWg sync.WaitGroup syncInterval time.Duration nodeRecoverTimeout time.Duration @@ -354,7 +356,11 @@ func New(config Config) (Cluster, error) { return nil, err } + c.shutdownWg.Add(1) + go func(peerAddress string) { + defer c.shutdownWg.Done() + ticker := time.NewTicker(time.Second) defer ticker.Stop() @@ -390,6 +396,8 @@ func New(config Config) (Cluster, error) { } } + c.shutdownWg.Add(4) + go c.trackNodeChanges() go c.trackLeaderChanges() go c.monitorLeadership() @@ -437,6 +445,8 @@ func (c *cluster) Start(ctx context.Context) error { <-c.shutdownCh + c.shutdownWg.Wait() + return nil } @@ -642,9 +652,10 @@ func (c *cluster) Shutdown() error { c.shutdown = true close(c.shutdownCh) + c.shutdownWg.Wait() + if c.manager != nil { c.manager.NodesClear() - c.manager = nil } if c.api != nil { @@ -656,9 +667,12 @@ func (c *cluster) Shutdown() error { if c.raft != nil { c.raft.Shutdown() - c.raft = nil } + // TODO: here might some situations, where the manager is still need from the synchronize loop and will run into a panic + c.manager = nil + c.raft = nil + return nil } @@ -908,6 +922,8 @@ func (c *cluster) Snapshot(origin string) (io.ReadCloser, error) { } func (c *cluster) trackNodeChanges() { + defer c.shutdownWg.Done() + ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() @@ -1001,6 +1017,8 @@ func (c *cluster) getClusterBarrier(name string) (bool, error) { // trackLeaderChanges registers an Observer with raft in order to receive updates // about leader changes, in order to keep the forwarder up to date. 
func (c *cluster) trackLeaderChanges() { + defer c.shutdownWg.Done() + for { select { case leaderAddress := <-c.raftLeaderObservationCh: @@ -1061,6 +1079,8 @@ func (c *cluster) applyCommand(cmd *store.Command) error { } func (c *cluster) sentinel() { + defer c.shutdownWg.Done() + ticker := time.NewTicker(time.Second) defer ticker.Stop() diff --git a/cluster/docs/ClusterAPI_docs.go b/cluster/docs/ClusterAPI_docs.go index 90240e9d..be2fd046 100644 --- a/cluster/docs/ClusterAPI_docs.go +++ b/cluster/docs/ClusterAPI_docs.go @@ -51,6 +51,33 @@ const docTemplateClusterAPI = `{ } } }, + "/v1/about": { + "get": { + "description": "The cluster version", + "produces": [ + "application/json" + ], + "tags": [ + "v1.0.0" + ], + "summary": "The cluster version", + "operationId": "cluster-1-about", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/cluster.Error" + } + } + } + } + }, "/v1/barrier/{name}": { "get": { "description": "Has the barrier already has been passed", @@ -535,7 +562,7 @@ const docTemplateClusterAPI = `{ "operationId": "cluster-1-lock", "parameters": [ { - "description": "Lock request", + "description": "LockCreate request", "name": "data", "in": "body", "required": true, diff --git a/cluster/docs/ClusterAPI_swagger.json b/cluster/docs/ClusterAPI_swagger.json index 711c3c88..70cf3f4e 100644 --- a/cluster/docs/ClusterAPI_swagger.json +++ b/cluster/docs/ClusterAPI_swagger.json @@ -43,6 +43,33 @@ } } }, + "/v1/about": { + "get": { + "description": "The cluster version", + "produces": [ + "application/json" + ], + "tags": [ + "v1.0.0" + ], + "summary": "The cluster version", + "operationId": "cluster-1-about", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/cluster.Error" + } + } + } + } + }, 
"/v1/barrier/{name}": { "get": { "description": "Has the barrier already has been passed", @@ -527,7 +554,7 @@ "operationId": "cluster-1-lock", "parameters": [ { - "description": "Lock request", + "description": "LockCreate request", "name": "data", "in": "body", "required": true, diff --git a/cluster/docs/ClusterAPI_swagger.yaml b/cluster/docs/ClusterAPI_swagger.yaml index e9052e6e..c0628aca 100644 --- a/cluster/docs/ClusterAPI_swagger.yaml +++ b/cluster/docs/ClusterAPI_swagger.yaml @@ -896,6 +896,24 @@ paths: summary: The cluster version tags: - v1.0.0 + /v1/about: + get: + description: The cluster version + operationId: cluster-1-about + produces: + - application/json + responses: + "200": + description: OK + schema: + type: string + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/cluster.Error' + summary: The cluster version + tags: + - v1.0.0 /v1/barrier/{name}: get: description: Has the barrier already has been passed @@ -1216,7 +1234,7 @@ paths: description: Acquire a named lock operationId: cluster-1-lock parameters: - - description: Lock request + - description: LockCreate request in: body name: data required: true diff --git a/cluster/forwarder/iam.go b/cluster/forwarder/iam.go index bd695380..b310604d 100644 --- a/cluster/forwarder/iam.go +++ b/cluster/forwarder/iam.go @@ -2,8 +2,8 @@ package forwarder import ( apiclient "github.com/datarhei/core/v16/cluster/client" - iamaccess "github.com/datarhei/core/v16/iam/access" iamidentity "github.com/datarhei/core/v16/iam/identity" + iampolicy "github.com/datarhei/core/v16/iam/policy" ) func (f *Forwarder) IAMIdentityAdd(origin string, identity iamidentity.User) error { @@ -38,7 +38,7 @@ func (f *Forwarder) IAMIdentityUpdate(origin, name string, identity iamidentity. 
return reconstructError(client.IAMIdentityUpdate(origin, name, r)) } -func (f *Forwarder) IAMPoliciesSet(origin, name string, policies []iamaccess.Policy) error { +func (f *Forwarder) IAMPoliciesSet(origin, name string, policies []iampolicy.Policy) error { if origin == "" { origin = f.ID } diff --git a/cluster/forwarder/process.go b/cluster/forwarder/process.go index 12a36392..acf6b997 100644 --- a/cluster/forwarder/process.go +++ b/cluster/forwarder/process.go @@ -2,6 +2,7 @@ package forwarder import ( apiclient "github.com/datarhei/core/v16/cluster/client" + "github.com/datarhei/core/v16/cluster/store" "github.com/datarhei/core/v16/restream/app" ) @@ -21,6 +22,20 @@ func (f *Forwarder) ProcessAdd(origin string, config *app.Config) error { return reconstructError(client.ProcessAdd(origin, r)) } +func (f *Forwarder) ProcessGet(origin string, id app.ProcessID) (store.Process, string, error) { + if origin == "" { + origin = f.ID + } + + f.lock.RLock() + client := f.client + f.lock.RUnlock() + + process, nodeid, err := client.ProcessGet(origin, id) + + return process, nodeid, reconstructError(err) +} + func (f *Forwarder) ProcessUpdate(origin string, id app.ProcessID, config *app.Config) error { if origin == "" { origin = f.ID diff --git a/cluster/iam.go b/cluster/iam.go index 2dc30616..03b044e7 100644 --- a/cluster/iam.go +++ b/cluster/iam.go @@ -8,8 +8,8 @@ import ( clusteriamadapter "github.com/datarhei/core/v16/cluster/iam/adapter" "github.com/datarhei/core/v16/cluster/store" "github.com/datarhei/core/v16/iam" - iamaccess "github.com/datarhei/core/v16/iam/access" iamidentity "github.com/datarhei/core/v16/iam/identity" + iampolicy "github.com/datarhei/core/v16/iam/policy" ) func (c *cluster) IAM(superuser iamidentity.User, jwtRealm, jwtSecret string) (iam.IAM, error) { @@ -54,13 +54,13 @@ func (c *cluster) ListIdentity(name string) (time.Time, iamidentity.User, error) return user.UpdatedAt, user.Users[0], nil } -func (c *cluster) ListPolicies() (time.Time, 
[]iamaccess.Policy) { +func (c *cluster) ListPolicies() (time.Time, []iampolicy.Policy) { policies := c.store.IAMPolicyList() return policies.UpdatedAt, policies.Policies } -func (c *cluster) ListUserPolicies(name string) (time.Time, []iamaccess.Policy) { +func (c *cluster) ListUserPolicies(name string) (time.Time, []iampolicy.Policy) { policies := c.store.IAMIdentityPolicyList(name) return policies.UpdatedAt, policies.Policies @@ -101,7 +101,7 @@ func (c *cluster) IAMIdentityUpdate(origin, name string, identity iamidentity.Us return c.applyCommand(cmd) } -func (c *cluster) IAMPoliciesSet(origin, name string, policies []iamaccess.Policy) error { +func (c *cluster) IAMPoliciesSet(origin, name string, policies []iampolicy.Policy) error { if !c.IsRaftLeader() { return c.forwarder.IAMPoliciesSet(origin, name, policies) } diff --git a/cluster/iam/adapter/policy.go b/cluster/iam/adapter/policy.go index b6250ece..7df2d203 100644 --- a/cluster/iam/adapter/policy.go +++ b/cluster/iam/adapter/policy.go @@ -4,9 +4,7 @@ import ( "sync" "github.com/datarhei/core/v16/cluster/store" - iamaccess "github.com/datarhei/core/v16/iam/access" - - "github.com/casbin/casbin/v2/model" + "github.com/datarhei/core/v16/iam/policy" ) type policyAdapter struct { @@ -15,7 +13,7 @@ type policyAdapter struct { lock sync.RWMutex } -func NewPolicyAdapter(store store.Store) (iamaccess.Adapter, error) { +func NewPolicyAdapter(store store.Store) (policy.Adapter, error) { a := &policyAdapter{ store: store, domains: map[string]struct{}{}, @@ -24,13 +22,13 @@ func NewPolicyAdapter(store store.Store) (iamaccess.Adapter, error) { return a, nil } -func (a *policyAdapter) LoadPolicy(model model.Model) error { - policies := a.store.IAMPolicyList() +func (a *policyAdapter) LoadPolicy(model policy.Model) error { + storePolicies := a.store.IAMPolicyList() - rules := [][]string{} + policies := []policy.Policy{} domains := map[string]struct{}{} - for _, p := range policies.Policies { + for _, p := range 
storePolicies.Policies { if len(p.Domain) == 0 { p.Domain = "$none" } @@ -39,19 +37,20 @@ func (a *policyAdapter) LoadPolicy(model model.Model) error { p.Types = []string{"$none"} } - rule := []string{ - p.Name, - p.Domain, - iamaccess.EncodeResource(p.Types, p.Resource), - iamaccess.EncodeActions(p.Actions), + policy := policy.Policy{ + Name: p.Name, + Domain: p.Domain, + Types: p.Types, + Resource: p.Resource, + Actions: p.Actions, } domains[p.Domain] = struct{}{} - rules = append(rules, rule) + policies = append(policies, policy) } - model.AddPolicies("p", "p", rules) + model.AddPolicies(policies) a.lock.Lock() a.domains = domains @@ -60,27 +59,23 @@ func (a *policyAdapter) LoadPolicy(model model.Model) error { return nil } -func (a *policyAdapter) SavePolicy(model model.Model) error { - return nil -} - -func (a *policyAdapter) AddPolicy(sec, ptype string, rule []string) error { +func (a *policyAdapter) SavePolicy(_ policy.Model) error { return nil } -func (a *policyAdapter) AddPolicies(sec string, ptype string, rules [][]string) error { +func (a *policyAdapter) AddPolicy(_ policy.Policy) error { return nil } -func (a *policyAdapter) RemovePolicy(sec string, ptype string, rule []string) error { +func (a *policyAdapter) AddPolicies(_ []policy.Policy) error { return nil } -func (a *policyAdapter) RemovePolicies(sec string, ptype string, rules [][]string) error { +func (a *policyAdapter) RemovePolicy(_ policy.Policy) error { return nil } -func (a *policyAdapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { +func (a *policyAdapter) RemovePolicies(_ []policy.Policy) error { return nil } diff --git a/cluster/iam/iam.go b/cluster/iam/iam.go index fc57fdc0..0b119d4c 100644 --- a/cluster/iam/iam.go +++ b/cluster/iam/iam.go @@ -5,8 +5,8 @@ import ( "github.com/datarhei/core/v16/cluster/store" "github.com/datarhei/core/v16/iam" - "github.com/datarhei/core/v16/iam/access" "github.com/datarhei/core/v16/iam/identity" + 
"github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/log" ) @@ -84,7 +84,7 @@ func (m *manager) RemovePolicy(name, domain string, types []string, resource str return ErrClusterMode } -func (m *manager) ListPolicies(name, domain string, types []string, resource string, actions []string) []access.Policy { +func (m *manager) ListPolicies(name, domain string, types []string, resource string, actions []string) []policy.Policy { return m.iam.ListPolicies(name, domain, types, resource, actions) } diff --git a/cluster/leader.go b/cluster/leader.go index 32b24382..5358d03e 100644 --- a/cluster/leader.go +++ b/cluster/leader.go @@ -19,6 +19,8 @@ const NOTIFY_LEADER = 1 const NOTIFY_EMERGENCY = 2 func (c *cluster) monitorLeadership() { + defer c.shutdownWg.Done() + // We use the notify channel we configured Raft with, NOT Raft's // leaderCh, which is only notified best-effort. Doing this ensures // that we get all notifications in order, which is required for @@ -479,7 +481,7 @@ type processOpError struct { err error } -func (c *cluster) applyOpStack(stack []interface{}, term uint64) []processOpError { +func (c *cluster) applyOpStack(stack []interface{}, term uint64, runners int) []processOpError { errors := []processOpError{} logger := c.logger.WithFields(log.Fields{ @@ -488,6 +490,7 @@ func (c *cluster) applyOpStack(stack []interface{}, term uint64) []processOpErro }) errChan := make(chan processOpError, len(stack)) + opChan := make(chan interface{}, len(stack)) wgReader := sync.WaitGroup{} wgReader.Add(1) @@ -500,18 +503,28 @@ func (c *cluster) applyOpStack(stack []interface{}, term uint64) []processOpErro }(errChan) wg := sync.WaitGroup{} - for _, op := range stack { + + for i := 0; i < runners; i++ { wg.Add(1) - go func(errChan chan<- processOpError, op interface{}, logger log.Logger) { - opErr := c.applyOp(op, logger) - if opErr.err != nil { - errChan <- opErr + go func(errChan chan<- processOpError, opChan <-chan interface{}, logger log.Logger) { + 
defer wg.Done() + + for op := range opChan { + opErr := c.applyOp(op, logger) + if opErr.err != nil { + errChan <- opErr + } } - wg.Done() - }(errChan, op, logger) + }(errChan, opChan, logger) + } + + for _, op := range stack { + opChan <- op } + close(opChan) + wg.Wait() close(errChan) @@ -624,6 +637,36 @@ func (c *cluster) applyOp(op interface{}, logger log.Logger) processOpError { break } + // Transfer report with best effort, it's ok if it fails. + err = c.manager.ProcessCommand(v.fromNodeid, v.config.ProcessID(), "stop") + if err == nil { + process, err := c.manager.ProcessGet(v.fromNodeid, v.config.ProcessID(), []string{"report"}) + if err != nil { + logger.Info().WithError(err).WithFields(log.Fields{ + "processid": v.config.ProcessID(), + "fromnodeid": v.fromNodeid, + "tonodeid": v.toNodeid, + }).Log("Moving process, get process report") + } + if process.Report != nil && err == nil { + report := process.Report.Marshal() + err = c.manager.ProcessReportSet(v.toNodeid, v.config.ProcessID(), &report) + if err != nil { + logger.Info().WithError(err).WithFields(log.Fields{ + "processid": v.config.ProcessID(), + "fromnodeid": v.fromNodeid, + "tonodeid": v.toNodeid, + }).Log("Moving process, set process report") + } + } + } else { + logger.Info().WithError(err).WithFields(log.Fields{ + "processid": v.config.ProcessID(), + "fromnodeid": v.fromNodeid, + "tonodeid": v.toNodeid, + }).Log("Moving process, stopping process") + } + err = c.manager.ProcessDelete(v.fromNodeid, v.config.ProcessID()) if err != nil { opErr = processOpError{ diff --git a/cluster/leader_rebalance.go b/cluster/leader_rebalance.go index ebfc3120..c583f1ac 100644 --- a/cluster/leader_rebalance.go +++ b/cluster/leader_rebalance.go @@ -3,7 +3,6 @@ package cluster import ( "github.com/datarhei/core/v16/cluster/node" "github.com/datarhei/core/v16/cluster/store" - "github.com/datarhei/core/v16/log" ) func (c *cluster) doRebalance(emergency bool, term uint64) { @@ -17,8 +16,12 @@ func (c *cluster) 
doRebalance(emergency bool, term uint64) { logger.Debug().WithField("emergency", emergency).Log("Rebalancing") storeNodes := c.store.NodeList() - have := c.manager.ClusterProcessList() nodes := c.manager.NodeList() + have, err := c.manager.ClusterProcessList() + if err != nil { + logger.Warn().WithError(err).Log("Failed to retrieve complete process list") + return + } nodesMap := map[string]node.About{} @@ -32,18 +35,13 @@ func (c *cluster) doRebalance(emergency bool, term uint64) { nodesMap[about.ID] = about } - logger.Debug().WithFields(log.Fields{ - "have": have, - "nodes": nodesMap, - }).Log("Rebalance") - opStack, _ := rebalance(have, nodesMap) - errors := c.applyOpStack(opStack, term) + errors := c.applyOpStack(opStack, term, 5) for _, e := range errors { // Only apply the command if the error is different. - process, err := c.store.ProcessGet(e.processid) + process, _, err := c.store.ProcessGet(e.processid) if err != nil { continue } diff --git a/cluster/leader_relocate.go b/cluster/leader_relocate.go index 5879a1a9..285a087f 100644 --- a/cluster/leader_relocate.go +++ b/cluster/leader_relocate.go @@ -3,7 +3,6 @@ package cluster import ( "github.com/datarhei/core/v16/cluster/node" "github.com/datarhei/core/v16/cluster/store" - "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/restream/app" ) @@ -19,8 +18,12 @@ func (c *cluster) doRelocate(emergency bool, term uint64) { relocateMap := c.store.ProcessGetRelocateMap() storeNodes := c.store.NodeList() - have := c.manager.ClusterProcessList() nodes := c.manager.NodeList() + have, err := c.manager.ClusterProcessList() + if err != nil { + logger.Warn().WithError(err).Log("Failed to retrieve complete process list") + return + } nodesMap := map[string]node.About{} @@ -34,19 +37,13 @@ func (c *cluster) doRelocate(emergency bool, term uint64) { nodesMap[about.ID] = about } - logger.Debug().WithFields(log.Fields{ - "relocate": relocate, - "have": have, - "nodes": nodesMap, - }).Log("Rebalance") - opStack, 
_, relocatedProcessIDs := relocate(have, nodesMap, relocateMap) - errors := c.applyOpStack(opStack, term) + errors := c.applyOpStack(opStack, term, 5) for _, e := range errors { // Only apply the command if the error is different. - process, err := c.store.ProcessGet(e.processid) + process, _, err := c.store.ProcessGet(e.processid) if err != nil { continue } @@ -200,6 +197,9 @@ func relocate(have []node.Process, nodes map[string]node.About, relocateMap map[ haveReferenceAffinity.Move(process.Config.Reference, process.Config.Domain, sourceNodeid, targetNodeid) relocatedProcessIDs = append(relocatedProcessIDs, processid) + + // Move only one process at a time. + break } return opStack, resources.Map(), relocatedProcessIDs diff --git a/cluster/leader_synchronize.go b/cluster/leader_synchronize.go index 60508433..b597d78e 100644 --- a/cluster/leader_synchronize.go +++ b/cluster/leader_synchronize.go @@ -8,19 +8,22 @@ import ( "github.com/datarhei/core/v16/cluster/node" "github.com/datarhei/core/v16/cluster/store" "github.com/datarhei/core/v16/encoding/json" - "github.com/datarhei/core/v16/log" ) func (c *cluster) doSynchronize(emergency bool, term uint64) { + logger := c.logger.WithField("term", term) + + logger.Debug().WithField("emergency", emergency).Log("Synchronizing") + wish := c.store.ProcessGetNodeMap() want := c.store.ProcessList() storeNodes := c.store.NodeList() - have := c.manager.ClusterProcessList() nodes := c.manager.NodeList() - - logger := c.logger.WithField("term", term) - - logger.Debug().WithField("emergency", emergency).Log("Synchronizing") + have, err := c.manager.ClusterProcessList() + if err != nil { + logger.Warn().WithError(err).Log("Failed to retrieve complete process list") + return + } nodesMap := map[string]node.About{} @@ -34,12 +37,6 @@ func (c *cluster) doSynchronize(emergency bool, term uint64) { nodesMap[about.ID] = about } - logger.Debug().WithFields(log.Fields{ - "want": want, - "have": have, - "nodes": nodesMap, - 
}).Log("Synchronize") - opStack, _, reality := synchronize(wish, want, have, nodesMap, c.nodeRecoverTimeout) if !emergency && !maps.Equal(wish, reality) { @@ -53,12 +50,12 @@ func (c *cluster) doSynchronize(emergency bool, term uint64) { c.applyCommand(cmd) } - errors := c.applyOpStack(opStack, term) + errors := c.applyOpStack(opStack, term, 5) if !emergency { for _, e := range errors { // Only apply the command if the error is different. - process, err := c.store.ProcessGet(e.processid) + process, _, err := c.store.ProcessGet(e.processid) if err != nil { continue } @@ -163,6 +160,11 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce } opStack := []interface{}{} + opStackStart := []interface{}{} + opStackDelete := []interface{}{} + opStackUpdate := []interface{}{} + opStackAdd := []interface{}{} + opBudget := 100 // Now we iterate through the processes we actually have running on the nodes // and remove them from the wantMap. We also make sure that they have the correct order. @@ -175,7 +177,7 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce wantP, ok := wantMap[pid] if !ok { // The process is not on the wantMap. Delete it and adjust the resources. - opStack = append(opStack, processOpDelete{ + opStackDelete = append(opStackDelete, processOpDelete{ nodeid: haveP.NodeID, processid: haveP.Config.ProcessID(), }) @@ -188,16 +190,18 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce // The process is on the wantMap. Update the process if the configuration and/or metadata differ. hasConfigChanges := !wantP.Config.Equal(haveP.Config) hasMetadataChanges, metadata := isMetadataUpdateRequired(wantP.Metadata, haveP.Metadata) - if hasConfigChanges || hasMetadataChanges { + if (hasConfigChanges || hasMetadataChanges) && opBudget > 0 { // TODO: When the required resources increase, should we move this process to a node // that has them available? 
Otherwise, this node might start throttling. However, this // will result in rebalancing. - opStack = append(opStack, processOpUpdate{ + opStackUpdate = append(opStackUpdate, processOpUpdate{ nodeid: haveP.NodeID, processid: haveP.Config.ProcessID(), config: wantP.Config, metadata: metadata, }) + + opBudget -= 3 } delete(wantMap, pid) @@ -270,10 +274,16 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce } */ - opStack = append(opStack, processOpStart{ + opStackStart = append(opStackStart, processOpStart{ nodeid: nodeid, processid: haveP.Config.ProcessID(), }) + + opBudget -= 3 + + if opBudget <= 0 { + break + } } have = haveAfterRemove @@ -352,13 +362,15 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce } if len(nodeid) != 0 { - opStack = append(opStack, processOpAdd{ + opStackAdd = append(opStackAdd, processOpAdd{ nodeid: nodeid, config: wantP.Config, metadata: wantP.Metadata, order: wantP.Order, }) + opBudget -= 3 + // Consume the resources resources.Add(nodeid, wantP.Config.LimitCPU, wantP.Config.LimitMemory) @@ -371,7 +383,16 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce err: errNotEnoughResourcesForDeployment, }) } + + if opBudget <= 0 { + break + } } + opStack = append(opStack, opStackDelete...) + opStack = append(opStack, opStackUpdate...) + opStack = append(opStack, opStackStart...) + opStack = append(opStack, opStackAdd...) 
+ return opStack, resources.Map(), reality } diff --git a/cluster/node/core.go b/cluster/node/core.go index 94a919fa..2af52762 100644 --- a/cluster/node/core.go +++ b/cluster/node/core.go @@ -66,19 +66,21 @@ func (n *Core) SetEssentials(address string, config *config.Config) { n.lock.Lock() defer n.lock.Unlock() - if address != n.address { + if n.address != address { n.address = address n.client = nil // force reconnet } - if n.config == nil && config != nil { - n.config = config - n.client = nil // force reconnect - } + if config != nil { + if n.config == nil { + n.config = config + n.client = nil // force reconnect + } - if n.config.UpdatedAt != config.UpdatedAt { - n.config = config - n.client = nil // force reconnect + if n.config != nil && n.config.UpdatedAt != config.UpdatedAt { + n.config = config + n.client = nil // force reconnect + } } } @@ -312,7 +314,6 @@ func (n *Core) About() (CoreAbout, error) { } func (n *Core) ProcessAdd(config *app.Config, metadata map[string]interface{}) error { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -325,7 +326,6 @@ func (n *Core) ProcessAdd(config *app.Config, metadata map[string]interface{}) e } func (n *Core) ProcessCommand(id app.ProcessID, command string) error { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -338,7 +338,6 @@ func (n *Core) ProcessCommand(id app.ProcessID, command string) error { } func (n *Core) ProcessDelete(id app.ProcessID) error { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -351,7 +350,6 @@ func (n *Core) ProcessDelete(id app.ProcessID) error { } func (n *Core) ProcessUpdate(id app.ProcessID, config *app.Config, metadata map[string]interface{}) error { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -363,8 +361,19 @@ func (n *Core) ProcessUpdate(id app.ProcessID, config *app.Config, metadata map[ return client.ProcessUpdate(id, config, metadata) } -func (n *Core) ProcessProbe(id app.ProcessID) (api.Probe, error) { +func (n *Core) ProcessReportSet(id 
app.ProcessID, report *app.Report) error { + n.lock.RLock() + client := n.client + n.lock.RUnlock() + + if client == nil { + return ErrNoPeer + } + + return client.ProcessReportSet(id, report) +} +func (n *Core) ProcessProbe(id app.ProcessID) (api.Probe, error) { n.lock.RLock() client := n.client n.lock.RUnlock() @@ -384,7 +393,6 @@ func (n *Core) ProcessProbe(id app.ProcessID) (api.Probe, error) { } func (n *Core) ProcessProbeConfig(config *app.Config) (api.Probe, error) { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -404,7 +412,6 @@ func (n *Core) ProcessProbeConfig(config *app.Config) (api.Probe, error) { } func (n *Core) ProcessList(options client.ProcessListOptions) ([]api.Process, error) { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -417,7 +424,6 @@ func (n *Core) ProcessList(options client.ProcessListOptions) ([]api.Process, er } func (n *Core) FilesystemList(storage, pattern string) ([]api.FileInfo, error) { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -439,7 +445,6 @@ func (n *Core) FilesystemList(storage, pattern string) ([]api.FileInfo, error) { } func (n *Core) FilesystemDeleteFile(storage, path string) error { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -452,7 +457,6 @@ func (n *Core) FilesystemDeleteFile(storage, path string) error { } func (n *Core) FilesystemPutFile(storage, path string, data io.Reader) error { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -465,7 +469,6 @@ func (n *Core) FilesystemPutFile(storage, path string, data io.Reader) error { } func (n *Core) FilesystemGetFileInfo(storage, path string) (int64, time.Time, error) { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -487,7 +490,6 @@ func (n *Core) FilesystemGetFileInfo(storage, path string) (int64, time.Time, er } func (n *Core) FilesystemGetFile(storage, path string, offset int64) (io.ReadCloser, error) { - n.lock.RLock() client := n.client n.lock.RUnlock() @@ -794,10 +796,12 @@ func (n *Core) ClusterProcessList() 
([]Process, error) { UpdatedAt: time.Unix(p.UpdatedAt, 0), } - config, metadata := p.Config.Marshal() + config, _ := p.Config.Marshal() process.Config = config - process.Metadata = metadata + if p.Metadata != nil { + process.Metadata = p.Metadata.(map[string]interface{}) + } processes = append(processes, process) } diff --git a/cluster/node/manager.go b/cluster/node/manager.go index d0c345b7..5518a59e 100644 --- a/cluster/node/manager.go +++ b/cluster/node/manager.go @@ -409,12 +409,14 @@ func (p *Manager) FilesystemList(storage, pattern string) []api.FileInfo { return filesList } -func (p *Manager) ClusterProcessList() []Process { +func (p *Manager) ClusterProcessList() ([]Process, error) { processChan := make(chan []Process, 64) processList := []Process{} + errorChan := make(chan error, 8) + errorList := []error{} wgList := sync.WaitGroup{} - wgList.Add(1) + wgList.Add(2) go func() { defer wgList.Done() @@ -424,53 +426,46 @@ func (p *Manager) ClusterProcessList() []Process { } }() + go func() { + defer wgList.Done() + + for err := range errorChan { + errorList = append(errorList, err) + } + }() + wg := sync.WaitGroup{} p.lock.RLock() for _, n := range p.nodes { wg.Add(1) - go func(node *Node, p chan<- []Process) { + go func(node *Node, p chan<- []Process, e chan<- error) { defer wg.Done() processes, err := node.Core().ClusterProcessList() if err != nil { + e <- err return } p <- processes - }(n, processChan) + }(n, processChan, errorChan) } p.lock.RUnlock() wg.Wait() close(processChan) + close(errorChan) wgList.Wait() - return processList -} - -func (p *Manager) ProcessFindNodeID(id app.ProcessID) (string, error) { - procs := p.ClusterProcessList() - nodeid := "" - - for _, p := range procs { - if p.Config.ProcessID() != id { - continue - } - - nodeid = p.NodeID - - break - } - - if len(nodeid) == 0 { - return "", fmt.Errorf("the process '%s' is not registered with any node", id.String()) + if len(errorList) != 0 { + return nil, fmt.Errorf("not all nodes 
responded with their process list") } - return nodeid, nil + return processList, nil } func (p *Manager) FindNodeForResources(nodeid string, cpu float64, memory uint64) string { @@ -540,6 +535,24 @@ func (p *Manager) ProcessList(options client.ProcessListOptions) []api.Process { return processList } +func (p *Manager) ProcessGet(nodeid string, id app.ProcessID, filter []string) (api.Process, error) { + node, err := p.NodeGet(nodeid) + if err != nil { + return api.Process{}, fmt.Errorf("node not found: %w", err) + } + + list, err := node.Core().ProcessList(client.ProcessListOptions{ + ID: []string{id.ID}, + Filter: filter, + Domain: id.Domain, + }) + if err != nil { + return api.Process{}, err + } + + return list[0], nil +} + func (p *Manager) ProcessAdd(nodeid string, config *app.Config, metadata map[string]interface{}) error { node, err := p.NodeGet(nodeid) if err != nil { @@ -567,6 +580,15 @@ func (p *Manager) ProcessUpdate(nodeid string, id app.ProcessID, config *app.Con return node.Core().ProcessUpdate(id, config, metadata) } +func (p *Manager) ProcessReportSet(nodeid string, id app.ProcessID, report *app.Report) error { + node, err := p.NodeGet(nodeid) + if err != nil { + return fmt.Errorf("node not found: %w", err) + } + + return node.Core().ProcessReportSet(id, report) +} + func (p *Manager) ProcessCommand(nodeid string, id app.ProcessID, command string) error { node, err := p.NodeGet(nodeid) if err != nil { diff --git a/cluster/node/node.go b/cluster/node/node.go index 70ee5c64..91bf2085 100644 --- a/cluster/node/node.go +++ b/cluster/node/node.go @@ -109,9 +109,7 @@ func New(config Config) *Node { } func (n *Node) Stop() error { - n.lock.Lock() - defer n.lock.Unlock() if n.cancel == nil { @@ -148,8 +146,11 @@ type Resources struct { NCPU float64 // Number of CPU on this node CPU float64 // Current CPU load, 0-100*ncpu CPULimit float64 // Defined CPU load limit, 0-100*ncpu + CPUCore float64 // Current CPU load of the core itself, 0-100*ncpu Mem uint64 // 
Currently used memory in bytes MemLimit uint64 // Defined memory limit in bytes + MemTotal uint64 // Total available memory in bytes + MemCore uint64 // Current used memory of the core itself in bytes Error error // Last error } @@ -509,8 +510,11 @@ func (n *Node) ping(ctx context.Context, interval time.Duration) { NCPU: about.Resources.NCPU, CPU: about.Resources.CPU, CPULimit: about.Resources.CPULimit, + CPUCore: about.Resources.CPUCore, Mem: about.Resources.Mem, MemLimit: about.Resources.MemLimit, + MemTotal: about.Resources.MemTotal, + MemCore: about.Resources.MemCore, Error: nil, }, } diff --git a/cluster/process.go b/cluster/process.go index 85cab534..c67f0de2 100644 --- a/cluster/process.go +++ b/cluster/process.go @@ -22,6 +22,21 @@ func (c *cluster) ProcessAdd(origin string, config *app.Config) error { return c.applyCommand(cmd) } +func (c *cluster) ProcessGet(origin string, id app.ProcessID, stale bool) (store.Process, string, error) { + if !stale { + if !c.IsRaftLeader() { + return c.forwarder.ProcessGet(origin, id) + } + } + + process, nodeid, err := c.store.ProcessGet(id) + if err != nil { + return store.Process{}, "", err + } + + return process, nodeid, nil +} + func (c *cluster) ProcessRemove(origin string, id app.ProcessID) error { if !c.IsRaftLeader() { return c.forwarder.ProcessRemove(origin, id) @@ -70,7 +85,7 @@ func (c *cluster) ProcessSetCommand(origin string, id app.ProcessID, command str return c.applyCommand(cmd) } - nodeid, err := c.manager.ProcessFindNodeID(id) + nodeid, err := c.store.ProcessGetNode(id) if err != nil { return fmt.Errorf("the process '%s' is not registered with any node: %w", id.String(), err) } @@ -111,7 +126,7 @@ func (c *cluster) ProcessSetMetadata(origin string, id app.ProcessID, key string } func (c *cluster) ProcessGetMetadata(origin string, id app.ProcessID, key string) (interface{}, error) { - p, err := c.store.ProcessGet(id) + p, _, err := c.store.ProcessGet(id) if err != nil { return nil, err } diff --git 
a/cluster/store/identity_test.go b/cluster/store/identity_test.go index 0253040e..7f74d765 100644 --- a/cluster/store/identity_test.go +++ b/cluster/store/identity_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/datarhei/core/v16/iam/access" "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/stretchr/testify/require" ) @@ -394,7 +394,7 @@ func TestUpdateIdentityWithPolicies(t *testing.T) { Name: "foobar", } - policies := []access.Policy{ + policies := []policy.Policy{ { Name: "bla", Domain: "bla", diff --git a/cluster/store/policy.go b/cluster/store/policy.go index 2060cf7f..04ded03a 100644 --- a/cluster/store/policy.go +++ b/cluster/store/policy.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/datarhei/core/v16/iam/access" + "github.com/datarhei/core/v16/iam/policy" ) func (s *store) setPolicies(cmd CommandSetPolicies) error { @@ -80,9 +80,9 @@ func (s *store) IAMIdentityPolicyList(name string) Policies { } // updatePolicy updates a policy such that the resource type is split off the resource -func (s *store) updatePolicy(p access.Policy) access.Policy { +func (s *store) updatePolicy(p policy.Policy) policy.Policy { if len(p.Types) == 0 { - p.Types, p.Resource = access.DecodeResource(p.Resource) + p.Types, p.Resource = policy.DecodeResource(p.Resource) } return p diff --git a/cluster/store/policy_test.go b/cluster/store/policy_test.go index dde9cfc3..c00efc76 100644 --- a/cluster/store/policy_test.go +++ b/cluster/store/policy_test.go @@ -3,8 +3,8 @@ package store import ( "testing" - "github.com/datarhei/core/v16/iam/access" "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/stretchr/testify/require" ) @@ -30,7 +30,7 @@ func TestSetPoliciesCommand(t *testing.T) { Operation: OpSetPolicies, Data: CommandSetPolicies{ Name: "foobar", - Policies: []access.Policy{ + Policies: []policy.Policy{ { Name: "bla", Domain: "bla", @@ -59,7 +59,7 @@ func 
TestSetPolicies(t *testing.T) { Name: "foobar", } - policies := []access.Policy{ + policies := []policy.Policy{ { Name: "bla", Domain: "bla", diff --git a/cluster/store/process.go b/cluster/store/process.go index f14d4c95..d04b7129 100644 --- a/cluster/store/process.go +++ b/cluster/store/process.go @@ -2,6 +2,7 @@ package store import ( "fmt" + "maps" "time" "github.com/datarhei/core/v16/restream/app" @@ -72,11 +73,11 @@ func (s *store) updateProcess(cmd CommandUpdateProcess) error { return fmt.Errorf("the process with the ID '%s' doesn't exists%w", srcid, ErrNotFound) } - if p.Config.Equal(cmd.Config) { - return nil - } - if srcid == dstid { + if p.Config.Equal(cmd.Config) { + return nil + } + p.UpdatedAt = time.Now() p.Config = cmd.Config @@ -219,36 +220,44 @@ func (s *store) ProcessList() []Process { return processes } -func (s *store) ProcessGet(id app.ProcessID) (Process, error) { +func (s *store) ProcessGet(id app.ProcessID) (Process, string, error) { s.lock.RLock() defer s.lock.RUnlock() process, ok := s.data.Process[id.String()] if !ok { - return Process{}, fmt.Errorf("not found%w", ErrNotFound) + return Process{}, "", fmt.Errorf("not found%w", ErrNotFound) } + nodeid := s.data.ProcessNodeMap[id.String()] + return Process{ CreatedAt: process.CreatedAt, UpdatedAt: process.UpdatedAt, Config: process.Config.Clone(), Order: process.Order, - Metadata: process.Metadata, + Metadata: maps.Clone(process.Metadata), Error: process.Error, - }, nil + }, nodeid, nil } func (s *store) ProcessGetNodeMap() map[string]string { s.lock.RLock() defer s.lock.RUnlock() - m := map[string]string{} + return maps.Clone(s.data.ProcessNodeMap) +} - for key, value := range s.data.ProcessNodeMap { - m[key] = value +func (s *store) ProcessGetNode(id app.ProcessID) (string, error) { + s.lock.RLock() + defer s.lock.RUnlock() + + nodeid, hasProcess := s.data.ProcessNodeMap[id.String()] + if !hasProcess { + return "", ErrNotFound } - return m + return nodeid, nil } func (s *store) 
ProcessGetRelocateMap() map[string]string { diff --git a/cluster/store/process_test.go b/cluster/store/process_test.go index 03a33587..8d790cd2 100644 --- a/cluster/store/process_test.go +++ b/cluster/store/process_test.go @@ -301,13 +301,13 @@ func TestUpdateProcess(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, len(s.data.Process)) - _, err = s.ProcessGet(config1.ProcessID()) + _, _, err = s.ProcessGet(config1.ProcessID()) require.Error(t, err) - _, err = s.ProcessGet(config2.ProcessID()) + _, _, err = s.ProcessGet(config2.ProcessID()) require.NoError(t, err) - _, err = s.ProcessGet(config.ProcessID()) + _, _, err = s.ProcessGet(config.ProcessID()) require.NoError(t, err) } @@ -330,7 +330,7 @@ func TestSetProcessOrderCommand(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, s.data.Process) - p, err := s.ProcessGet(config.ProcessID()) + p, _, err := s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Equal(t, "stop", p.Order) @@ -343,7 +343,7 @@ func TestSetProcessOrderCommand(t *testing.T) { }) require.NoError(t, err) - p, err = s.ProcessGet(config.ProcessID()) + p, _, err = s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Equal(t, "start", p.Order) } @@ -382,7 +382,7 @@ func TestSetProcessOrder(t *testing.T) { }) require.NoError(t, err) - p, err := s.ProcessGet(config.ProcessID()) + p, _, err := s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Equal(t, "stop", p.Order) @@ -392,7 +392,7 @@ func TestSetProcessOrder(t *testing.T) { }) require.NoError(t, err) - p, err = s.ProcessGet(config.ProcessID()) + p, _, err = s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Equal(t, "start", p.Order) } @@ -416,7 +416,7 @@ func TestSetProcessMetadataCommand(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, s.data.Process) - p, err := s.ProcessGet(config.ProcessID()) + p, _, err := s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Empty(t, p.Metadata) @@ -432,7 +432,7 @@ 
func TestSetProcessMetadataCommand(t *testing.T) { }) require.NoError(t, err) - p, err = s.ProcessGet(config.ProcessID()) + p, _, err = s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.NotEmpty(t, p.Metadata) @@ -477,7 +477,7 @@ func TestSetProcessMetadata(t *testing.T) { }) require.NoError(t, err) - p, err := s.ProcessGet(config.ProcessID()) + p, _, err := s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.NotEmpty(t, p.Metadata) @@ -492,7 +492,7 @@ func TestSetProcessMetadata(t *testing.T) { }) require.NoError(t, err) - p, err = s.ProcessGet(config.ProcessID()) + p, _, err = s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.NotEmpty(t, p.Metadata) @@ -506,7 +506,7 @@ func TestSetProcessMetadata(t *testing.T) { }) require.NoError(t, err) - p, err = s.ProcessGet(config.ProcessID()) + p, _, err = s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.NotEmpty(t, p.Metadata) @@ -533,7 +533,7 @@ func TestSetProcessErrorCommand(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, s.data.Process) - p, err := s.ProcessGet(config.ProcessID()) + p, _, err := s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Equal(t, "", p.Error) @@ -546,7 +546,7 @@ func TestSetProcessErrorCommand(t *testing.T) { }) require.NoError(t, err) - p, err = s.ProcessGet(config.ProcessID()) + p, _, err = s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Equal(t, "foobar", p.Error) } @@ -585,7 +585,7 @@ func TestSetProcessError(t *testing.T) { }) require.NoError(t, err) - p, err := s.ProcessGet(config.ProcessID()) + p, _, err := s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Equal(t, "", p.Error) @@ -595,7 +595,7 @@ func TestSetProcessError(t *testing.T) { }) require.NoError(t, err) - p, err = s.ProcessGet(config.ProcessID()) + p, _, err = s.ProcessGet(config.ProcessID()) require.NoError(t, err) require.Equal(t, "foobar", p.Error) } diff --git a/cluster/store/store.go 
b/cluster/store/store.go index f6694f2b..4476ad82 100644 --- a/cluster/store/store.go +++ b/cluster/store/store.go @@ -7,8 +7,8 @@ import ( "time" "github.com/datarhei/core/v16/encoding/json" - "github.com/datarhei/core/v16/iam/access" "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/restream/app" @@ -21,7 +21,8 @@ type Store interface { OnApply(func(op Operation)) ProcessList() []Process - ProcessGet(id app.ProcessID) (Process, error) + ProcessGet(id app.ProcessID) (Process, string, error) + ProcessGetNode(id app.ProcessID) (string, error) ProcessGetNodeMap() map[string]string ProcessGetRelocateMap() map[string]string @@ -55,7 +56,7 @@ type Users struct { type Policies struct { UpdatedAt time.Time - Policies []access.Policy + Policies []policy.Policy } type Value struct { @@ -153,7 +154,7 @@ type CommandRemoveIdentity struct { type CommandSetPolicies struct { Name string - Policies []access.Policy + Policies []policy.Policy } type CommandCreateLock struct { @@ -195,7 +196,7 @@ type storeData struct { Policies struct { UpdatedAt time.Time - Policies map[string][]access.Policy + Policies map[string][]policy.Policy } Locks map[string]time.Time @@ -216,7 +217,7 @@ func (s *storeData) init() { s.Users.Users = map[string]identity.User{} s.Users.userlist = identity.NewUserList() s.Policies.UpdatedAt = now - s.Policies.Policies = map[string][]access.Policy{} + s.Policies.Policies = map[string][]policy.Policy{} s.Locks = map[string]time.Time{} s.KVS = map[string]Value{} s.Nodes = map[string]Node{} diff --git a/config/config_test.go b/config/config_test.go index 132857fe..8cf89fcf 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -60,12 +60,12 @@ func TestValidateDefault(t *testing.T) { fs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) - size, fresh, err := fs.WriteFileReader("./mime.types", strings.NewReader("xxxxx")) + size, fresh, 
err := fs.WriteFileReader("./mime.types", strings.NewReader("xxxxx"), -1) require.Equal(t, int64(5), size) require.Equal(t, true, fresh) require.NoError(t, err) - _, _, err = fs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx")) + _, _, err = fs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx"), -1) require.NoError(t, err) cfg := New(fs) diff --git a/docs/docs.go b/docs/docs.go index 5f9ab7e8..ee197903 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -381,7 +381,7 @@ const docTemplate = `{ "v16.?.?" ], "summary": "List of identities in the cluster", - "operationId": "cluster-3-db-list-identity", + "operationId": "cluster-3-db-get-identity", "responses": { "200": { "description": "OK", @@ -3061,6 +3061,12 @@ const docTemplate = `{ "type": "string" } }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.Error" + } + }, "403": { "description": "Forbidden", "schema": { @@ -4479,6 +4485,75 @@ const docTemplate = `{ } } } + }, + "put": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Set the report history a process", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "v16.?.?" 
+ ], + "summary": "Set the report history a process", + "operationId": "process-3-set-report", + "parameters": [ + { + "type": "string", + "description": "Process ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Domain to act on", + "name": "domain", + "in": "query" + }, + { + "description": "Process report", + "name": "report", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.ProcessReport" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.Error" + } + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/api.Error" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.Error" + } + } + } } }, "/api/v3/process/{id}/state": { @@ -6849,17 +6924,10 @@ const docTemplate = `{ "type": "integer", "format": "int64" }, - "exit_state": { - "type": "string" - }, - "exited_at": { - "type": "integer", - "format": "int64" - }, "history": { "type": "array", "items": { - "$ref": "#/definitions/api.ProcessReportEntry" + "$ref": "#/definitions/api.ProcessReportHistoryEntry" } }, "log": { @@ -6882,16 +6950,10 @@ const docTemplate = `{ "items": { "type": "string" } - }, - "progress": { - "$ref": "#/definitions/api.Progress" - }, - "resources": { - "$ref": "#/definitions/api.ProcessUsage" } } }, - "api.ProcessReportEntry": { + "api.ProcessReportHistoryEntry": { "type": "object", "properties": { "created_at": { diff --git a/docs/swagger.json b/docs/swagger.json index 06ac2d39..ba1fbbe4 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -373,7 +373,7 @@ "v16.?.?" 
], "summary": "List of identities in the cluster", - "operationId": "cluster-3-db-list-identity", + "operationId": "cluster-3-db-get-identity", "responses": { "200": { "description": "OK", @@ -3053,6 +3053,12 @@ "type": "string" } }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.Error" + } + }, "403": { "description": "Forbidden", "schema": { @@ -4471,6 +4477,75 @@ } } } + }, + "put": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "Set the report history a process", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "v16.?.?" + ], + "summary": "Set the report history a process", + "operationId": "process-3-set-report", + "parameters": [ + { + "type": "string", + "description": "Process ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Domain to act on", + "name": "domain", + "in": "query" + }, + { + "description": "Process report", + "name": "report", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/api.ProcessReport" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/api.Error" + } + }, + "403": { + "description": "Forbidden", + "schema": { + "$ref": "#/definitions/api.Error" + } + }, + "404": { + "description": "Not Found", + "schema": { + "$ref": "#/definitions/api.Error" + } + } + } } }, "/api/v3/process/{id}/state": { @@ -6841,17 +6916,10 @@ "type": "integer", "format": "int64" }, - "exit_state": { - "type": "string" - }, - "exited_at": { - "type": "integer", - "format": "int64" - }, "history": { "type": "array", "items": { - "$ref": "#/definitions/api.ProcessReportEntry" + "$ref": "#/definitions/api.ProcessReportHistoryEntry" } }, "log": { @@ -6874,16 +6942,10 @@ "items": { "type": "string" } - }, - "progress": { - "$ref": 
"#/definitions/api.Progress" - }, - "resources": { - "$ref": "#/definitions/api.ProcessUsage" } } }, - "api.ProcessReportEntry": { + "api.ProcessReportHistoryEntry": { "type": "object", "properties": { "created_at": { diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 25f3cb48..61e9d8b2 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -1272,14 +1272,9 @@ definitions: created_at: format: int64 type: integer - exit_state: - type: string - exited_at: - format: int64 - type: integer history: items: - $ref: '#/definitions/api.ProcessReportEntry' + $ref: '#/definitions/api.ProcessReportHistoryEntry' type: array log: items: @@ -1295,12 +1290,8 @@ definitions: items: type: string type: array - progress: - $ref: '#/definitions/api.Progress' - resources: - $ref: '#/definitions/api.ProcessUsage' type: object - api.ProcessReportEntry: + api.ProcessReportHistoryEntry: properties: created_at: format: int64 @@ -2826,7 +2817,7 @@ paths: /api/v3/cluster/db/user/{name}: get: description: List of identities in the cluster - operationId: cluster-3-db-list-identity + operationId: cluster-3-db-get-identity produces: - application/json responses: @@ -4502,6 +4493,10 @@ paths: description: OK schema: type: string + "400": + description: Bad Request + schema: + $ref: '#/definitions/api.Error' "403": description: Forbidden schema: @@ -5503,6 +5498,51 @@ paths: summary: Get the logs of a process tags: - v16.7.2 + put: + consumes: + - application/json + description: Set the report history a process + operationId: process-3-set-report + parameters: + - description: Process ID + in: path + name: id + required: true + type: string + - description: Domain to act on + in: query + name: domain + type: string + - description: Process report + in: body + name: report + required: true + schema: + $ref: '#/definitions/api.ProcessReport' + produces: + - application/json + responses: + "200": + description: OK + schema: + type: string + "400": + description: Bad Request + schema: + $ref: 
'#/definitions/api.Error' + "403": + description: Forbidden + schema: + $ref: '#/definitions/api.Error' + "404": + description: Not Found + schema: + $ref: '#/definitions/api.Error' + security: + - ApiKeyAuth: [] + summary: Set the report history a process + tags: + - v16.?.? /api/v3/process/{id}/state: get: description: Get the state and progress data of a process. diff --git a/ffmpeg/parse/parser.go b/ffmpeg/parse/parser.go index 9f3f8fd3..a6bf5127 100644 --- a/ffmpeg/parse/parser.go +++ b/ffmpeg/parse/parser.go @@ -1,11 +1,11 @@ package parse import ( + "bytes" "container/ring" "fmt" "regexp" "strconv" - "strings" "sync" "time" @@ -40,8 +40,8 @@ type Parser interface { // LastLogline returns the last parsed log line LastLogline() string - // TransferReportHistory transfers the report history to another parser - TransferReportHistory(Parser) error + // ImportReportHistory imports a report history from another parser + ImportReportHistory([]ReportHistoryEntry) } // Config is the config for the Parser implementation @@ -200,14 +200,14 @@ func New(config Config) Parser { return p } -func (p *parser) Parse(line string) uint64 { - isDefaultProgress := strings.HasPrefix(line, "frame=") - isFFmpegInputs := strings.HasPrefix(line, "ffmpeg.inputs:") - isFFmpegOutputs := strings.HasPrefix(line, "ffmpeg.outputs:") - isFFmpegMapping := strings.HasPrefix(line, "ffmpeg.mapping:") - isFFmpegProgress := strings.HasPrefix(line, "ffmpeg.progress:") - isHLSStreamMap := strings.HasPrefix(line, "hls.streammap:") - isAVstreamProgress := strings.HasPrefix(line, "avstream.progress:") +func (p *parser) Parse(line []byte) uint64 { + isDefaultProgress := bytes.HasPrefix(line, []byte("frame=")) + isFFmpegInputs := bytes.HasPrefix(line, []byte("ffmpeg.inputs:")) + isFFmpegOutputs := bytes.HasPrefix(line, []byte("ffmpeg.outputs:")) + isFFmpegMapping := bytes.HasPrefix(line, []byte("ffmpeg.mapping:")) + isFFmpegProgress := bytes.HasPrefix(line, []byte("ffmpeg.progress:")) + isHLSStreamMap := 
bytes.HasPrefix(line, []byte("hls.streammap:")) + isAVstreamProgress := bytes.HasPrefix(line, []byte("avstream.progress:")) p.lock.log.Lock() if p.logStart.IsZero() { @@ -235,7 +235,7 @@ func (p *parser) Parse(line string) uint64 { } if isFFmpegInputs { - if err := p.parseFFmpegIO("input", strings.TrimPrefix(line, "ffmpeg.inputs:")); err != nil { + if err := p.parseFFmpegIO("input", bytes.TrimPrefix(line, []byte("ffmpeg.inputs:"))); err != nil { p.logger.WithFields(log.Fields{ "line": line, "error": err, @@ -246,7 +246,7 @@ func (p *parser) Parse(line string) uint64 { } if isHLSStreamMap { - if err := p.parseHLSStreamMap(strings.TrimPrefix(line, "hls.streammap:")); err != nil { + if err := p.parseHLSStreamMap(bytes.TrimPrefix(line, []byte("hls.streammap:"))); err != nil { p.logger.WithFields(log.Fields{ "line": line, "error": err, @@ -257,7 +257,7 @@ func (p *parser) Parse(line string) uint64 { } if isFFmpegOutputs { - if err := p.parseFFmpegIO("output", strings.TrimPrefix(line, "ffmpeg.outputs:")); err != nil { + if err := p.parseFFmpegIO("output", bytes.TrimPrefix(line, []byte("ffmpeg.outputs:"))); err != nil { p.logger.WithFields(log.Fields{ "line": line, "error": err, @@ -287,7 +287,7 @@ func (p *parser) Parse(line string) uint64 { } if isFFmpegMapping { - if err := p.parseFFmpegMapping(strings.TrimPrefix(line, "ffmpeg.mapping:")); err != nil { + if err := p.parseFFmpegMapping(bytes.TrimPrefix(line, []byte("ffmpeg.mapping:"))); err != nil { p.logger.WithFields(log.Fields{ "line": line, "error": err, @@ -298,15 +298,16 @@ func (p *parser) Parse(line string) uint64 { } if !isDefaultProgress && !isFFmpegProgress && !isAVstreamProgress { + stringLine := string(line) // Write the current non-progress line to the log - p.addLog(line) + p.addLog(stringLine) p.lock.prelude.Lock() if !p.prelude.done { if len(p.prelude.data) < p.prelude.headLines { - p.prelude.data = append(p.prelude.data, line) + p.prelude.data = append(p.prelude.data, stringLine) } else { - 
p.prelude.tail.Value = line + p.prelude.tail.Value = stringLine p.prelude.tail = p.prelude.tail.Next() p.prelude.truncatedLines++ } @@ -315,8 +316,8 @@ func (p *parser) Parse(line string) uint64 { p.lock.log.Lock() for _, pattern := range p.logpatterns.patterns { - if pattern.MatchString(line) { - p.logpatterns.matches = append(p.logpatterns.matches, line) + if pattern.Match(line) { + p.logpatterns.matches = append(p.logpatterns.matches, stringLine) } } p.lock.log.Unlock() @@ -363,7 +364,7 @@ func (p *parser) Parse(line string) uint64 { // Update the progress if isAVstreamProgress { - if err := p.parseAVstreamProgress(strings.TrimPrefix(line, "avstream.progress:")); err != nil { + if err := p.parseAVstreamProgress(bytes.TrimPrefix(line, []byte("avstream.progress:"))); err != nil { p.logger.WithFields(log.Fields{ "line": line, "error": err, @@ -382,7 +383,7 @@ func (p *parser) Parse(line string) uint64 { return 0 } } else if isFFmpegProgress { - if err := p.parseFFmpegProgress(strings.TrimPrefix(line, "ffmpeg.progress:")); err != nil { + if err := p.parseFFmpegProgress(bytes.TrimPrefix(line, []byte("ffmpeg.progress:"))); err != nil { p.logger.WithFields(log.Fields{ "line": line, "error": err, @@ -466,48 +467,48 @@ func (p *parser) Parse(line string) uint64 { return pFrames } -func (p *parser) parseDefaultProgress(line string) error { - var matches []string +func (p *parser) parseDefaultProgress(line []byte) error { + var matches [][]byte - if matches = p.re.frame.FindStringSubmatch(line); matches != nil { - if x, err := strconv.ParseUint(matches[1], 10, 64); err == nil { + if matches = p.re.frame.FindSubmatch(line); matches != nil { + if x, err := strconv.ParseUint(string(matches[1]), 10, 64); err == nil { p.progress.ffmpeg.Frame = x } } - if matches = p.re.quantizer.FindStringSubmatch(line); matches != nil { - if x, err := strconv.ParseFloat(matches[1], 64); err == nil { + if matches = p.re.quantizer.FindSubmatch(line); matches != nil { + if x, err := 
strconv.ParseFloat(string(matches[1]), 64); err == nil { p.progress.ffmpeg.Quantizer = x } } - if matches = p.re.size.FindStringSubmatch(line); matches != nil { - if x, err := strconv.ParseUint(matches[1], 10, 64); err == nil { + if matches = p.re.size.FindSubmatch(line); matches != nil { + if x, err := strconv.ParseUint(string(matches[1]), 10, 64); err == nil { p.progress.ffmpeg.Size = x * 1024 } } - if matches = p.re.time.FindStringSubmatch(line); matches != nil { - s := fmt.Sprintf("%sh%sm%ss%s0ms", matches[1], matches[2], matches[3], matches[4]) + if matches = p.re.time.FindSubmatch(line); matches != nil { + s := fmt.Sprintf("%sh%sm%ss%s0ms", string(matches[1]), string(matches[2]), string(matches[3]), string(matches[4])) if x, err := time.ParseDuration(s); err == nil { p.progress.ffmpeg.Time.Duration = x } } - if matches = p.re.speed.FindStringSubmatch(line); matches != nil { - if x, err := strconv.ParseFloat(matches[1], 64); err == nil { + if matches = p.re.speed.FindSubmatch(line); matches != nil { + if x, err := strconv.ParseFloat(string(matches[1]), 64); err == nil { p.progress.ffmpeg.Speed = x } } - if matches = p.re.drop.FindStringSubmatch(line); matches != nil { - if x, err := strconv.ParseUint(matches[1], 10, 64); err == nil { + if matches = p.re.drop.FindSubmatch(line); matches != nil { + if x, err := strconv.ParseUint(string(matches[1]), 10, 64); err == nil { p.progress.ffmpeg.Drop = x } } - if matches = p.re.dup.FindStringSubmatch(line); matches != nil { - if x, err := strconv.ParseUint(matches[1], 10, 64); err == nil { + if matches = p.re.dup.FindSubmatch(line); matches != nil { + if x, err := strconv.ParseUint(string(matches[1]), 10, 64); err == nil { p.progress.ffmpeg.Dup = x } } @@ -515,10 +516,10 @@ func (p *parser) parseDefaultProgress(line string) error { return nil } -func (p *parser) parseFFmpegIO(kind, line string) error { +func (p *parser) parseFFmpegIO(kind string, line []byte) error { processIO := []ffmpegProcessIO{} - err := 
json.Unmarshal([]byte(line), &processIO) + err := json.Unmarshal(line, &processIO) if err != nil { return err } @@ -542,10 +543,10 @@ func (p *parser) parseFFmpegIO(kind, line string) error { return nil } -func (p *parser) parseFFmpegMapping(line string) error { +func (p *parser) parseFFmpegMapping(line []byte) error { mapping := ffmpegStreamMapping{} - err := json.Unmarshal([]byte(line), &mapping) + err := json.Unmarshal(line, &mapping) if err != nil { return err } @@ -555,10 +556,10 @@ func (p *parser) parseFFmpegMapping(line string) error { return nil } -func (p *parser) parseHLSStreamMap(line string) error { +func (p *parser) parseHLSStreamMap(line []byte) error { mapping := ffmpegHLSStreamMap{} - err := json.Unmarshal([]byte(line), &mapping) + err := json.Unmarshal(line, &mapping) if err != nil { return err } @@ -568,10 +569,10 @@ func (p *parser) parseHLSStreamMap(line string) error { return nil } -func (p *parser) parseFFmpegProgress(line string) error { +func (p *parser) parseFFmpegProgress(line []byte) error { progress := ffmpegProgress{} - err := json.Unmarshal([]byte(line), &progress) + err := json.Unmarshal(line, &progress) if err != nil { return err } @@ -609,10 +610,10 @@ func (p *parser) parseFFmpegProgress(line string) error { return nil } -func (p *parser) parseAVstreamProgress(line string) error { +func (p *parser) parseAVstreamProgress(line []byte) error { progress := ffmpegAVstream{} - err := json.Unmarshal([]byte(line), &progress) + err := json.Unmarshal(line, &progress) if err != nil { return err } @@ -766,7 +767,7 @@ func (p *parser) addLog(line string) { p.log.Value = process.Line{ Timestamp: time.Now(), - Data: line, + Data: p.lastLogline, } p.log = p.log.Next() } @@ -862,31 +863,6 @@ func (p *parser) ResetLog() { p.lock.log.Unlock() } -// Report represents a log report, including the prelude and the last log lines of the process. 
-type Report struct { - CreatedAt time.Time - Prelude []string - Log []process.Line - Matches []string -} - -// ReportHistoryEntry represents an historical log report, including the exit status of the -// process and the last progress data. -type ReportHistoryEntry struct { - Report - - ExitedAt time.Time - ExitState string - Progress Progress - Usage Usage -} - -type ReportHistorySearchResult struct { - CreatedAt time.Time - ExitedAt time.Time - ExitState string -} - func (p *parser) SearchReportHistory(state string, from, to *time.Time) []ReportHistorySearchResult { p.lock.logHistory.RLock() defer p.lock.logHistory.RUnlock() @@ -1006,26 +982,20 @@ func (p *parser) ReportHistory() []ReportHistoryEntry { return history } -func (p *parser) TransferReportHistory(dst Parser) error { - p.lock.logHistory.RLock() - defer p.lock.logHistory.RUnlock() - - pp, ok := dst.(*parser) - if !ok { - return fmt.Errorf("the target parser is not of the required type") - } +func (p *parser) ImportReportHistory(history []ReportHistoryEntry) { + p.lock.logHistory.Lock() + defer p.lock.logHistory.Unlock() - pp.lock.logHistory.Lock() - defer pp.lock.logHistory.Unlock() + historyLength := p.logHistoryLength + p.logMinimalHistoryLength - p.logHistory.Do(func(l interface{}) { - if l == nil { - return - } + if historyLength <= 0 { + return + } - pp.logHistory.Value = l - pp.logHistory = pp.logHistory.Next() - }) + p.logHistory = ring.New(historyLength) - return nil + for _, r := range history { + p.logHistory.Value = r + p.logHistory = p.logHistory.Next() + } } diff --git a/ffmpeg/parse/parser_test.go b/ffmpeg/parse/parser_test.go index ee67ee26..e041fc4f 100644 --- a/ffmpeg/parse/parser_test.go +++ b/ffmpeg/parse/parser_test.go @@ -17,16 +17,14 @@ func TestParserProgress(t *testing.T) { }).(*parser) parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 
5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) d, _ := time.ParseDuration("3m58s440ms") wantP := Progress{ Frame: 5968, - FPS: 25, Quantizer: 19.4, Size: 453632, Time: d.Seconds(), - Bitrate: 5632, Speed: 0.999, Drop: 3522, Dup: 87463, @@ -68,7 +66,7 @@ func TestParserPrelude(t *testing.T) { require.Equal(t, 0, len(log)) - parser.Parse("prelude") + parser.Parse([]byte("prelude")) log = parser.Prelude() @@ -87,7 +85,7 @@ func TestParserLongPrelude(t *testing.T) { require.Equal(t, 0, len(log)) for i := 0; i < 150; i++ { - parser.Parse(fmt.Sprintf("prelude %3d", i)) + parser.Parse([]byte(fmt.Sprintf("prelude %3d", i))) } log = parser.Prelude() @@ -107,7 +105,7 @@ func TestParserVeryLongPrelude(t *testing.T) { require.Equal(t, 0, len(log)) for i := 0; i < 300; i++ { - parser.Parse(fmt.Sprintf("prelude %3d", i)) + parser.Parse([]byte(fmt.Sprintf("prelude %3d", i))) } log = parser.Prelude() @@ -124,7 +122,7 @@ func TestParserLog(t *testing.T) { require.Equal(t, 0, len(log)) - parser.Parse("bla") + parser.Parse([]byte("bla")) log = parser.Log() @@ -136,19 +134,19 @@ func TestParserLastLogLine(t *testing.T) { LogLines: 20, }).(*parser) - parser.Parse("foo") + parser.Parse([]byte("foo")) line := parser.LastLogline() require.Equal(t, "foo", line) parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) // progress lines are not logged line = parser.LastLogline() require.Equal(t, "foo", line) - parser.Parse("bar") + parser.Parse([]byte("bar")) line = parser.LastLogline() require.Equal(t, "bar", line) } @@ -160,10 +158,10 @@ func TestParserLogHistory(t *testing.T) { }).(*parser) for i := 0; i < 7; i++ { - parser.Parse("bla") + parser.Parse([]byte("bla")) 
parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) history := parser.ReportHistory() require.Equal(t, int(math.Min(float64(i), 5)), len(history)) @@ -202,6 +200,67 @@ func TestParserLogHistory(t *testing.T) { } } +func TestParserImportLogHistory(t *testing.T) { + parser := New(Config{ + LogLines: 20, + LogHistory: 5, + }).(*parser) + + for i := 0; i < 7; i++ { + parser.Parse([]byte("bla")) + + parser.prelude.done = true + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) + + history := parser.ReportHistory() + require.Equal(t, int(math.Min(float64(i), 5)), len(history)) + + parser.Stop("finished", process.Usage{}) + parser.ResetStats() + + time.Sleep(time.Second) + } + + history := parser.ReportHistory() + + for i, h := range history { + h.Prelude[0] = "blubb" + h.ExitState = "nothing" + h.Progress.Frame = 42 + + history[i] = h + } + + parser.ImportReportHistory(history[:3]) + + history = parser.ReportHistory() + require.Equal(t, 3, len(history)) + + for i := 0; i < 3; i++ { + require.Equal(t, "nothing", history[i].ExitState) + require.Equal(t, "bla", history[i].Log[0].Data) + require.Equal(t, "blubb", history[i].Prelude[0]) + + d, _ := time.ParseDuration("3m58s440ms") + require.Equal(t, Progress{ + Started: true, + Frame: 42, + FPS: 0, // is calculated with averager + Quantizer: 19.4, + Size: 453632, + Time: d.Seconds(), + Bitrate: 0, // is calculated with averager + Speed: 0.999, + Drop: 3522, + Dup: 87463, + }, history[i].Progress) + + if i != 0 { + require.Greater(t, history[i].CreatedAt, history[i-1].ExitedAt) + } + } +} + func TestParserLogHistoryLength(t *testing.T) { parser := New(Config{ LogLines: 20, @@ -212,10 
+271,10 @@ func TestParserLogHistoryLength(t *testing.T) { require.Equal(t, 0, len(history)) for i := 0; i < 5; i++ { - parser.Parse("bla") + parser.Parse([]byte("bla")) parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) parser.Stop("finished", process.Usage{}) } @@ -235,10 +294,10 @@ func TestParserLogMinimalHistoryLength(t *testing.T) { require.Equal(t, 0, len(history)) for i := 0; i < 42; i++ { - parser.Parse("bla") + parser.Parse([]byte("bla")) parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) parser.Stop("finished", process.Usage{}) } @@ -294,10 +353,10 @@ func TestParserLogMinimalHistoryLengthWithoutFullHistory(t *testing.T) { require.Equal(t, 0, len(history)) for i := 0; i < 15; i++ { - parser.Parse("bla") + parser.Parse([]byte("bla")) parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) parser.Stop("finished", process.Usage{}) } @@ -316,10 +375,10 @@ func TestParserLogHistorySearch(t *testing.T) { LogHistory: 5, }).(*parser) - parser.Parse("foo") + parser.Parse([]byte("foo")) parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB 
time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) parser.Stop("finished", process.Usage{}) @@ -329,10 +388,10 @@ func TestParserLogHistorySearch(t *testing.T) { parser.ResetLog() - parser.Parse("bar") + parser.Parse([]byte("bar")) parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) parser.Stop("finished", process.Usage{}) @@ -342,10 +401,10 @@ func TestParserLogHistorySearch(t *testing.T) { parser.ResetLog() - parser.Parse("foobar") + parser.Parse([]byte("foobar")) parser.prelude.done = true - parser.Parse("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463") + parser.Parse([]byte("frame= 5968 fps= 25 q=19.4 size=443kB time=00:03:58.44 bitrate=5632kbits/s speed=0.999x skip=9733 drop=3522 dup=87463")) parser.Stop("failed", process.Usage{}) @@ -407,7 +466,7 @@ func TestParserReset(t *testing.T) { require.Equal(t, 0, len(log)) require.Equal(t, 0, len(prelude)) - parser.Parse("prelude") + parser.Parse([]byte("prelude")) log = parser.Log() prelude = parser.Prelude() @@ -484,7 +543,7 @@ frame= 58 fps= 25 q=-1.0 Lsize=N/A time=00:00:02.32 bitrate=N/A speed=0.999x` data := strings.Split(rawdata, "\n") for _, d := range data { - parser.Parse(d) + parser.Parse([]byte(d)) } require.Equal(t, 3, len(parser.process.input), "expected 3 inputs") @@ -543,7 +602,7 @@ frame= 58 fps= 25 q=-1.0 Lsize=N/A time=00:00:02.32 bitrate=N/A speed=0.999x` data := strings.Split(rawdata, "\n") for _, d := range data { - parser.Parse(d) + parser.Parse([]byte(d)) } require.Equal(t, 2, len(parser.process.input), "expected 2 inputs") @@ -626,7 +685,7 @@ ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"frame":21,"packet":24,"size_kb data := 
strings.Split(rawdata, "\n") for _, d := range data { - parser.Parse(d) + parser.Parse([]byte(d)) } require.Equal(t, 1, len(parser.process.input), "expected 1 input") @@ -709,7 +768,7 @@ ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"frame":21,"packet":24,"size_kb data := strings.Split(rawdata, "\n") for _, d := range data { - parser.Parse(d) + parser.Parse([]byte(d)) } require.Equal(t, 1, len(parser.process.input), "expected 1 input") @@ -793,7 +852,7 @@ ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"frame":21,"packet":24,"size_kb data := strings.Split(rawdata, "\n") for _, d := range data { - parser.Parse(d) + parser.Parse([]byte(d)) } require.Equal(t, 1, len(parser.process.input), "expected 1 input") @@ -805,10 +864,10 @@ func TestParserProgressPlayout(t *testing.T) { LogLines: 20, }).(*parser) - parser.Parse(`ffmpeg.inputs:[{"url":"playout:https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","format":"playout","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":20.666666,"pix_fmt":"yuvj420p","width":1280,"height":720}]`) - parser.Parse(`ffmpeg.outputs:[{"url":"/dev/null","format":"flv","index":0,"stream":0,"type":"video","codec":"h264","coder":"libx264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"/dev/null","format":"mp4","index":1,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":20.666666,"pix_fmt":"yuvj420p","width":1280,"height":720}]`) - 
parser.Parse(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"frame":7,"keyframe":1,"packet":11,"size_kb":226,"size_bytes":42}],"outputs":[{"index":0,"stream":0,"frame":7,"keyframe":1,"packet":0,"q":0.0,"size_kb":0,"size_bytes":5,"extradata_size_bytes":32},{"index":1,"stream":0,"frame":11,"packet":11,"q":-1.0,"size_kb":226}],"frame":7,"packet":0,"q":0.0,"size_kb":226,"time":"0h0m0.56s","speed":0.4,"dup":0,"drop":0}`) - parser.Parse(`avstream.progress:{"id":"playout:https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","stream":0,"queue":140,"aqueue":42,"dup":5,"drop":8,"enc":7,"looping":true,"duplicating":true,"gop":"key","mode":"live","input":{"state":"running","packet":148,"size_kb":1529,"time":5},"output":{"state":"running","packet":8,"size_kb":128,"time":1},"swap":{"url":"","status":"waiting","lasturl":"","lasterror":""}}`) + parser.Parse([]byte(`ffmpeg.inputs:[{"url":"playout:https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","format":"playout","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":20.666666,"pix_fmt":"yuvj420p","width":1280,"height":720}]`)) + parser.Parse([]byte(`ffmpeg.outputs:[{"url":"/dev/null","format":"flv","index":0,"stream":0,"type":"video","codec":"h264","coder":"libx264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"/dev/null","format":"mp4","index":1,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":20.666666,"pix_fmt":"yuvj420p","width":1280,"height":720}]`)) + 
parser.Parse([]byte(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"frame":7,"keyframe":1,"packet":11,"size_kb":226,"size_bytes":42}],"outputs":[{"index":0,"stream":0,"frame":7,"keyframe":1,"packet":0,"q":0.0,"size_kb":0,"size_bytes":5,"extradata_size_bytes":32},{"index":1,"stream":0,"frame":11,"packet":11,"q":-1.0,"size_kb":226}],"frame":7,"packet":0,"q":0.0,"size_kb":226,"time":"0h0m0.56s","speed":0.4,"dup":0,"drop":0}`)) + parser.Parse([]byte(`avstream.progress:{"id":"playout:https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","stream":0,"queue":140,"aqueue":42,"dup":5,"drop":8,"enc":7,"looping":true,"duplicating":true,"gop":"key","mode":"live","input":{"state":"running","packet":148,"size_kb":1529,"time":5},"output":{"state":"running","packet":8,"size_kb":128,"time":1},"swap":{"url":"","status":"waiting","lasturl":"","lasterror":""}}`)) progress := parser.Progress() @@ -937,11 +996,11 @@ func TestParserStreamMapping(t *testing.T) { LogLines: 20, }).(*parser) - parser.Parse(`ffmpeg.inputs:[{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_720.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_1440.m3u8","format":"hls","index":1,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"anullsrc=r=44100:cl=mono","format":"lavfi","index":2,"stream":0,"type":"audio","codec":"pcm_u8","coder":"pcm_u8","bitrate_kbps":352,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`) - 
parser.Parse(`hls.streammap:{"address":"http://127.0.0.1:8080/memfs/live/%v.m3u8","variants":[{"variant":0,"address":"http://127.0.0.1:8080/memfs/live/0.m3u8","streams":[0,2]},{"variant":1,"address":"http://127.0.0.1:8080/memfs/live/1.m3u8","streams":[1,3]}]}`) - parser.Parse(`ffmpeg.outputs:[{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":1,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":2,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":3,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`) - parser.Parse(`ffmpeg.mapping:{"graphs":[{"index":0,"graph":[{"src_name":"Parsed_null_0","src_filter":"null","dst_name":"format","dst_filter":"format","inpad":"default","outpad":"default","timebase": "1/90000","type":"video","format":"yuvj420p","width":1280,"height":720},{"src_name":"graph 0 input from stream 0:0","src_filter":"buffer","dst_name":"Parsed_null_0","dst_filter":"null","inpad":"default","outpad":"default","timebase": "1/90000","type":"video","format":"yuvj420p","width":1280,"height":720},{"src_name":"format","src_filter":"format","dst_name":"out_0_0","dst_filter":"buffersink","inpad":"default","outpad":"default","timebase": 
"1/90000","type":"video","format":"yuvj420p","width":1280,"height":720}]},{"index":1,"graph":[{"src_name":"Parsed_anull_0","src_filter":"anull","dst_name":"auto_aresample_0","dst_filter":"aresample","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"u8","sampling_hz":44100,"layout":"mono"},{"src_name":"graph_1_in_2_0","src_filter":"abuffer","dst_name":"Parsed_anull_0","dst_filter":"anull","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"u8","sampling_hz":44100,"layout":"mono"},{"src_name":"format_out_0_2","src_filter":"aformat","dst_name":"out_0_2","dst_filter":"abuffersink","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"fltp","sampling_hz":44100,"layout":"mono"},{"src_name":"auto_aresample_0","src_filter":"aresample","dst_name":"format_out_0_2","dst_filter":"aformat","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"fltp","sampling_hz":44100,"layout":"mono"}]},{"index":2,"graph":[{"src_name":"Parsed_anull_0","src_filter":"anull","dst_name":"auto_aresample_0","dst_filter":"aresample","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"u8","sampling_hz":44100,"layout":"mono"},{"src_name":"graph_2_in_2_0","src_filter":"abuffer","dst_name":"Parsed_anull_0","dst_filter":"anull","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"u8","sampling_hz":44100,"layout":"mono"},{"src_name":"format_out_0_3","src_filter":"aformat","dst_name":"out_0_3","dst_filter":"abuffersink","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"fltp","sampling_hz":44100,"layout":"mono"},{"src_name":"auto_aresample_0","src_filter":"aresample","dst_name":"format_out_0_3","dst_filter":"aformat","inpad":"default","outpad":"default","timebase": 
"1/44100","type":"audio","format":"fltp","sampling_hz":44100,"layout":"mono"}]}],"mapping":[{"input":{"index":0,"stream":0},"graph":{"index":0,"name":"graph 0 input from stream 0:0"},"output":null},{"input":{"index":2,"stream":0},"graph":{"index":1,"name":"graph_1_in_2_0"},"output":null},{"input":{"index":2,"stream":0},"graph":{"index":2,"name":"graph_2_in_2_0"},"output":null},{"input":null,"graph":{"index":0,"name":"out_0_0"},"output":{"index":0,"stream":0}},{"input":{"index":1,"stream":0},"output":{"index":0,"stream":1},"copy":true},{"input":null,"graph":{"index":1,"name":"out_0_2"},"output":{"index":0,"stream":2}},{"input":null,"graph":{"index":2,"name":"out_0_3"},"output":{"index":0,"stream":3}}]}`) - parser.Parse(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":1467,"size_bytes":1501854},{"index":1,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":4428,"size_bytes":4534541},{"index":2,"stream":0,"framerate":{"min":43.066,"max":43.068,"avg":43.066},"frame":257,"keyframe":257,"packet":257,"size_kb":257,"size_bytes":263168}],"outputs":[{"index":0,"stream":0,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":1467,"size_bytes":1501923,"extradata_size_bytes":69},{"index":0,"stream":1,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":4428,"size_bytes":4534612,"extradata_size_bytes":71},{"index":0,"stream":2,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5},{"index":0,"stream":3,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5}],"frame":149,"packet":149,"q":-1.0,"size_kb":5897,"size_bytes":6038627,"time":"0h0m5.96s","speed":4.79,"dup":0,"drop":0}`) + 
parser.Parse([]byte(`ffmpeg.inputs:[{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_720.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_1440.m3u8","format":"hls","index":1,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"anullsrc=r=44100:cl=mono","format":"lavfi","index":2,"stream":0,"type":"audio","codec":"pcm_u8","coder":"pcm_u8","bitrate_kbps":352,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`)) + parser.Parse([]byte(`hls.streammap:{"address":"http://127.0.0.1:8080/memfs/live/%v.m3u8","variants":[{"variant":0,"address":"http://127.0.0.1:8080/memfs/live/0.m3u8","streams":[0,2]},{"variant":1,"address":"http://127.0.0.1:8080/memfs/live/1.m3u8","streams":[1,3]}]}`)) + 
parser.Parse([]byte(`ffmpeg.outputs:[{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":1,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":2,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":3,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`)) + parser.Parse([]byte(`ffmpeg.mapping:{"graphs":[{"index":0,"graph":[{"src_name":"Parsed_null_0","src_filter":"null","dst_name":"format","dst_filter":"format","inpad":"default","outpad":"default","timebase": "1/90000","type":"video","format":"yuvj420p","width":1280,"height":720},{"src_name":"graph 0 input from stream 0:0","src_filter":"buffer","dst_name":"Parsed_null_0","dst_filter":"null","inpad":"default","outpad":"default","timebase": "1/90000","type":"video","format":"yuvj420p","width":1280,"height":720},{"src_name":"format","src_filter":"format","dst_name":"out_0_0","dst_filter":"buffersink","inpad":"default","outpad":"default","timebase": "1/90000","type":"video","format":"yuvj420p","width":1280,"height":720}]},{"index":1,"graph":[{"src_name":"Parsed_anull_0","src_filter":"anull","dst_name":"auto_aresample_0","dst_filter":"aresample","inpad":"default","outpad":"default","timebase": 
"1/44100","type":"audio","format":"u8","sampling_hz":44100,"layout":"mono"},{"src_name":"graph_1_in_2_0","src_filter":"abuffer","dst_name":"Parsed_anull_0","dst_filter":"anull","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"u8","sampling_hz":44100,"layout":"mono"},{"src_name":"format_out_0_2","src_filter":"aformat","dst_name":"out_0_2","dst_filter":"abuffersink","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"fltp","sampling_hz":44100,"layout":"mono"},{"src_name":"auto_aresample_0","src_filter":"aresample","dst_name":"format_out_0_2","dst_filter":"aformat","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"fltp","sampling_hz":44100,"layout":"mono"}]},{"index":2,"graph":[{"src_name":"Parsed_anull_0","src_filter":"anull","dst_name":"auto_aresample_0","dst_filter":"aresample","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"u8","sampling_hz":44100,"layout":"mono"},{"src_name":"graph_2_in_2_0","src_filter":"abuffer","dst_name":"Parsed_anull_0","dst_filter":"anull","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"u8","sampling_hz":44100,"layout":"mono"},{"src_name":"format_out_0_3","src_filter":"aformat","dst_name":"out_0_3","dst_filter":"abuffersink","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"fltp","sampling_hz":44100,"layout":"mono"},{"src_name":"auto_aresample_0","src_filter":"aresample","dst_name":"format_out_0_3","dst_filter":"aformat","inpad":"default","outpad":"default","timebase": "1/44100","type":"audio","format":"fltp","sampling_hz":44100,"layout":"mono"}]}],"mapping":[{"input":{"index":0,"stream":0},"graph":{"index":0,"name":"graph 0 input from stream 
0:0"},"output":null},{"input":{"index":2,"stream":0},"graph":{"index":1,"name":"graph_1_in_2_0"},"output":null},{"input":{"index":2,"stream":0},"graph":{"index":2,"name":"graph_2_in_2_0"},"output":null},{"input":null,"graph":{"index":0,"name":"out_0_0"},"output":{"index":0,"stream":0}},{"input":{"index":1,"stream":0},"output":{"index":0,"stream":1},"copy":true},{"input":null,"graph":{"index":1,"name":"out_0_2"},"output":{"index":0,"stream":2}},{"input":null,"graph":{"index":2,"name":"out_0_3"},"output":{"index":0,"stream":3}}]}`)) + parser.Parse([]byte(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":1467,"size_bytes":1501854},{"index":1,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":4428,"size_bytes":4534541},{"index":2,"stream":0,"framerate":{"min":43.066,"max":43.068,"avg":43.066},"frame":257,"keyframe":257,"packet":257,"size_kb":257,"size_bytes":263168}],"outputs":[{"index":0,"stream":0,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":1467,"size_bytes":1501923,"extradata_size_bytes":69},{"index":0,"stream":1,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":4428,"size_bytes":4534612,"extradata_size_bytes":71},{"index":0,"stream":2,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5},{"index":0,"stream":3,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5}],"frame":149,"packet":149,"q":-1.0,"size_kb":5897,"size_bytes":6038627,"time":"0h0m5.96s","speed":4.79,"dup":0,"drop":0}`)) progress := parser.Progress() @@ -1037,10 +1096,10 @@ func TestParserHLSMapping(t *testing.T) { LogLines: 20, }).(*parser) - 
parser.Parse(`ffmpeg.inputs:[{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_720.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_1440.m3u8","format":"hls","index":1,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"anullsrc=r=44100:cl=mono","format":"lavfi","index":2,"stream":0,"type":"audio","codec":"pcm_u8","coder":"pcm_u8","bitrate_kbps":352,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`) - parser.Parse(`hls.streammap:{"address":"http://127.0.0.1:8080/memfs/live/%v.m3u8","variants":[{"variant":0,"address":"http://127.0.0.1:8080/memfs/live/0.m3u8","streams":[0,2]},{"variant":1,"address":"http://127.0.0.1:8080/memfs/live/1.m3u8","streams":[1,3]}]}`) - 
parser.Parse(`ffmpeg.outputs:[{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":1,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":2,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":3,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`) - 
parser.Parse(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":1467,"size_bytes":1501854},{"index":1,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":4428,"size_bytes":4534541},{"index":2,"stream":0,"framerate":{"min":43.066,"max":43.068,"avg":43.066},"frame":257,"keyframe":257,"packet":257,"size_kb":257,"size_bytes":263168}],"outputs":[{"index":0,"stream":0,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":1467,"size_bytes":1501923,"extradata_size_bytes":69},{"index":0,"stream":1,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":4428,"size_bytes":4534612,"extradata_size_bytes":71},{"index":0,"stream":2,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5},{"index":0,"stream":3,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5}],"frame":149,"packet":149,"q":-1.0,"size_kb":5897,"size_bytes":6038627,"time":"0h0m5.96s","speed":4.79,"dup":0,"drop":0}`) + parser.Parse([]byte(`ffmpeg.inputs:[{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_720.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_1440.m3u8","format":"hls","index":1,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"anullsrc=r=44100:cl=mono","format":"lavfi","index":2,"stream":0,"type":"audio","codec":"pcm_u8","coder":"pcm_u8","bitrate_kbps":352,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`)) + 
parser.Parse([]byte(`hls.streammap:{"address":"http://127.0.0.1:8080/memfs/live/%v.m3u8","variants":[{"variant":0,"address":"http://127.0.0.1:8080/memfs/live/0.m3u8","streams":[0,2]},{"variant":1,"address":"http://127.0.0.1:8080/memfs/live/1.m3u8","streams":[1,3]}]}`)) + parser.Parse([]byte(`ffmpeg.outputs:[{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":1,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":2,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":3,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`)) + 
parser.Parse([]byte(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":1467,"size_bytes":1501854},{"index":1,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":4428,"size_bytes":4534541},{"index":2,"stream":0,"framerate":{"min":43.066,"max":43.068,"avg":43.066},"frame":257,"keyframe":257,"packet":257,"size_kb":257,"size_bytes":263168}],"outputs":[{"index":0,"stream":0,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":1467,"size_bytes":1501923,"extradata_size_bytes":69},{"index":0,"stream":1,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":4428,"size_bytes":4534612,"extradata_size_bytes":71},{"index":0,"stream":2,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5},{"index":0,"stream":3,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5}],"frame":149,"packet":149,"q":-1.0,"size_kb":5897,"size_bytes":6038627,"time":"0h0m5.96s","speed":4.79,"dup":0,"drop":0}`)) progress := parser.Progress() @@ -1073,14 +1132,14 @@ func TestParserPatterns(t *testing.T) { }, }) - p.Parse("some foobar more") + p.Parse([]byte("some foobar more")) require.Empty(t, p.Report().Matches) - p.Parse("foobar some more") + p.Parse([]byte("foobar some more")) require.Equal(t, 1, len(p.Report().Matches)) require.Equal(t, "foobar some more", p.Report().Matches[0]) - p.Parse("some more foobar") + p.Parse([]byte("some more foobar")) require.Equal(t, 2, len(p.Report().Matches)) require.Equal(t, "some more foobar", p.Report().Matches[1]) @@ -1106,3 +1165,25 @@ func TestParserPatternsError(t *testing.T) { require.Equal(t, 1, len(parser.Report().Matches)) } + +func BenchmarkParserString(b *testing.B) { + parser := New(Config{ + LogLines: 100, + }) + + 
parser.Parse([]byte(`ffmpeg.inputs:[{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_720.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk_1440.m3u8","format":"hls","index":1,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"anullsrc=r=44100:cl=mono","format":"lavfi","index":2,"stream":0,"type":"audio","codec":"pcm_u8","coder":"pcm_u8","bitrate_kbps":352,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`)) + parser.Parse([]byte(`hls.streammap:{"address":"http://127.0.0.1:8080/memfs/live/%v.m3u8","variants":[{"variant":0,"address":"http://127.0.0.1:8080/memfs/live/0.m3u8","streams":[0,2]},{"variant":1,"address":"http://127.0.0.1:8080/memfs/live/1.m3u8","streams":[1,3]}]}`)) + 
parser.Parse([]byte(`ffmpeg.outputs:[{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":1,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":2560,"height":1440},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":2,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1},{"url":"http://127.0.0.1:8080/memfs/live/%v.m3u8","format":"hls","index":0,"stream":3,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":69,"duration_sec":0.000000,"language":"und","sampling_hz":44100,"layout":"mono","channels":1}]`)) + + data := [][]byte{ + 
[]byte(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":1467,"size_bytes":1501854},{"index":1,"stream":0,"framerate":{"min":24.975,"max":24.975,"avg":24.975},"frame":149,"keyframe":3,"packet":149,"size_kb":4428,"size_bytes":4534541},{"index":2,"stream":0,"framerate":{"min":43.066,"max":43.068,"avg":43.066},"frame":257,"keyframe":257,"packet":257,"size_kb":257,"size_bytes":263168}],"outputs":[{"index":0,"stream":0,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":1467,"size_bytes":1501923,"extradata_size_bytes":69},{"index":0,"stream":1,"frame":149,"keyframe":3,"packet":149,"q":-1.0,"size_kb":4428,"size_bytes":4534612,"extradata_size_bytes":71},{"index":0,"stream":2,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5},{"index":0,"stream":3,"frame":257,"keyframe":256,"packet":256,"size_kb":1,"size_bytes":1046,"extradata_size_bytes":5}],"frame":149,"packet":149,"q":-1.0,"size_kb":5897,"size_bytes":6038627,"time":"0h0m5.96s","speed":4.79,"dup":0,"drop":0}`), + []byte(`[https @ 0x557c840d1080] Opening 'https://ch-fra-n16.livespotting.com/vpu/e9slfpe3/z60wzayk_720_100794.ts' for reading`), + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + parser.Parse(data[0]) + parser.Parse(data[1]) + } +} diff --git a/ffmpeg/parse/types.go b/ffmpeg/parse/types.go index 3f7910cd..01ffcc78 100644 --- a/ffmpeg/parse/types.go +++ b/ffmpeg/parse/types.go @@ -5,6 +5,7 @@ import ( "time" "github.com/datarhei/core/v16/encoding/json" + "github.com/datarhei/core/v16/process" ) // Duration represents a time.Duration @@ -500,17 +501,21 @@ type AVstream struct { } type Usage struct { - CPU struct { - NCPU float64 - Average float64 - Max float64 - Limit float64 - } - Memory struct { - Average float64 - Max uint64 - Limit uint64 - } + CPU UsageCPU + Memory UsageMemory +} + +type UsageCPU struct { + NCPU float64 + Average float64 + Max float64 + 
Limit float64 +} + +type UsageMemory struct { + Average float64 + Max uint64 + Limit uint64 } type GraphElement struct { @@ -542,3 +547,28 @@ type StreamMapping struct { Graphs []GraphElement Mapping []GraphMapping } + +// Report represents a log report, including the prelude and the last log lines of the process. +type Report struct { + CreatedAt time.Time + Prelude []string + Log []process.Line + Matches []string +} + +// ReportHistoryEntry represents an historical log report, including the exit status of the +// process and the last progress data. +type ReportHistoryEntry struct { + Report + + ExitedAt time.Time + ExitState string + Progress Progress + Usage Usage +} + +type ReportHistorySearchResult struct { + CreatedAt time.Time + ExitedAt time.Time + ExitState string +} diff --git a/ffmpeg/probe/prober.go b/ffmpeg/probe/prober.go index 7a51baad..f6a94588 100644 --- a/ffmpeg/probe/prober.go +++ b/ffmpeg/probe/prober.go @@ -1,6 +1,7 @@ package probe import ( + "bytes" "strings" "time" @@ -54,14 +55,14 @@ func (p *prober) Probe() Probe { return probe } -func (p *prober) Parse(line string) uint64 { - if strings.HasPrefix(line, "avstream.progress:") { +func (p *prober) Parse(line []byte) uint64 { + if bytes.HasPrefix(line, []byte("avstream.progress:")) { return 0 } p.data = append(p.data, process.Line{ Timestamp: time.Now(), - Data: line, + Data: string(line), }) return 0 diff --git a/ffmpeg/probe/prober_test.go b/ffmpeg/probe/prober_test.go index d8836cbb..d2fd9c2d 100644 --- a/ffmpeg/probe/prober_test.go +++ b/ffmpeg/probe/prober_test.go @@ -56,7 +56,7 @@ Press [q] to stop, [?] for help` data := strings.Split(rawdata, "\n") for _, d := range data { - prober.Parse(d) + prober.Parse([]byte(d)) } prober.ResetStats() @@ -169,8 +169,8 @@ Press [q] to stop, [?] 
for help` func TestJSON(t *testing.T) { prober := New(Config{}).(*prober) - prober.Parse("foobar") - prober.Parse(`ffmpeg.inputs:[{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","format":"playout","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":20.666666,"pix_fmt":"yuvj420p","width":1280,"height":720}]`) + prober.Parse([]byte("foobar")) + prober.Parse([]byte(`ffmpeg.inputs:[{"url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","format":"playout","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":20.666666,"pix_fmt":"yuvj420p","width":1280,"height":720}]`)) prober.ResetStats() diff --git a/go.mod b/go.mod index 81a55f23..a19cd943 100644 --- a/go.mod +++ b/go.mod @@ -1,21 +1,20 @@ module github.com/datarhei/core/v16 -go 1.21.0 +go 1.22.0 toolchain go1.22.1 require ( - github.com/99designs/gqlgen v0.17.47 + github.com/99designs/gqlgen v0.17.49 github.com/Masterminds/semver/v3 v3.2.1 github.com/adhocore/gronx v1.8.1 github.com/andybalholm/brotli v1.1.0 github.com/atrox/haikunatorgo/v2 v2.0.1 github.com/caddyserver/certmagic v0.21.3 - github.com/casbin/casbin/v2 v2.90.0 github.com/datarhei/gosrt v0.6.0 github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e github.com/fujiwara/shapeio v1.0.0 - github.com/go-playground/validator/v10 v10.21.0 + github.com/go-playground/validator/v10 v10.22.0 github.com/gobwas/glob v0.2.3 github.com/goccy/go-json v0.10.3 github.com/golang-jwt/jwt/v4 v4.5.0 @@ -23,31 +22,32 @@ require ( github.com/google/gops v0.3.28 github.com/google/uuid v1.6.0 github.com/hashicorp/go-hclog v1.6.3 - github.com/hashicorp/raft v1.6.1 + github.com/hashicorp/raft v1.7.0 github.com/hashicorp/raft-boltdb/v2 v2.3.0 github.com/invopop/jsonschema v0.4.0 github.com/joho/godotenv v1.5.1 - github.com/klauspost/compress v1.17.8 - github.com/klauspost/cpuid/v2 v2.2.7 + 
github.com/klauspost/compress v1.17.9 + github.com/klauspost/cpuid/v2 v2.2.8 github.com/labstack/echo/v4 v4.12.0 github.com/lestrrat-go/strftime v1.0.6 github.com/lithammer/shortuuid/v4 v4.0.0 github.com/mattn/go-isatty v0.0.20 - github.com/minio/minio-go/v7 v7.0.70 + github.com/minio/minio-go/v7 v7.0.74 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 github.com/prometheus/client_golang v1.19.1 - github.com/puzpuzpuz/xsync/v3 v3.1.0 + github.com/puzpuzpuz/xsync/v3 v3.4.0 github.com/shirou/gopsutil/v3 v3.24.5 github.com/stretchr/testify v1.9.0 github.com/swaggo/echo-swagger v1.4.1 github.com/swaggo/swag v1.16.3 - github.com/vektah/gqlparser/v2 v2.5.12 + github.com/tklauser/go-sysconf v0.3.14 + github.com/vektah/gqlparser/v2 v2.5.16 github.com/xeipuuv/gojsonschema v1.2.0 go.etcd.io/bbolt v1.3.10 go.uber.org/automaxprocs v1.5.3 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.24.0 - golang.org/x/mod v0.18.0 + golang.org/x/crypto v0.25.0 + golang.org/x/mod v0.19.0 ) //replace github.com/datarhei/core-client-go/v16 => ../core-client-go @@ -60,7 +60,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/caddyserver/zerossl v0.1.3 // indirect - github.com/casbin/govaluate v1.1.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -68,6 +67,7 @@ require ( github.com/fatih/color v1.17.0 // indirect github.com/gabriel-vasile/mimetype v1.4.4 // indirect github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect @@ -76,7 +76,7 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect - 
github.com/gorilla/websocket v1.5.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect @@ -90,21 +90,21 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mholt/acmez/v2 v2.0.1 // indirect - github.com/miekg/dns v1.1.59 // indirect + github.com/miekg/dns v1.1.61 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect + github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sosodev/duration v1.3.1 // indirect - github.com/swaggo/files/v2 v2.0.0 // indirect - github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/swaggo/files/v2 v2.0.1 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/urfave/cli/v2 v2.27.2 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect @@ -115,14 +115,13 @@ require ( github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/blake3 v0.2.3 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 
// indirect - golang.org/x/tools v0.22.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect + golang.org/x/tools v0.23.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index c4b26626..4d098652 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -github.com/99designs/gqlgen v0.17.47 h1:M9DTK8X3+3ATNBfZlHBwMwNngn4hhZWDxNmTiuQU5tQ= -github.com/99designs/gqlgen v0.17.47/go.mod h1:ejVkldSdtmuudqmtfaiqjwlGXWAhIv0DKXGXFY25F04= +github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ= +github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= @@ -39,11 +39,6 @@ github.com/caddyserver/certmagic v0.21.3 h1:pqRRry3yuB4CWBVq9+cUqu+Y6E2z8TswbhNx github.com/caddyserver/certmagic v0.21.3/go.mod h1:Zq6pklO9nVRl3DIFUw9gVUfXKdpc/0qwTUAQMBlfgtI= github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= -github.com/casbin/casbin/v2 v2.90.0 h1:65jvnocLmG8FfrfKcuqRd+Tp5B7R/OBxOVu1IhUMT5A= -github.com/casbin/casbin/v2 v2.90.0/go.mod h1:jX8uoN4veP85O/n2674r2qtfSXI6myvxW85f6TH50fw= -github.com/casbin/govaluate v1.1.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= -github.com/casbin/govaluate v1.1.1 h1:J1rFKIBhiC5xr0APd5HP6rDL+xt+BRoyq1pa4o2i/5c= -github.com/casbin/govaluate v1.1.1/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -72,6 +67,8 @@ github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -93,8 +90,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.21.0 h1:4fZA11ovvtkdgaeev9RGWPgc1uj3H8W+rNYyH/ySBb0= -github.com/go-playground/validator/v10 v10.21.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao= +github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -107,8 +104,6 @@ 
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -122,8 +117,8 @@ github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= @@ -142,8 +137,8 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 
h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/raft v1.6.1 h1:v/jm5fcYHvVkL0akByAp+IDdDSzCNCGhdO6VdB56HIM= -github.com/hashicorp/raft v1.6.1/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= +github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= +github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ= github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA= @@ -160,12 +155,12 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= -github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.8 
h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -205,18 +200,20 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/acmez/v2 v2.0.1 h1:3/3N0u1pLjMK4sNEAFSI+bcvzbPhRpY383sy1kLHJ6k= github.com/mholt/acmez/v2 v2.0.1/go.mod h1:fX4c9r5jYwMyMsC+7tkYRxHibkOTgta5DIFGoe67e1U= -github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= -github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= +github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= +github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.70 h1:1u9NtMgfK1U42kUxcsl5v0yj6TEOPR497OAQxpJnn2g= -github.com/minio/minio-go/v7 v7.0.70/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo= +github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0= +github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -245,15 +242,15 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/puzpuzpuz/xsync/v3 v3.1.0 h1:EewKT7/LNac5SLiEblJeUu8z5eERHrmRLnMQL2d7qX4= -github.com/puzpuzpuz/xsync/v3 v3.1.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4= +github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= @@ -283,8 +280,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/swaggo/echo-swagger v1.4.1 h1:Yf0uPaJWp1uRtDloZALyLnvdBeoEL5Kc7DtnjzO/TUk= github.com/swaggo/echo-swagger v1.4.1/go.mod h1:C8bSi+9yH2FLZsnhqMZLIZddpUxZdBYuNHbtaS1Hljc= -github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw= -github.com/swaggo/files/v2 v2.0.0/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM= +github.com/swaggo/files/v2 v2.0.1 h1:XCVJO/i/VosCDsJu1YLpdejGsGnBE9deRMpjN4pJLHk= +github.com/swaggo/files/v2 v2.0.1/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM= github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= @@ -298,8 +295,8 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate 
v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vektah/gqlparser/v2 v2.5.12 h1:COMhVVnql6RoaF7+aTBWiTADdpLGyZWU3K/NwW0ph98= -github.com/vektah/gqlparser/v2 v2.5.12/go.mod h1:WQQjFc+I1YIzoPvZBhUQX7waZgg3pMLi0r8KymvAE2w= +github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8= +github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -329,18 +326,16 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= @@ -360,27 +355,24 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
diff --git a/http/api/avstream.go b/http/api/avstream.go index 7cb55769..5af4e90d 100644 --- a/http/api/avstream.go +++ b/http/api/avstream.go @@ -22,6 +22,17 @@ func (i *AVstreamIO) Unmarshal(io *app.AVstreamIO) { i.Size = io.Size } +func (i *AVstreamIO) Marshal() app.AVstreamIO { + io := app.AVstreamIO{ + State: i.State, + Packet: i.Packet, + Time: i.Time, + Size: i.Size, + } + + return io +} + type AVstream struct { Input AVstreamIO `json:"input"` Output AVstreamIO `json:"output"` @@ -56,3 +67,22 @@ func (a *AVstream) Unmarshal(av *app.AVstream) { a.Input.Unmarshal(&av.Input) a.Output.Unmarshal(&av.Output) } + +func (a *AVstream) Marshal() *app.AVstream { + av := &app.AVstream{ + Input: a.Input.Marshal(), + Output: a.Output.Marshal(), + Aqueue: a.Aqueue, + Queue: a.Queue, + Dup: a.Dup, + Drop: a.Drop, + Enc: a.Enc, + Looping: a.Looping, + LoopingRuntime: a.LoopingRuntime, + Duplicating: a.Duplicating, + GOP: a.GOP, + Mode: a.Mode, + } + + return av +} diff --git a/http/api/avstream_test.go b/http/api/avstream_test.go new file mode 100644 index 00000000..f8bb4d73 --- /dev/null +++ b/http/api/avstream_test.go @@ -0,0 +1,57 @@ +package api + +import ( + "testing" + + "github.com/datarhei/core/v16/restream/app" + + "github.com/stretchr/testify/require" +) + +func TestAVStreamIO(t *testing.T) { + original := app.AVstreamIO{ + State: "xxx", + Packet: 100, + Time: 42, + Size: 95744, + } + + p := AVstreamIO{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestAVStream(t *testing.T) { + original := app.AVstream{ + Input: app.AVstreamIO{ + State: "xxx", + Packet: 100, + Time: 42, + Size: 95744, + }, + Output: app.AVstreamIO{ + State: "yyy", + Packet: 7473, + Time: 57634, + Size: 363, + }, + Aqueue: 3829, + Queue: 4398, + Dup: 47, + Drop: 85, + Enc: 4578, + Looping: true, + LoopingRuntime: 483, + Duplicating: true, + GOP: "gop", + Mode: "mode", + } + + p := AVstream{} + p.Unmarshal(&original) + restored := p.Marshal() 
+ + require.Equal(t, &original, restored) +} diff --git a/http/api/cluster.go b/http/api/cluster.go index 29a896b3..1bfb45bc 100644 --- a/http/api/cluster.go +++ b/http/api/cluster.go @@ -43,8 +43,11 @@ type ClusterNodeResources struct { NCPU float64 `json:"ncpu"` CPU float64 `json:"cpu_used"` // percent 0-100*npcu CPULimit float64 `json:"cpu_limit"` // percent 0-100*npcu + CPUCore float64 `json:"cpu_core"` // percent 0-100*ncpu Mem uint64 `json:"memory_used_bytes"` // bytes MemLimit uint64 `json:"memory_limit_bytes"` // bytes + MemTotal uint64 `json:"memory_total_bytes"` // bytes + MemCore uint64 `json:"memory_core_bytes"` // bytes Error string `json:"error"` } diff --git a/http/api/event.go b/http/api/event.go index e48f256a..416e37d5 100644 --- a/http/api/event.go +++ b/http/api/event.go @@ -19,7 +19,7 @@ type Event struct { Data map[string]string `json:"data"` } -func (e *Event) Marshal(le *log.Event) { +func (e *Event) Unmarshal(le *log.Event) { e.Timestamp = le.Time.Unix() e.Level = int(le.Level) e.Component = strings.ToLower(le.Component) diff --git a/http/api/iam.go b/http/api/iam.go index 3109e51f..816b27fc 100644 --- a/http/api/iam.go +++ b/http/api/iam.go @@ -3,8 +3,8 @@ package api import ( "time" - "github.com/datarhei/core/v16/iam/access" "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" ) type IAMUser struct { @@ -17,7 +17,7 @@ type IAMUser struct { Policies []IAMPolicy `json:"policies"` } -func (u *IAMUser) Marshal(user identity.User, policies []access.Policy) { +func (u *IAMUser) Marshal(user identity.User, policies []policy.Policy) { u.CreatedAt = user.CreatedAt.Unix() u.UpdatedAt = user.UpdatedAt.Unix() u.Name = user.Name @@ -52,7 +52,7 @@ func (u *IAMUser) Marshal(user identity.User, policies []access.Policy) { } } -func (u *IAMUser) Unmarshal() (identity.User, []access.Policy) { +func (u *IAMUser) Unmarshal() (identity.User, []policy.Policy) { iamuser := identity.User{ CreatedAt: time.Unix(u.CreatedAt, 0), 
UpdatedAt: time.Unix(u.UpdatedAt, 0), @@ -79,10 +79,10 @@ func (u *IAMUser) Unmarshal() (identity.User, []access.Policy) { }, } - iampolicies := []access.Policy{} + iampolicies := []policy.Policy{} for _, p := range u.Policies { - iampolicies = append(iampolicies, access.Policy{ + iampolicies = append(iampolicies, policy.Policy{ Name: u.Name, Domain: p.Domain, Types: p.Types, diff --git a/http/api/json.go b/http/api/json.go deleted file mode 100644 index f10b339b..00000000 --- a/http/api/json.go +++ /dev/null @@ -1,19 +0,0 @@ -package api - -import ( - "fmt" - - "github.com/datarhei/core/v16/encoding/json" -) - -func ToNumber(f float64) json.Number { - var s string - - if f == float64(int64(f)) { - s = fmt.Sprintf("%.0f", f) // 0 decimal if integer - } else { - s = fmt.Sprintf("%.3f", f) // max. 3 decimal if float - } - - return json.Number(s) -} diff --git a/http/api/probe.go b/http/api/probe.go index 5f16054b..7f077400 100644 --- a/http/api/probe.go +++ b/http/api/probe.go @@ -44,10 +44,10 @@ func (i *ProbeIO) Unmarshal(io *app.ProbeIO) { i.Type = io.Type i.Codec = io.Codec i.Coder = io.Coder - i.Bitrate = ToNumber(io.Bitrate) - i.Duration = ToNumber(io.Duration) + i.Bitrate = json.ToNumber(io.Bitrate) + i.Duration = json.ToNumber(io.Duration) - i.FPS = ToNumber(io.FPS) + i.FPS = json.ToNumber(io.FPS) i.Pixfmt = io.Pixfmt i.Width = io.Width i.Height = io.Height @@ -64,7 +64,7 @@ type Probe struct { Log []string `json:"log"` } -// Unmarshal converts a restreamer Probe to a Probe in API representation +// Unmarshal converts a core Probe to a Probe in API representation func (probe *Probe) Unmarshal(p *app.Probe) { if p == nil { return diff --git a/http/api/process.go b/http/api/process.go index a4b5c710..baf87707 100644 --- a/http/api/process.go +++ b/http/api/process.go @@ -1,6 +1,9 @@ package api import ( + "strconv" + + "github.com/datarhei/core/v16/cluster/store" "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/restream/app" @@ -28,6 
+31,114 @@ type Process struct { Metadata Metadata `json:"metadata,omitempty"` } +func (p *Process) Unmarshal(ap *app.Process, ac *app.Config, as *app.State, ar *app.Report, am interface{}) { + p.ID = ap.ID + p.Owner = ap.Owner + p.Domain = ap.Domain + p.Reference = ap.Reference + p.Type = "ffmpeg" + p.CreatedAt = ap.CreatedAt + p.UpdatedAt = ap.UpdatedAt + + p.Config = nil + if ac != nil { + p.Config = &ProcessConfig{} + p.Config.Unmarshal(ap.Config, nil) + } + + p.State = nil + if as != nil { + p.State = &ProcessState{} + p.State.Unmarshal(as) + } + + p.Report = nil + if ar != nil { + p.Report = &ProcessReport{} + p.Report.Unmarshal(ar) + } + + p.Metadata = nil + if am != nil { + p.Metadata = NewMetadata(am) + } +} + +func (p *Process) UnmarshalStore(s store.Process, config, state, report, metadata bool) { + p.ID = s.Config.ID + p.Owner = s.Config.Owner + p.Domain = s.Config.Domain + p.Type = "ffmpeg" + p.Reference = s.Config.Reference + p.CreatedAt = s.CreatedAt.Unix() + p.UpdatedAt = s.UpdatedAt.Unix() + + p.Metadata = nil + if metadata { + p.Metadata = s.Metadata + } + + p.Config = nil + if config { + config := &ProcessConfig{} + config.Unmarshal(s.Config, s.Metadata) + + p.Config = config + } + + p.State = nil + if state { + p.State = &ProcessState{ + Order: s.Order, + LastLog: s.Error, + Resources: ProcessUsage{ + CPU: ProcessUsageCPU{ + NCPU: json.ToNumber(1), + Limit: json.ToNumber(s.Config.LimitCPU), + }, + Memory: ProcessUsageMemory{ + Limit: s.Config.LimitMemory, + }, + }, + Command: []string{}, + Progress: &Progress{ + Input: []ProgressIO{}, + Output: []ProgressIO{}, + Mapping: StreamMapping{ + Graphs: []GraphElement{}, + Mapping: []GraphMapping{}, + }, + }, + } + + if len(s.Error) != 0 { + p.State.State = "failed" + } else { + p.State.State = "deploying" + } + } + + if report { + p.Report = &ProcessReport{ + ProcessReportEntry: ProcessReportEntry{ + CreatedAt: s.CreatedAt.Unix(), + Prelude: []string{}, + Log: [][2]string{}, + Matches: []string{}, + }, 
+ } + + if len(s.Error) != 0 { + p.Report.Prelude = []string{s.Error} + p.Report.Log = [][2]string{ + {strconv.FormatInt(s.CreatedAt.Unix(), 10), s.Error}, + } + //process.Report.ExitedAt = p.CreatedAt.Unix() + //process.Report.ExitState = "failed" + } + } +} + // ProcessConfigIO represents an input or output of an ffmpeg process config type ProcessConfigIO struct { ID string `json:"id"` @@ -70,7 +181,7 @@ type ProcessConfig struct { Metadata map[string]interface{} `json:"metadata,omitempty"` } -// Marshal converts a process config in API representation to a restreamer process config and metadata +// Marshal converts a process config in API representation to a core process config and metadata func (cfg *ProcessConfig) Marshal() (*app.Config, map[string]interface{}) { p := &app.Config{ ID: cfg.ID, @@ -153,8 +264,8 @@ func (cfg *ProcessConfig) generateInputOutputIDs(ioconfig []ProcessConfigIO) { } } -// Unmarshal converts a restream process config to a process config in API representation -func (cfg *ProcessConfig) Unmarshal(c *app.Config) { +// Unmarshal converts a core process config to a process config in API representation +func (cfg *ProcessConfig) Unmarshal(c *app.Config, metadata map[string]interface{}) { if c == nil { return } @@ -212,6 +323,8 @@ func (cfg *ProcessConfig) Unmarshal(c *app.Config) { cfg.LogPatterns = make([]string, len(c.LogPatterns)) copy(cfg.LogPatterns, c.LogPatterns) + + cfg.Metadata = metadata } func (p *ProcessConfig) ProcessID() app.ProcessID { @@ -236,7 +349,7 @@ type ProcessState struct { Command []string `json:"command"` } -// Unmarshal converts a restreamer ffmpeg process state to a state in API representation +// Unmarshal converts a core ffmpeg process state to a state in API representation func (s *ProcessState) Unmarshal(state *app.State) { if state == nil { return @@ -249,19 +362,19 @@ func (s *ProcessState) Unmarshal(state *app.State) { s.LastLog = state.LastLog s.Progress = &Progress{} s.Memory = state.Memory - s.CPU = 
ToNumber(state.CPU) + s.CPU = json.ToNumber(state.CPU) s.LimitMode = state.LimitMode s.Resources.CPU = ProcessUsageCPU{ - NCPU: ToNumber(state.Resources.CPU.NCPU), - Current: ToNumber(state.Resources.CPU.Current), - Average: ToNumber(state.Resources.CPU.Average), - Max: ToNumber(state.Resources.CPU.Max), - Limit: ToNumber(state.Resources.CPU.Limit), + NCPU: json.ToNumber(state.Resources.CPU.NCPU), + Current: json.ToNumber(state.Resources.CPU.Current), + Average: json.ToNumber(state.Resources.CPU.Average), + Max: json.ToNumber(state.Resources.CPU.Max), + Limit: json.ToNumber(state.Resources.CPU.Limit), IsThrottling: state.Resources.CPU.IsThrottling, } s.Resources.Memory = ProcessUsageMemory{ Current: state.Resources.Memory.Current, - Average: ToNumber(state.Resources.Memory.Average), + Average: json.ToNumber(state.Resources.Memory.Average), Max: state.Resources.Memory.Max, Limit: state.Resources.Memory.Limit, } @@ -279,6 +392,43 @@ type ProcessUsageCPU struct { IsThrottling bool `json:"throttling"` } +func (p *ProcessUsageCPU) Unmarshal(pp *app.ProcessUsageCPU) { + p.NCPU = json.ToNumber(pp.NCPU) + p.Current = json.ToNumber(pp.Current) + p.Average = json.ToNumber(pp.Average) + p.Max = json.ToNumber(pp.Max) + p.Limit = json.ToNumber(pp.Limit) + p.IsThrottling = pp.IsThrottling +} + +func (p *ProcessUsageCPU) Marshal() app.ProcessUsageCPU { + pp := app.ProcessUsageCPU{ + IsThrottling: p.IsThrottling, + } + + if x, err := p.NCPU.Float64(); err == nil { + pp.NCPU = x + } + + if x, err := p.Current.Float64(); err == nil { + pp.Current = x + } + + if x, err := p.Average.Float64(); err == nil { + pp.Average = x + } + + if x, err := p.Max.Float64(); err == nil { + pp.Max = x + } + + if x, err := p.Limit.Float64(); err == nil { + pp.Limit = x + } + + return pp +} + type ProcessUsageMemory struct { Current uint64 `json:"cur" format:"uint64"` Average json.Number `json:"avg" swaggertype:"number" jsonschema:"type=number"` @@ -286,7 +436,42 @@ type ProcessUsageMemory struct { 
Limit uint64 `json:"limit" format:"uint64"` } +func (p *ProcessUsageMemory) Unmarshal(pp *app.ProcessUsageMemory) { + p.Current = pp.Current + p.Average = json.ToNumber(pp.Average) + p.Max = pp.Max + p.Limit = pp.Limit +} + +func (p *ProcessUsageMemory) Marshal() app.ProcessUsageMemory { + pp := app.ProcessUsageMemory{ + Current: p.Current, + Max: p.Max, + Limit: p.Limit, + } + + if x, err := p.Average.Float64(); err == nil { + pp.Average = x + } + + return pp +} + type ProcessUsage struct { CPU ProcessUsageCPU `json:"cpu_usage"` Memory ProcessUsageMemory `json:"memory_bytes"` } + +func (p *ProcessUsage) Unmarshal(pp *app.ProcessUsage) { + p.CPU.Unmarshal(&pp.CPU) + p.Memory.Unmarshal(&pp.Memory) +} + +func (p *ProcessUsage) Marshal() app.ProcessUsage { + pp := app.ProcessUsage{ + CPU: p.CPU.Marshal(), + Memory: p.Memory.Marshal(), + } + + return pp +} diff --git a/http/api/process_test.go b/http/api/process_test.go new file mode 100644 index 00000000..6dddce39 --- /dev/null +++ b/http/api/process_test.go @@ -0,0 +1,114 @@ +package api + +import ( + "testing" + + "github.com/datarhei/core/v16/restream/app" + + "github.com/stretchr/testify/require" +) + +func TestProcessUsageCPU(t *testing.T) { + original := app.ProcessUsageCPU{ + NCPU: 1.5, + Current: 0.7, + Average: 0.9, + Max: 1.3, + Limit: 100, + IsThrottling: true, + } + + p := ProcessUsageCPU{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestProcessUsageMemory(t *testing.T) { + original := app.ProcessUsageMemory{ + Current: 100, + Average: 72, + Max: 150, + Limit: 200, + } + + p := ProcessUsageMemory{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestProcessUsage(t *testing.T) { + original := app.ProcessUsage{ + CPU: app.ProcessUsageCPU{ + NCPU: 1.5, + Current: 0.7, + Average: 0.9, + Max: 1.3, + Limit: 100, + IsThrottling: true, + }, + Memory: app.ProcessUsageMemory{ + Current: 100, + Average: 
72, + Max: 150, + Limit: 200, + }, + } + + p := ProcessUsage{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestProcessConfig(t *testing.T) { + original := app.Config{ + ID: "foobar", + Reference: "none", + Owner: "me", + Domain: "all", + Input: []app.ConfigIO{ + { + ID: "in", + Address: "example_in", + Options: []string{"io1", "io2"}, + }, + }, + Output: []app.ConfigIO{ + { + ID: "out", + Address: "example_out", + Options: []string{"oo1", "oo2", "oo3"}, + Cleanup: []app.ConfigIOCleanup{ + { + Pattern: "xxxx", + MaxFiles: 5, + MaxFileAge: 100, + PurgeOnDelete: true, + }, + }, + }, + }, + Options: []string{"o1", "o2", "o3"}, + Reconnect: true, + ReconnectDelay: 20, + Autostart: true, + StaleTimeout: 50, + Timeout: 60, + Scheduler: "xxx", + LogPatterns: []string{"bla", "blubb"}, + LimitCPU: 10, + LimitMemory: 100 * 1024 * 1024, + LimitWaitFor: 20, + } + + p := ProcessConfig{} + p.Unmarshal(&original, nil) + restored, _ := p.Marshal() + + require.Equal(t, &original, restored) +} diff --git a/http/api/progress.go b/http/api/progress.go index e7b48116..93671b57 100644 --- a/http/api/progress.go +++ b/http/api/progress.go @@ -1,8 +1,6 @@ package api import ( - "fmt" - "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/restream/app" ) @@ -50,7 +48,7 @@ type ProgressIO struct { AVstream *AVstream `json:"avstream" jsonschema:"anyof_type=null;object"` } -// Unmarshal converts a restreamer ProgressIO to a ProgressIO in API representation +// Unmarshal converts a core ProgressIO to a ProgressIO in API representation func (i *ProgressIO) Unmarshal(io *app.ProgressIO) { if io == nil { return @@ -66,17 +64,17 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) { i.Coder = io.Coder i.Frame = io.Frame i.Keyframe = io.Keyframe - i.Framerate.Min = json.Number(fmt.Sprintf("%.3f", io.Framerate.Min)) - i.Framerate.Max = json.Number(fmt.Sprintf("%.3f", io.Framerate.Max)) - i.Framerate.Average = 
json.Number(fmt.Sprintf("%.3f", io.Framerate.Average)) - i.FPS = json.Number(fmt.Sprintf("%.3f", io.FPS)) + i.Framerate.Min = json.ToNumber(io.Framerate.Min) + i.Framerate.Max = json.ToNumber(io.Framerate.Max) + i.Framerate.Average = json.ToNumber(io.Framerate.Average) + i.FPS = json.ToNumber(io.FPS) i.Packet = io.Packet - i.PPS = json.Number(fmt.Sprintf("%.3f", io.PPS)) + i.PPS = json.ToNumber(io.PPS) i.Size = io.Size / 1024 - i.Bitrate = json.Number(fmt.Sprintf("%.3f", io.Bitrate/1024)) + i.Bitrate = json.ToNumber(io.Bitrate / 1024) i.Extradata = io.Extradata i.Pixfmt = io.Pixfmt - i.Quantizer = json.Number(fmt.Sprintf("%.3f", io.Quantizer)) + i.Quantizer = json.ToNumber(io.Quantizer) i.Width = io.Width i.Height = io.Height i.Sampling = io.Sampling @@ -89,6 +87,64 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) { } } +func (i *ProgressIO) Marshal() app.ProgressIO { + p := app.ProgressIO{ + ID: i.ID, + Address: i.Address, + Index: i.Index, + Stream: i.Stream, + Format: i.Format, + Type: i.Type, + Codec: i.Codec, + Coder: i.Coder, + Frame: i.Frame, + Keyframe: i.Keyframe, + Packet: i.Packet, + Size: i.Size * 1024, + Extradata: i.Extradata, + Pixfmt: i.Pixfmt, + Width: i.Width, + Height: i.Height, + Sampling: i.Sampling, + Layout: i.Layout, + Channels: i.Channels, + } + + if x, err := i.Framerate.Min.Float64(); err == nil { + p.Framerate.Min = x + } + + if x, err := i.Framerate.Max.Float64(); err == nil { + p.Framerate.Max = x + } + + if x, err := i.Framerate.Average.Float64(); err == nil { + p.Framerate.Average = x + } + + if x, err := i.FPS.Float64(); err == nil { + p.FPS = x + } + + if x, err := i.PPS.Float64(); err == nil { + p.PPS = x + } + + if x, err := i.Bitrate.Float64(); err == nil { + p.Bitrate = x * 1024 + } + + if x, err := i.Quantizer.Float64(); err == nil { + p.Quantizer = x + } + + if i.AVstream != nil { + p.AVstream = i.AVstream.Marshal() + } + + return p +} + // Progress represents the progress of an ffmpeg process type Progress struct { 
Started bool `json:"started"` @@ -107,38 +163,82 @@ type Progress struct { Dup uint64 `json:"dup" format:"uint64"` } -// Unmarshal converts a restreamer Progress to a Progress in API representation -func (progress *Progress) Unmarshal(p *app.Progress) { - progress.Input = []ProgressIO{} - progress.Output = []ProgressIO{} +// Unmarshal converts a core Progress to a Progress in API representation +func (p *Progress) Unmarshal(pp *app.Progress) { + p.Input = []ProgressIO{} + p.Output = []ProgressIO{} - if p == nil { + if pp == nil { return } - progress.Started = p.Started - progress.Input = make([]ProgressIO, len(p.Input)) - progress.Output = make([]ProgressIO, len(p.Output)) - progress.Frame = p.Frame - progress.Packet = p.Packet - progress.FPS = ToNumber(p.FPS) - progress.Quantizer = ToNumber(p.Quantizer) - progress.Size = p.Size / 1024 - progress.Time = ToNumber(p.Time) - progress.Bitrate = ToNumber(p.Bitrate / 1024) - progress.Speed = ToNumber(p.Speed) - progress.Drop = p.Drop - progress.Dup = p.Dup + p.Started = pp.Started + p.Input = make([]ProgressIO, len(pp.Input)) + p.Output = make([]ProgressIO, len(pp.Output)) + p.Frame = pp.Frame + p.Packet = pp.Packet + p.FPS = json.ToNumber(pp.FPS) + p.Quantizer = json.ToNumber(pp.Quantizer) + p.Size = pp.Size / 1024 + p.Time = json.ToNumber(pp.Time) + p.Bitrate = json.ToNumber(pp.Bitrate / 1024) + p.Speed = json.ToNumber(pp.Speed) + p.Drop = pp.Drop + p.Dup = pp.Dup - for i, io := range p.Input { - progress.Input[i].Unmarshal(&io) + for i, io := range pp.Input { + p.Input[i].Unmarshal(&io) } - for i, io := range p.Output { - progress.Output[i].Unmarshal(&io) + for i, io := range pp.Output { + p.Output[i].Unmarshal(&io) } - progress.Mapping.Unmarshal(&p.Mapping) + p.Mapping.Unmarshal(&pp.Mapping) +} + +func (p *Progress) Marshal() app.Progress { + pp := app.Progress{ + Started: p.Started, + Input: make([]app.ProgressIO, 0, len(p.Input)), + Output: make([]app.ProgressIO, 0, len(p.Output)), + Mapping: p.Mapping.Marshal(), + 
Frame: p.Frame, + Packet: p.Packet, + Size: p.Size * 1024, + Drop: p.Drop, + Dup: p.Dup, + } + + if x, err := p.FPS.Float64(); err == nil { + pp.FPS = x + } + + if x, err := p.Quantizer.Float64(); err == nil { + pp.Quantizer = x + } + + if x, err := p.Time.Float64(); err == nil { + pp.Time = x + } + + if x, err := p.Bitrate.Float64(); err == nil { + pp.Bitrate = x * 1024 + } + + if x, err := p.Speed.Float64(); err == nil { + pp.Speed = x + } + + for _, io := range p.Input { + pp.Input = append(pp.Input, io.Marshal()) + } + + for _, io := range p.Output { + pp.Output = append(pp.Output, io.Marshal()) + } + + return pp } type GraphElement struct { @@ -158,6 +258,44 @@ type GraphElement struct { Height uint64 `json:"height"` } +func (g *GraphElement) Unmarshal(a *app.GraphElement) { + g.Index = a.Index + g.Name = a.Name + g.Filter = a.Filter + g.DstName = a.DstName + g.DstFilter = a.DstFilter + g.Inpad = a.Inpad + g.Outpad = a.Outpad + g.Timebase = a.Timebase + g.Type = a.Type + g.Format = a.Format + g.Sampling = a.Sampling + g.Layout = a.Layout + g.Width = a.Width + g.Height = a.Height +} + +func (g *GraphElement) Marshal() app.GraphElement { + a := app.GraphElement{ + Index: g.Index, + Name: g.Name, + Filter: g.Filter, + DstName: g.DstName, + DstFilter: g.DstFilter, + Inpad: g.Inpad, + Outpad: g.Outpad, + Timebase: g.Timebase, + Type: g.Type, + Format: g.Format, + Sampling: g.Sampling, + Layout: g.Layout, + Width: g.Width, + Height: g.Height, + } + + return a +} + type GraphMapping struct { Input int `json:"input"` Output int `json:"output"` @@ -166,45 +304,57 @@ type GraphMapping struct { Copy bool `json:"copy"` } +func (g *GraphMapping) Unmarshal(a *app.GraphMapping) { + g.Input = a.Input + g.Output = a.Output + g.Index = a.Index + g.Name = a.Name + g.Copy = a.Copy +} + +func (g *GraphMapping) Marshal() app.GraphMapping { + a := app.GraphMapping{ + Input: g.Input, + Output: g.Output, + Index: g.Index, + Name: g.Name, + Copy: g.Copy, + } + + return a +} + type 
StreamMapping struct { Graphs []GraphElement `json:"graphs"` Mapping []GraphMapping `json:"mapping"` } -// Unmarshal converts a restreamer StreamMapping to a StreamMapping in API representation +// Unmarshal converts a core StreamMapping to a StreamMapping in API representation func (s *StreamMapping) Unmarshal(m *app.StreamMapping) { - s.Graphs = make([]GraphElement, 0, len(m.Graphs)) - for _, mge := range m.Graphs { - ge := GraphElement{ - Index: mge.Index, - Name: mge.Name, - Filter: mge.Filter, - DstName: mge.DstName, - DstFilter: mge.DstFilter, - Inpad: mge.Inpad, - Outpad: mge.Outpad, - Timebase: mge.Timebase, - Type: mge.Type, - Format: mge.Format, - Sampling: mge.Sampling, - Layout: mge.Layout, - Width: mge.Width, - Height: mge.Height, - } - - s.Graphs = append(s.Graphs, ge) - } - - s.Mapping = make([]GraphMapping, 0, len(m.Mapping)) - for _, mmapping := range m.Mapping { - mapping := GraphMapping{ - Input: mmapping.Input, - Output: mmapping.Output, - Index: mmapping.Index, - Name: mmapping.Name, - Copy: mmapping.Copy, - } - - s.Mapping = append(s.Mapping, mapping) + s.Graphs = make([]GraphElement, len(m.Graphs)) + for i, graph := range m.Graphs { + s.Graphs[i].Unmarshal(&graph) + } + + s.Mapping = make([]GraphMapping, len(m.Mapping)) + for i, mapping := range m.Mapping { + s.Mapping[i].Unmarshal(&mapping) } } + +func (s *StreamMapping) Marshal() app.StreamMapping { + m := app.StreamMapping{ + Graphs: make([]app.GraphElement, 0, len(s.Graphs)), + Mapping: make([]app.GraphMapping, 0, len(s.Mapping)), + } + + for _, graph := range s.Graphs { + m.Graphs = append(m.Graphs, graph.Marshal()) + } + + for _, mapping := range s.Mapping { + m.Mapping = append(m.Mapping, mapping.Marshal()) + } + + return m +} diff --git a/http/api/progress_test.go b/http/api/progress_test.go new file mode 100644 index 00000000..812e434c --- /dev/null +++ b/http/api/progress_test.go @@ -0,0 +1,331 @@ +package api + +import ( + "testing" + + "github.com/datarhei/core/v16/restream/app" + 
+ "github.com/stretchr/testify/require" +) + +func TestGraphMapping(t *testing.T) { + original := app.GraphMapping{ + Input: 1, + Output: 3, + Index: 39, + Name: "foobar", + Copy: true, + } + + p := GraphMapping{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestGraphElement(t *testing.T) { + original := app.GraphElement{ + Index: 5, + Name: "foobar", + Filter: "infilter", + DstName: "outfilter_", + DstFilter: "outfilter", + Inpad: "inpad", + Outpad: "outpad", + Timebase: "100", + Type: "video", + Format: "yuv420p", + Sampling: 39944, + Layout: "atmos", + Width: 1029, + Height: 463, + } + + p := GraphElement{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestStreamMapping(t *testing.T) { + original := app.StreamMapping{ + Graphs: []app.GraphElement{ + { + Index: 5, + Name: "foobar", + Filter: "infilter", + DstName: "outfilter_", + DstFilter: "outfilter", + Inpad: "inpad", + Outpad: "outpad", + Timebase: "100", + Type: "video", + Format: "yuv420p", + Sampling: 39944, + Layout: "atmos", + Width: 1029, + Height: 463, + }, + }, + Mapping: []app.GraphMapping{ + { + Input: 1, + Output: 3, + Index: 39, + Name: "foobar", + Copy: true, + }, + }, + } + + p := StreamMapping{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestProgressIO(t *testing.T) { + original := app.ProgressIO{ + ID: "id", + Address: "jfdk", + Index: 4, + Stream: 7, + Format: "rtmp", + Type: "video", + Codec: "x", + Coder: "y", + Frame: 133, + Keyframe: 39, + Framerate: app.ProgressIOFramerate{ + Min: 12.5, + Max: 30.0, + Average: 25.9, + }, + FPS: 25.3, + Packet: 442, + PPS: 45.5, + Size: 45944 * 1024, + Bitrate: 5848.22 * 1024, + Extradata: 34, + Pixfmt: "yuv420p", + Quantizer: 494.2, + Width: 10393, + Height: 4933, + Sampling: 58483, + Layout: "atmos", + Channels: 4944, + AVstream: nil, + } + + p := ProgressIO{} + 
p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestProgressIOAVstream(t *testing.T) { + original := app.ProgressIO{ + ID: "id", + Address: "jfdk", + Index: 4, + Stream: 7, + Format: "rtmp", + Type: "video", + Codec: "x", + Coder: "y", + Frame: 133, + Keyframe: 39, + Framerate: app.ProgressIOFramerate{ + Min: 12.5, + Max: 30.0, + Average: 25.9, + }, + FPS: 25.3, + Packet: 442, + PPS: 45.5, + Size: 45944 * 1024, + Bitrate: 5848.22 * 1024, + Extradata: 34, + Pixfmt: "yuv420p", + Quantizer: 494.2, + Width: 10393, + Height: 4933, + Sampling: 58483, + Layout: "atmos", + Channels: 4944, + AVstream: &app.AVstream{ + Input: app.AVstreamIO{ + State: "xxx", + Packet: 100, + Time: 42, + Size: 95744, + }, + Output: app.AVstreamIO{ + State: "yyy", + Packet: 7473, + Time: 57634, + Size: 363, + }, + Aqueue: 3829, + Queue: 4398, + Dup: 47, + Drop: 85, + Enc: 4578, + Looping: true, + LoopingRuntime: 483, + Duplicating: true, + GOP: "gop", + Mode: "mode", + }, + } + + p := ProgressIO{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestProgress(t *testing.T) { + original := app.Progress{ + Started: true, + Input: []app.ProgressIO{ + { + ID: "id", + Address: "jfdk", + Index: 4, + Stream: 7, + Format: "rtmp", + Type: "video", + Codec: "x", + Coder: "y", + Frame: 133, + Keyframe: 39, + Framerate: app.ProgressIOFramerate{ + Min: 12.5, + Max: 30.0, + Average: 25.9, + }, + FPS: 25.3, + Packet: 442, + PPS: 45.5, + Size: 45944 * 1024, + Bitrate: 5848.22 * 1024, + Extradata: 34, + Pixfmt: "yuv420p", + Quantizer: 494.2, + Width: 10393, + Height: 4933, + Sampling: 58483, + Layout: "atmos", + Channels: 4944, + AVstream: &app.AVstream{ + Input: app.AVstreamIO{ + State: "xxx", + Packet: 100, + Time: 42, + Size: 95744, + }, + Output: app.AVstreamIO{ + State: "yyy", + Packet: 7473, + Time: 57634, + Size: 363, + }, + Aqueue: 3829, + Queue: 4398, + Dup: 47, + Drop: 85, + Enc: 4578, + Looping: 
true, + LoopingRuntime: 483, + Duplicating: true, + GOP: "gop", + Mode: "mode", + }, + }, + }, + Output: []app.ProgressIO{ + { + ID: "id", + Address: "jfdk", + Index: 4, + Stream: 7, + Format: "rtmp", + Type: "video", + Codec: "x", + Coder: "y", + Frame: 133, + Keyframe: 39, + Framerate: app.ProgressIOFramerate{ + Min: 12.5, + Max: 30.0, + Average: 25.9, + }, + FPS: 25.3, + Packet: 442, + PPS: 45.5, + Size: 45944 * 1024, + Bitrate: 5848.22 * 1024, + Extradata: 34, + Pixfmt: "yuv420p", + Quantizer: 494.2, + Width: 10393, + Height: 4933, + Sampling: 58483, + Layout: "atmos", + Channels: 4944, + AVstream: nil, + }, + }, + Mapping: app.StreamMapping{ + Graphs: []app.GraphElement{ + { + Index: 5, + Name: "foobar", + Filter: "infilter", + DstName: "outfilter_", + DstFilter: "outfilter", + Inpad: "inpad", + Outpad: "outpad", + Timebase: "100", + Type: "video", + Format: "yuv420p", + Sampling: 39944, + Layout: "atmos", + Width: 1029, + Height: 463, + }, + }, + Mapping: []app.GraphMapping{ + { + Input: 1, + Output: 3, + Index: 39, + Name: "foobar", + Copy: true, + }, + }, + }, + Frame: 329, + Packet: 4343, + FPS: 84.2, + Quantizer: 234.2, + Size: 339393 * 1024, + Time: 494, + Bitrate: 33848.2 * 1024, + Speed: 293.2, + Drop: 2393, + Dup: 5958, + } + + p := Progress{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} diff --git a/http/api/report.go b/http/api/report.go index 49c67a74..025f0f94 100644 --- a/http/api/report.go +++ b/http/api/report.go @@ -2,82 +2,122 @@ package api import ( "strconv" + "time" "github.com/datarhei/core/v16/restream/app" ) -// ProcessReportEntry represents the logs of a run of a restream process +// ProcessReportEntry represents the logs of a run of a core process type ProcessReportEntry struct { - CreatedAt int64 `json:"created_at" format:"int64"` - Prelude []string `json:"prelude,omitempty"` - Log [][2]string `json:"log,omitempty"` - Matches []string `json:"matches,omitempty"` + CreatedAt int64 
`json:"created_at" format:"int64"` + Prelude []string `json:"prelude,omitempty"` + Log [][2]string `json:"log,omitempty"` + Matches []string `json:"matches,omitempty"` +} + +func (r *ProcessReportEntry) Unmarshal(p *app.ReportEntry) { + r.CreatedAt = p.CreatedAt.Unix() + r.Prelude = p.Prelude + r.Log = make([][2]string, len(p.Log)) + for i, line := range p.Log { + r.Log[i][0] = strconv.FormatInt(line.Timestamp.Unix(), 10) + r.Log[i][1] = line.Data + } + r.Matches = p.Matches +} + +func (r *ProcessReportEntry) Marshal() app.ReportEntry { + p := app.ReportEntry{ + CreatedAt: time.Unix(r.CreatedAt, 0), + Prelude: r.Prelude, + Log: make([]app.LogLine, 0, len(r.Log)), + Matches: r.Matches, + } + + for _, l := range r.Log { + ts, _ := strconv.ParseInt(l[0], 10, 64) + p.Log = append(p.Log, app.LogLine{ + Timestamp: time.Unix(ts, 0), + Data: l[1], + }) + } + + return p +} + +type ProcessReportHistoryEntry struct { + ProcessReportEntry + ExitedAt int64 `json:"exited_at,omitempty" format:"int64"` ExitState string `json:"exit_state,omitempty"` Progress *Progress `json:"progress,omitempty"` Resources *ProcessUsage `json:"resources,omitempty"` } -type ProcessReportHistoryEntry struct { - ProcessReportEntry +func (r *ProcessReportHistoryEntry) Unmarshal(p *app.ReportHistoryEntry) { + r.ProcessReportEntry.Unmarshal(&p.ReportEntry) + + r.ExitedAt = p.ExitedAt.Unix() + r.ExitState = p.ExitState + + r.Resources = &ProcessUsage{} + r.Resources.Unmarshal(&p.Usage) + + r.Progress = &Progress{} + r.Progress.Unmarshal(&p.Progress) +} + +func (r *ProcessReportHistoryEntry) Marshal() app.ReportHistoryEntry { + p := app.ReportHistoryEntry{ + ReportEntry: r.ProcessReportEntry.Marshal(), + ExitedAt: time.Unix(r.ExitedAt, 0), + ExitState: r.ExitState, + Progress: app.Progress{}, + Usage: app.ProcessUsage{}, + } + + if r.Progress != nil { + p.Progress = r.Progress.Marshal() + } + + if r.Resources != nil { + p.Usage = r.Resources.Marshal() + } + + return p } // ProcessReport represents the 
current log and the logs of previous runs of a restream process type ProcessReport struct { ProcessReportEntry - History []ProcessReportEntry `json:"history"` + History []ProcessReportHistoryEntry `json:"history"` } -// Unmarshal converts a restream log to a report -func (report *ProcessReport) Unmarshal(l *app.Report) { +// Unmarshal converts a core report to a report +func (r *ProcessReport) Unmarshal(l *app.Report) { if l == nil { return } - report.CreatedAt = l.CreatedAt.Unix() - report.Prelude = l.Prelude - report.Log = make([][2]string, len(l.Log)) - for i, line := range l.Log { - report.Log[i][0] = strconv.FormatInt(line.Timestamp.Unix(), 10) - report.Log[i][1] = line.Data + r.ProcessReportEntry.Unmarshal(&l.ReportEntry) + r.History = make([]ProcessReportHistoryEntry, len(l.History)) + + for i, h := range l.History { + r.History[i].Unmarshal(&h) + } +} + +func (r *ProcessReport) Marshal() app.Report { + p := app.Report{ + ReportEntry: r.ProcessReportEntry.Marshal(), + History: make([]app.ReportHistoryEntry, 0, len(r.History)), } - report.Matches = l.Matches - - report.History = []ProcessReportEntry{} - - for _, h := range l.History { - he := ProcessReportEntry{ - CreatedAt: h.CreatedAt.Unix(), - Prelude: h.Prelude, - Log: make([][2]string, len(h.Log)), - Matches: h.Matches, - ExitedAt: h.ExitedAt.Unix(), - ExitState: h.ExitState, - Resources: &ProcessUsage{ - CPU: ProcessUsageCPU{ - NCPU: ToNumber(h.Usage.CPU.NCPU), - Average: ToNumber(h.Usage.CPU.Average), - Max: ToNumber(h.Usage.CPU.Max), - Limit: ToNumber(h.Usage.CPU.Limit), - }, - Memory: ProcessUsageMemory{ - Average: ToNumber(h.Usage.Memory.Average), - Max: h.Usage.Memory.Max, - Limit: h.Usage.Memory.Limit, - }, - }, - } - - he.Progress = &Progress{} - he.Progress.Unmarshal(&h.Progress) - - for i, line := range h.Log { - he.Log[i][0] = strconv.FormatInt(line.Timestamp.Unix(), 10) - he.Log[i][1] = line.Data - } - - report.History = append(report.History, he) + + for _, h := range r.History { + p.History 
= append(p.History, h.Marshal()) } + + return p } type ProcessReportSearchResult struct { diff --git a/http/api/report_test.go b/http/api/report_test.go new file mode 100644 index 00000000..6347c8a4 --- /dev/null +++ b/http/api/report_test.go @@ -0,0 +1,403 @@ +package api + +import ( + "testing" + "time" + + "github.com/datarhei/core/v16/restream/app" + + "github.com/stretchr/testify/require" +) + +func TestProcessReportEntry(t *testing.T) { + original := app.ReportEntry{ + CreatedAt: time.Unix(12345, 0), + Prelude: []string{"lalala", "lululu"}, + Log: []app.LogLine{ + { + Timestamp: time.Unix(123, 0), + Data: "xxx", + }, + { + Timestamp: time.Unix(124, 0), + Data: "yyy", + }, + }, + Matches: []string{"match1", "match2", "match3"}, + } + + p := ProcessReportEntry{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestProcessReportHistoryEntry(t *testing.T) { + original := app.ReportHistoryEntry{ + ReportEntry: app.ReportEntry{ + CreatedAt: time.Unix(12345, 0), + Prelude: []string{"lalala", "lululu"}, + Log: []app.LogLine{ + { + Timestamp: time.Unix(123, 0), + Data: "xxx", + }, + { + Timestamp: time.Unix(124, 0), + Data: "yyy", + }, + }, + Matches: []string{"match1", "match2", "match3"}, + }, + ExitedAt: time.Unix(394949, 0), + ExitState: "kaputt", + Progress: app.Progress{ + Started: true, + Input: []app.ProgressIO{ + { + ID: "id", + Address: "jfdk", + Index: 4, + Stream: 7, + Format: "rtmp", + Type: "video", + Codec: "x", + Coder: "y", + Frame: 133, + Keyframe: 39, + Framerate: app.ProgressIOFramerate{ + Min: 12.5, + Max: 30.0, + Average: 25.9, + }, + FPS: 25.3, + Packet: 442, + PPS: 45.5, + Size: 45944 * 1024, + Bitrate: 5848.22 * 1024, + Extradata: 34, + Pixfmt: "yuv420p", + Quantizer: 494.2, + Width: 10393, + Height: 4933, + Sampling: 58483, + Layout: "atmos", + Channels: 4944, + AVstream: &app.AVstream{ + Input: app.AVstreamIO{ + State: "xxx", + Packet: 100, + Time: 42, + Size: 95744, + }, + Output: 
app.AVstreamIO{ + State: "yyy", + Packet: 7473, + Time: 57634, + Size: 363, + }, + Aqueue: 3829, + Queue: 4398, + Dup: 47, + Drop: 85, + Enc: 4578, + Looping: true, + LoopingRuntime: 483, + Duplicating: true, + GOP: "gop", + Mode: "mode", + }, + }, + }, + Output: []app.ProgressIO{ + { + ID: "id", + Address: "jfdk", + Index: 4, + Stream: 7, + Format: "rtmp", + Type: "video", + Codec: "x", + Coder: "y", + Frame: 133, + Keyframe: 39, + Framerate: app.ProgressIOFramerate{ + Min: 12.5, + Max: 30.0, + Average: 25.9, + }, + FPS: 25.3, + Packet: 442, + PPS: 45.5, + Size: 45944 * 1024, + Bitrate: 5848.22 * 1024, + Extradata: 34, + Pixfmt: "yuv420p", + Quantizer: 494.2, + Width: 10393, + Height: 4933, + Sampling: 58483, + Layout: "atmos", + Channels: 4944, + AVstream: nil, + }, + }, + Mapping: app.StreamMapping{ + Graphs: []app.GraphElement{ + { + Index: 5, + Name: "foobar", + Filter: "infilter", + DstName: "outfilter_", + DstFilter: "outfilter", + Inpad: "inpad", + Outpad: "outpad", + Timebase: "100", + Type: "video", + Format: "yuv420p", + Sampling: 39944, + Layout: "atmos", + Width: 1029, + Height: 463, + }, + }, + Mapping: []app.GraphMapping{ + { + Input: 1, + Output: 3, + Index: 39, + Name: "foobar", + Copy: true, + }, + }, + }, + Frame: 329, + Packet: 4343, + FPS: 84.2, + Quantizer: 234.2, + Size: 339393 * 1024, + Time: 494, + Bitrate: 33848.2 * 1024, + Speed: 293.2, + Drop: 2393, + Dup: 5958, + }, + Usage: app.ProcessUsage{ + CPU: app.ProcessUsageCPU{ + NCPU: 1.5, + Current: 0.7, + Average: 0.9, + Max: 1.3, + Limit: 100, + IsThrottling: true, + }, + Memory: app.ProcessUsageMemory{ + Current: 100, + Average: 72, + Max: 150, + Limit: 200, + }, + }, + } + + p := ProcessReportHistoryEntry{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} + +func TestProcessReport(t *testing.T) { + original := app.Report{ + ReportEntry: app.ReportEntry{ + CreatedAt: time.Unix(12345, 0), + Prelude: []string{"lalala", "lululu"}, + Log: 
[]app.LogLine{ + { + Timestamp: time.Unix(123, 0), + Data: "xxx", + }, + { + Timestamp: time.Unix(124, 0), + Data: "yyy", + }, + }, + Matches: []string{"match1", "match2", "match3"}, + }, + History: []app.ReportHistoryEntry{ + { + ReportEntry: app.ReportEntry{ + CreatedAt: time.Unix(12345, 0), + Prelude: []string{"lalala", "lululu"}, + Log: []app.LogLine{ + { + Timestamp: time.Unix(123, 0), + Data: "xxx", + }, + { + Timestamp: time.Unix(124, 0), + Data: "yyy", + }, + }, + Matches: []string{"match1", "match2", "match3"}, + }, + ExitedAt: time.Unix(394949, 0), + ExitState: "kaputt", + Progress: app.Progress{ + Started: true, + Input: []app.ProgressIO{ + { + ID: "id", + Address: "jfdk", + Index: 4, + Stream: 7, + Format: "rtmp", + Type: "video", + Codec: "x", + Coder: "y", + Frame: 133, + Keyframe: 39, + Framerate: app.ProgressIOFramerate{ + Min: 12.5, + Max: 30.0, + Average: 25.9, + }, + FPS: 25.3, + Packet: 442, + PPS: 45.5, + Size: 45944 * 1024, + Bitrate: 5848.22 * 1024, + Extradata: 34, + Pixfmt: "yuv420p", + Quantizer: 494.2, + Width: 10393, + Height: 4933, + Sampling: 58483, + Layout: "atmos", + Channels: 4944, + AVstream: &app.AVstream{ + Input: app.AVstreamIO{ + State: "xxx", + Packet: 100, + Time: 42, + Size: 95744, + }, + Output: app.AVstreamIO{ + State: "yyy", + Packet: 7473, + Time: 57634, + Size: 363, + }, + Aqueue: 3829, + Queue: 4398, + Dup: 47, + Drop: 85, + Enc: 4578, + Looping: true, + LoopingRuntime: 483, + Duplicating: true, + GOP: "gop", + Mode: "mode", + }, + }, + }, + Output: []app.ProgressIO{ + { + ID: "id", + Address: "jfdk", + Index: 4, + Stream: 7, + Format: "rtmp", + Type: "video", + Codec: "x", + Coder: "y", + Frame: 133, + Keyframe: 39, + Framerate: app.ProgressIOFramerate{ + Min: 12.5, + Max: 30.0, + Average: 25.9, + }, + FPS: 25.3, + Packet: 442, + PPS: 45.5, + Size: 45944 * 1024, + Bitrate: 5848.22 * 1024, + Extradata: 34, + Pixfmt: "yuv420p", + Quantizer: 494.2, + Width: 10393, + Height: 4933, + Sampling: 58483, + Layout: "atmos", + 
Channels: 4944, + AVstream: nil, + }, + }, + Mapping: app.StreamMapping{ + Graphs: []app.GraphElement{ + { + Index: 5, + Name: "foobar", + Filter: "infilter", + DstName: "outfilter_", + DstFilter: "outfilter", + Inpad: "inpad", + Outpad: "outpad", + Timebase: "100", + Type: "video", + Format: "yuv420p", + Sampling: 39944, + Layout: "atmos", + Width: 1029, + Height: 463, + }, + }, + Mapping: []app.GraphMapping{ + { + Input: 1, + Output: 3, + Index: 39, + Name: "foobar", + Copy: true, + }, + }, + }, + Frame: 329, + Packet: 4343, + FPS: 84.2, + Quantizer: 234.2, + Size: 339393 * 1024, + Time: 494, + Bitrate: 33848.2 * 1024, + Speed: 293.2, + Drop: 2393, + Dup: 5958, + }, + Usage: app.ProcessUsage{ + CPU: app.ProcessUsageCPU{ + NCPU: 1.5, + Current: 0.7, + Average: 0.9, + Max: 1.3, + Limit: 100, + IsThrottling: true, + }, + Memory: app.ProcessUsageMemory{ + Current: 100, + Average: 72, + Max: 150, + Limit: 200, + }, + }, + }, + }, + } + + p := ProcessReport{} + p.Unmarshal(&original) + restored := p.Marshal() + + require.Equal(t, original, restored) +} diff --git a/http/api/session.go b/http/api/session.go index 3ce69839..3ee588df 100644 --- a/http/api/session.go +++ b/http/api/session.go @@ -42,8 +42,8 @@ func (s *Session) Unmarshal(sess session.Session) { s.Extra = sess.Extra s.RxBytes = sess.RxBytes s.TxBytes = sess.TxBytes - s.RxBitrate = ToNumber(sess.RxBitrate / 1024) - s.TxBitrate = ToNumber(sess.TxBitrate / 1024) + s.RxBitrate = json.ToNumber(sess.RxBitrate / 1024) + s.TxBitrate = json.ToNumber(sess.TxBitrate / 1024) } // SessionSummaryActive represents the currently active sessions @@ -80,12 +80,12 @@ type SessionsActive map[string][]Session // Unmarshal creates a new SessionSummary from a session.Summary func (summary *SessionSummary) Unmarshal(sum session.Summary) { summary.Active.MaxSessions = sum.MaxSessions - summary.Active.MaxRxBitrate = ToNumber(sum.MaxRxBitrate / 1024 / 1024) - summary.Active.MaxTxBitrate = ToNumber(sum.MaxTxBitrate / 1024 / 1024) + 
summary.Active.MaxRxBitrate = json.ToNumber(sum.MaxRxBitrate / 1024 / 1024) + summary.Active.MaxTxBitrate = json.ToNumber(sum.MaxTxBitrate / 1024 / 1024) summary.Active.Sessions = sum.CurrentSessions - summary.Active.RxBitrate = ToNumber(sum.CurrentRxBitrate / 1024 / 1024) - summary.Active.TxBitrate = ToNumber(sum.CurrentTxBitrate / 1024 / 1024) + summary.Active.RxBitrate = json.ToNumber(sum.CurrentRxBitrate / 1024 / 1024) + summary.Active.TxBitrate = json.ToNumber(sum.CurrentTxBitrate / 1024 / 1024) summary.Active.SessionList = make([]Session, len(sum.Active)) diff --git a/http/client/client.go b/http/client/client.go index 7d8abc05..d73fab98 100644 --- a/http/client/client.go +++ b/http/client/client.go @@ -69,6 +69,7 @@ type RestClient interface { ProcessProbeConfig(config *app.Config) (api.Probe, error) // POST /v3/process/probe ProcessConfig(id app.ProcessID) (api.ProcessConfig, error) // GET /v3/process/{id}/config ProcessReport(id app.ProcessID) (api.ProcessReport, error) // GET /v3/process/{id}/report + ProcessReportSet(id app.ProcessID, report *app.Report) error // PUT /v3/process/{id}/report ProcessState(id app.ProcessID) (api.ProcessState, error) // GET /v3/process/{id}/state ProcessMetadata(id app.ProcessID, key string) (api.Metadata, error) // GET /v3/process/{id}/metadata/{key} ProcessMetadataSet(id app.ProcessID, key string, metadata api.Metadata) error // PUT /v3/process/{id}/metadata/{key} @@ -454,6 +455,10 @@ func New(config Config) (RestClient, error) { path: mustNewGlob("/v3/cluster/node/*/state"), constraint: mustNewConstraint("^16.14.0"), }, + { + path: mustNewGlob("/v3/process/*/report"), + constraint: mustNewConstraint("^16.20.0"), + }, }, "DELETE": { { diff --git a/http/client/process.go b/http/client/process.go index 76fd1883..38a41d18 100644 --- a/http/client/process.go +++ b/http/client/process.go @@ -69,8 +69,7 @@ func (r *restclient) ProcessAdd(p *app.Config, metadata map[string]interface{}) var buf bytes.Buffer config := 
api.ProcessConfig{} - config.Unmarshal(p) - config.Metadata = metadata + config.Unmarshal(p, metadata) e := json.NewEncoder(&buf) e.Encode(config) @@ -87,8 +86,7 @@ func (r *restclient) ProcessUpdate(id app.ProcessID, p *app.Config, metadata map var buf bytes.Buffer config := api.ProcessConfig{} - config.Unmarshal(p) - config.Metadata = metadata + config.Unmarshal(p, metadata) e := json.NewEncoder(&buf) e.Encode(config) @@ -104,15 +102,35 @@ func (r *restclient) ProcessUpdate(id app.ProcessID, p *app.Config, metadata map return nil } -func (r *restclient) ProcessDelete(id app.ProcessID) error { +func (r *restclient) ProcessReportSet(id app.ProcessID, report *app.Report) error { + var buf bytes.Buffer + + data := api.ProcessReport{} + data.Unmarshal(report) + + e := json.NewEncoder(&buf) + e.Encode(data) + query := &url.Values{} query.Set("domain", id.Domain) - r.call("DELETE", "/v3/process/"+url.PathEscape(id.ID), query, nil, "", nil) + _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID)+"/report", query, nil, "application/json", &buf) + if err != nil { + return err + } return nil } +func (r *restclient) ProcessDelete(id app.ProcessID) error { + query := &url.Values{} + query.Set("domain", id.Domain) + + _, err := r.call("DELETE", "/v3/process/"+url.PathEscape(id.ID), query, nil, "", nil) + + return err +} + func (r *restclient) ProcessCommand(id app.ProcessID, command string) error { var buf bytes.Buffer @@ -125,11 +143,8 @@ func (r *restclient) ProcessCommand(id app.ProcessID, command string) error { query.Set("domain", id.Domain) _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID)+"/command", query, nil, "application/json", &buf) - if err != nil { - return err - } - return nil + return err } func (r *restclient) ProcessMetadata(id app.ProcessID, key string) (api.Metadata, error) { @@ -164,11 +179,8 @@ func (r *restclient) ProcessMetadataSet(id app.ProcessID, key string, metadata a query.Set("domain", id.Domain) _, err := r.call("PUT", 
"/v3/process/"+url.PathEscape(id.ID)+"/metadata/"+url.PathEscape(key), query, nil, "application/json", &buf) - if err != nil { - return err - } - return nil + return err } func (r *restclient) ProcessProbe(id app.ProcessID) (api.Probe, error) { @@ -192,7 +204,7 @@ func (r *restclient) ProcessProbeConfig(p *app.Config) (api.Probe, error) { var buf bytes.Buffer config := api.ProcessConfig{} - config.Unmarshal(p) + config.Unmarshal(p, nil) e := json.NewEncoder(&buf) e.Encode(config) diff --git a/http/graph/resolver/resolver.go b/http/graph/resolver/resolver.go index 22f628d6..76aa3240 100644 --- a/http/graph/resolver/resolver.go +++ b/http/graph/resolver/resolver.go @@ -36,7 +36,7 @@ func (r *queryResolver) getProcess(id app.ProcessID) (*models.Process, error) { return nil, err } - report, err := r.Restream.GetProcessLog(id) + report, err := r.Restream.GetProcessReport(id) if err != nil { return nil, err } diff --git a/http/handler/api/cluster.go b/http/handler/api/cluster.go index a4e00639..10841647 100644 --- a/http/handler/api/cluster.go +++ b/http/handler/api/cluster.go @@ -118,8 +118,11 @@ func (h *ClusterHandler) marshalClusterNode(node cluster.ClusterNode) api.Cluste NCPU: node.Resources.NCPU, CPU: node.Resources.CPU, CPULimit: node.Resources.CPULimit, + CPUCore: node.Resources.CPUCore, Mem: node.Resources.Mem, MemLimit: node.Resources.MemLimit, + MemTotal: node.Resources.MemTotal, + MemCore: node.Resources.MemCore, }, } diff --git a/http/handler/api/cluster_iam.go b/http/handler/api/cluster_iam.go index 91ed30e0..3b177734 100644 --- a/http/handler/api/cluster_iam.go +++ b/http/handler/api/cluster_iam.go @@ -7,8 +7,8 @@ import ( "github.com/datarhei/core/v16/cluster/store" "github.com/datarhei/core/v16/http/api" "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/iam/access" "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/labstack/echo/v4" ) @@ -198,14 +198,14 @@ func (h 
*ClusterHandler) IAMIdentityUpdatePolicies(c echo.Context) error { } } - accessPolicies := []access.Policy{} + accessPolicies := []policy.Policy{} for _, p := range policies { if !h.iam.Enforce(ctxuser, p.Domain, "iam", iamuser.Name, "write") { return api.Err(http.StatusForbidden, "", "not allowed to write policy: %v", p) } - accessPolicies = append(accessPolicies, access.Policy{ + accessPolicies = append(accessPolicies, policy.Policy{ Name: name, Domain: p.Domain, Types: p.Types, diff --git a/http/handler/api/cluster_process.go b/http/handler/api/cluster_process.go index d4c12931..11b3894d 100644 --- a/http/handler/api/cluster_process.go +++ b/http/handler/api/cluster_process.go @@ -1,15 +1,12 @@ package api import ( - "bytes" "fmt" "net/http" - "strconv" "strings" "github.com/datarhei/core/v16/cluster/node" "github.com/datarhei/core/v16/cluster/store" - "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/glob" "github.com/datarhei/core/v16/http/api" "github.com/datarhei/core/v16/http/handler/util" @@ -89,53 +86,16 @@ func (h *ClusterHandler) ProcessList(c echo.Context) error { continue } - process := h.convertStoreProcessToAPIProcess(p, filter) + process := api.Process{} + process.UnmarshalStore(p, filter.config, filter.state, filter.report, filter.metadata) missing = append(missing, process) } } - // We're doing some byte-wrangling here because the processes from the nodes - // are of type clientapi.Process, the missing processes are from type api.Process. - // They are actually the same and converting them is cumbersome. That's why - // we're doing the JSON marshalling here and appending these two slices is done - // in JSON representation. + processes = append(processes, missing...) 
- data, err := json.Marshal(processes) - if err != nil { - return api.Err(http.StatusInternalServerError, "", err.Error()) - } - - buf := &bytes.Buffer{} - - if len(missing) != 0 { - reallyData, err := json.Marshal(missing) - if err != nil { - return api.Err(http.StatusInternalServerError, "", err.Error()) - } - - i := bytes.LastIndexByte(data, ']') - if i == -1 { - return api.Err(http.StatusInternalServerError, "", "no valid JSON") - } - - if len(processes) != 0 { - data[i] = ',' - } else { - data[i] = ' ' - } - buf.Write(data) - - i = bytes.IndexByte(reallyData, '[') - if i == -1 { - return api.Err(http.StatusInternalServerError, "", "no valid JSON") - } - buf.Write(reallyData[i+1:]) - } else { - buf.Write(data) - } - - return c.Stream(http.StatusOK, "application/json", buf) + return c.JSON(http.StatusOK, processes) } func (h *ClusterHandler) getFilteredStoreProcesses(processes []store.Process, wantids []string, _, reference, idpattern, refpattern, ownerpattern, domainpattern string) []store.Process { @@ -224,74 +184,6 @@ func (h *ClusterHandler) getFilteredStoreProcesses(processes []store.Process, wa return final } -func (h *ClusterHandler) convertStoreProcessToAPIProcess(p store.Process, filter filter) api.Process { - process := api.Process{ - ID: p.Config.ID, - Owner: p.Config.Owner, - Domain: p.Config.Domain, - Type: "ffmpeg", - Reference: p.Config.Reference, - CreatedAt: p.CreatedAt.Unix(), - UpdatedAt: p.UpdatedAt.Unix(), - } - - if filter.metadata { - process.Metadata = p.Metadata - } - - if filter.config { - config := &api.ProcessConfig{} - config.Unmarshal(p.Config) - - process.Config = config - } - - if filter.state { - process.State = &api.ProcessState{ - Order: p.Order, - LastLog: p.Error, - Resources: api.ProcessUsage{ - CPU: api.ProcessUsageCPU{ - NCPU: api.ToNumber(1), - Limit: api.ToNumber(p.Config.LimitCPU), - }, - Memory: api.ProcessUsageMemory{ - Limit: p.Config.LimitMemory, - }, - }, - Command: []string{}, - } - - if len(p.Error) != 0 { - 
process.State.State = "failed" - } else { - process.State.State = "finished" - } - } - - if filter.report { - process.Report = &api.ProcessReport{ - ProcessReportEntry: api.ProcessReportEntry{ - CreatedAt: p.CreatedAt.Unix(), - Prelude: []string{}, - Log: [][2]string{}, - Matches: []string{}, - }, - } - - if len(p.Error) != 0 { - process.Report.Prelude = []string{p.Error} - process.Report.Log = [][2]string{ - {strconv.FormatInt(p.CreatedAt.Unix(), 10), p.Error}, - } - process.Report.ExitedAt = p.CreatedAt.Unix() - process.Report.ExitState = "failed" - } - } - - return process -} - // ProcessGet returns the process with the given ID whereever it's running on the cluster // @Summary List a process by its ID // @Description List a process by its ID. Use the filter parameter to specifiy the level of detail of the output. @@ -316,29 +208,27 @@ func (h *ClusterHandler) ProcessGet(c echo.Context) error { return api.Err(http.StatusForbidden, "") } - procs := h.proxy.ProcessList(node.ProcessListOptions{ - ID: []string{id}, - Filter: filter.Slice(), - Domain: domain, - }) - - if len(procs) == 0 { - // Check the store in the cluster for an undeployed process - p, err := h.cluster.Store().ProcessGet(app.NewProcessID(id, domain)) - if err != nil { - return api.Err(http.StatusNotFound, "", "Unknown process ID: %s", id) - } - - process := h.convertStoreProcessToAPIProcess(p, filter) + pid := app.NewProcessID(id, domain) - return c.JSON(http.StatusOK, process) + // Check the store for the process + // TODO: should check the leader because in larger cluster it needs time to get to all followers + p, nodeid, err := h.cluster.ProcessGet("", pid, false) + if err != nil { + return api.Err(http.StatusNotFound, "", "process not found: %s in domain '%s'", pid.ID, pid.Domain) } - if procs[0].Domain != domain { - return api.Err(http.StatusNotFound, "", "Unknown process ID: %s", id) + process := api.Process{} + process.UnmarshalStore(p, filter.config, filter.state, filter.report, 
filter.metadata) + + // Get the actual process data + if len(nodeid) != 0 { + process, err = h.proxy.ProcessGet(nodeid, pid, filter.Slice()) + if err != nil { + return api.Err(http.StatusNotFound, "", "process not found: %s in domain '%s'", pid.ID, pid.Domain) + } } - return c.JSON(http.StatusOK, procs[0]) + return c.JSON(http.StatusOK, process) } // Add adds a new process to the cluster @@ -436,13 +326,13 @@ func (h *ClusterHandler) ProcessUpdate(c echo.Context) error { pid := process.ProcessID() - current, err := h.cluster.Store().ProcessGet(pid) + current, _, err := h.cluster.ProcessGet("", pid, false) if err != nil { return api.Err(http.StatusNotFound, "", "process not found: %s in domain '%s'", pid.ID, pid.Domain) } // Prefill the config with the current values - process.Unmarshal(current.Config) + process.Unmarshal(current.Config, nil) if err := util.ShouldBindJSON(c, &process); err != nil { return api.Err(http.StatusBadRequest, "", "invalid JSON: %s", err.Error()) @@ -641,7 +531,7 @@ func (h *ClusterHandler) ProcessProbe(c echo.Context) error { Domain: domain, } - nodeid, err := h.proxy.ProcessFindNodeID(pid) + nodeid, err := h.cluster.Store().ProcessGetNode(pid) if err != nil { return c.JSON(http.StatusOK, api.Probe{ Log: []string{fmt.Sprintf("the process can't be found: %s", err.Error())}, diff --git a/http/handler/api/cluster_store.go b/http/handler/api/cluster_store.go index 8d0c7349..59eeadae 100644 --- a/http/handler/api/cluster_store.go +++ b/http/handler/api/cluster_store.go @@ -33,7 +33,8 @@ func (h *ClusterHandler) StoreListProcesses(c echo.Context) error { continue } - process := h.convertStoreProcessToAPIProcess(p, newFilter("")) + process := api.Process{} + process.UnmarshalStore(p, true, true, true, true) processes = append(processes, process) } @@ -66,12 +67,13 @@ func (h *ClusterHandler) StoreGetProcess(c echo.Context) error { return api.Err(http.StatusForbidden, "", "API user %s is not allowed to read this process", ctxuser) } - p, err := 
h.cluster.Store().ProcessGet(pid) + p, _, err := h.cluster.Store().ProcessGet(pid) if err != nil { return api.Err(http.StatusNotFound, "", "process not found: %s in domain '%s'", pid.ID, pid.Domain) } - process := h.convertStoreProcessToAPIProcess(p, newFilter("")) + process := api.Process{} + process.UnmarshalStore(p, true, true, true, true) return c.JSON(http.StatusOK, process) } diff --git a/http/handler/api/config_test.go b/http/handler/api/config_test.go index 59aca767..0757cfb0 100644 --- a/http/handler/api/config_test.go +++ b/http/handler/api/config_test.go @@ -22,10 +22,10 @@ func getDummyConfigRouter(t *testing.T) (*echo.Echo, store.Store) { memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) - _, _, err = memfs.WriteFileReader("./mime.types", strings.NewReader("xxxxx")) + _, _, err = memfs.WriteFileReader("./mime.types", strings.NewReader("xxxxx"), -1) require.NoError(t, err) - _, _, err = memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx")) + _, _, err = memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx"), -1) require.NoError(t, err) config, err := store.NewJSON(memfs, "/config.json", nil) diff --git a/http/handler/api/events.go b/http/handler/api/events.go index decb146d..bec1b2e2 100644 --- a/http/handler/api/events.go +++ b/http/handler/api/events.go @@ -113,7 +113,7 @@ func (h *EventsHandler) Events(c echo.Context) error { res.Write([]byte(":keepalive\n\n")) res.Flush() case e := <-evts: - event.Marshal(&e) + event.Unmarshal(&e) if !filterEvent(&event) { continue @@ -141,7 +141,7 @@ func (h *EventsHandler) Events(c echo.Context) error { res.Write([]byte("{\"event\": \"keepalive\"}\n")) res.Flush() case e := <-evts: - event.Marshal(&e) + event.Unmarshal(&e) if !filterEvent(&event) { continue diff --git a/http/handler/api/filesystems.go b/http/handler/api/filesystems.go index f14c4f8d..fa43ba4d 100644 --- a/http/handler/api/filesystems.go +++ b/http/handler/api/filesystems.go @@ -263,7 +263,7 @@ func (h 
*FSHandler) FileOperation(c echo.Context) error { // In case the target is S3, allow it to determine the size of the file sizer := fs.NewReadSizer(reader, fromFileStat.Size()) - _, _, err = toFS.Handler.FS.Filesystem.WriteFileReader(toPath, sizer) + _, _, err = toFS.Handler.FS.Filesystem.WriteFileReader(toPath, sizer, int(sizer.Size())) if err != nil { toFS.Handler.FS.Filesystem.Remove(toPath) return api.Err(http.StatusBadRequest, "", "writing target file failed: %s", err) diff --git a/http/handler/api/filesystems_test.go b/http/handler/api/filesystems_test.go index 35de0314..edfbec86 100644 --- a/http/handler/api/filesystems_test.go +++ b/http/handler/api/filesystems_test.go @@ -149,10 +149,10 @@ func TestFilesystemsListSize(t *testing.T) { memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) - memfs.WriteFileReader("/a", strings.NewReader("a")) - memfs.WriteFileReader("/aa", strings.NewReader("aa")) - memfs.WriteFileReader("/aaa", strings.NewReader("aaa")) - memfs.WriteFileReader("/aaaa", strings.NewReader("aaaa")) + memfs.WriteFileReader("/a", strings.NewReader("a"), -1) + memfs.WriteFileReader("/aa", strings.NewReader("aa"), -1) + memfs.WriteFileReader("/aaa", strings.NewReader("aaa"), -1) + memfs.WriteFileReader("/aaaa", strings.NewReader("aaaa"), -1) filesystems := []httpfs.FS{ { @@ -207,13 +207,13 @@ func TestFilesystemsListLastmod(t *testing.T) { memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) - memfs.WriteFileReader("/a", strings.NewReader("a")) + memfs.WriteFileReader("/a", strings.NewReader("a"), -1) time.Sleep(1 * time.Second) - memfs.WriteFileReader("/b", strings.NewReader("b")) + memfs.WriteFileReader("/b", strings.NewReader("b"), -1) time.Sleep(1 * time.Second) - memfs.WriteFileReader("/c", strings.NewReader("c")) + memfs.WriteFileReader("/c", strings.NewReader("c"), -1) time.Sleep(1 * time.Second) - memfs.WriteFileReader("/d", strings.NewReader("d")) + memfs.WriteFileReader("/d", strings.NewReader("d"), 
-1) var a, b, c, d time.Time @@ -274,10 +274,10 @@ func TestFilesystemsDeleteFiles(t *testing.T) { memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) - memfs.WriteFileReader("/a", strings.NewReader("a")) - memfs.WriteFileReader("/aa", strings.NewReader("aa")) - memfs.WriteFileReader("/aaa", strings.NewReader("aaa")) - memfs.WriteFileReader("/aaaa", strings.NewReader("aaaa")) + memfs.WriteFileReader("/a", strings.NewReader("a"), -1) + memfs.WriteFileReader("/aa", strings.NewReader("aa"), -1) + memfs.WriteFileReader("/aaa", strings.NewReader("aaa"), -1) + memfs.WriteFileReader("/aaaa", strings.NewReader("aaaa"), -1) filesystems := []httpfs.FS{ { @@ -312,10 +312,10 @@ func TestFilesystemsDeleteFilesSize(t *testing.T) { memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) - memfs.WriteFileReader("/a", strings.NewReader("a")) - memfs.WriteFileReader("/aa", strings.NewReader("aa")) - memfs.WriteFileReader("/aaa", strings.NewReader("aaa")) - memfs.WriteFileReader("/aaaa", strings.NewReader("aaaa")) + memfs.WriteFileReader("/a", strings.NewReader("a"), -1) + memfs.WriteFileReader("/aa", strings.NewReader("aa"), -1) + memfs.WriteFileReader("/aaa", strings.NewReader("aaa"), -1) + memfs.WriteFileReader("/aaaa", strings.NewReader("aaaa"), -1) filesystems := []httpfs.FS{ { @@ -348,13 +348,13 @@ func TestFilesystemsDeleteFilesLastmod(t *testing.T) { memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) - memfs.WriteFileReader("/a", strings.NewReader("a")) + memfs.WriteFileReader("/a", strings.NewReader("a"), -1) time.Sleep(1 * time.Second) - memfs.WriteFileReader("/b", strings.NewReader("b")) + memfs.WriteFileReader("/b", strings.NewReader("b"), -1) time.Sleep(1 * time.Second) - memfs.WriteFileReader("/c", strings.NewReader("c")) + memfs.WriteFileReader("/c", strings.NewReader("c"), -1) time.Sleep(1 * time.Second) - memfs.WriteFileReader("/d", strings.NewReader("d")) + memfs.WriteFileReader("/d", 
strings.NewReader("d"), -1) var b, c time.Time diff --git a/http/handler/api/fixtures/addProcess.json b/http/handler/api/fixtures/addProcess.json index b967e245..73994f03 100644 --- a/http/handler/api/fixtures/addProcess.json +++ b/http/handler/api/fixtures/addProcess.json @@ -25,8 +25,9 @@ ] } ], + "log_patterns": [], "autostart": false, "reconnect": true, "reconnect_delay_seconds": 10, "stale_timeout_seconds": 10 -} +} \ No newline at end of file diff --git a/http/handler/api/process.go b/http/handler/api/process.go index 0ec907f1..70d381da 100644 --- a/http/handler/api/process.go +++ b/http/handler/api/process.go @@ -1,8 +1,8 @@ package api import ( + "fmt" "net/http" - "runtime" "sort" "strconv" "strings" @@ -155,7 +155,7 @@ func (h *ProcessHandler) GetAll(c echo.Context) error { wg := sync.WaitGroup{} - for i := 0; i < runtime.NumCPU(); i++ { + for i := 0; i < 8; /*runtime.NumCPU()*/ i++ { wg.Add(1) go func(idChan <-chan app.ProcessID) { @@ -315,7 +315,7 @@ func (h *ProcessHandler) Update(c echo.Context) error { } // Prefill the config with the current values - process.Unmarshal(current.Config) + process.Unmarshal(current.Config, nil) if err := util.ShouldBindJSON(c, &process); err != nil { return api.Err(http.StatusBadRequest, "", "invalid JSON: %s", err.Error()) @@ -333,11 +333,6 @@ func (h *ProcessHandler) Update(c echo.Context) error { config, metadata := process.Marshal() - tid = app.ProcessID{ - ID: id, - Domain: domain, - } - if err := h.restream.UpdateProcess(tid, config); err != nil { if err == restream.ErrUnknownProcess { return api.Err(http.StatusNotFound, "", "process not found: %s", id) @@ -355,9 +350,7 @@ func (h *ProcessHandler) Update(c echo.Context) error { h.restream.SetProcessMetadata(tid, key, data) } - p, _ := h.getProcess(tid, newFilter("config")) - - return c.JSON(http.StatusOK, p.Config) + return c.JSON(http.StatusOK, process) } // Command issues a command to a process @@ -450,7 +443,7 @@ func (h *ProcessHandler) GetConfig(c 
echo.Context) error { } config := api.ProcessConfig{} - config.Unmarshal(p.Config) + config.Unmarshal(p.Config, nil) return c.JSON(http.StatusOK, config) } @@ -545,7 +538,7 @@ func (h *ProcessHandler) GetReport(c echo.Context) error { Domain: domain, } - l, err := h.restream.GetProcessLog(tid) + l, err := h.restream.GetProcessReport(tid) if err != nil { return api.Err(http.StatusNotFound, "", "unknown process ID: %s", err.Error()) } @@ -560,13 +553,15 @@ func (h *ProcessHandler) GetReport(c echo.Context) error { filteredReport := api.ProcessReport{} // Add the current report as a fake history entry - report.History = append(report.History, api.ProcessReportEntry{ - CreatedAt: report.CreatedAt, - Prelude: report.Prelude, - Log: report.Log, + report.History = append(report.History, api.ProcessReportHistoryEntry{ + ProcessReportEntry: api.ProcessReportEntry{ + CreatedAt: report.CreatedAt, + Prelude: report.Prelude, + Log: report.Log, + }, }) - entries := []api.ProcessReportEntry{} + entries := []api.ProcessReportHistoryEntry{} for _, r := range report.History { if createdAt != nil && exitedAt == nil { @@ -606,6 +601,53 @@ func (h *ProcessHandler) GetReport(c echo.Context) error { return c.JSON(http.StatusOK, filteredReport) } +// SetReport sets the report history of a process +// @Summary Set the report history of a process +// @Description Set the report history of a process +// @Tags v16.?.?
+// @ID process-3-set-report +// @Accept json +// @Produce json +// @Param id path string true "Process ID" +// @Param domain query string false "Domain to act on" +// @Param report body api.ProcessReport true "Process report" +// @Success 200 {string} string +// @Failure 400 {object} api.Error +// @Failure 403 {object} api.Error +// @Failure 404 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/process/{id}/report [put] +func (h *ProcessHandler) SetReport(c echo.Context) error { + ctxuser := util.DefaultContext(c, "user", "") + domain := util.DefaultQuery(c, "domain", "") + id := util.PathParam(c, "id") + + fmt.Printf("entering SetReport handler\n") + + if !h.iam.Enforce(ctxuser, domain, "process", id, "write") { + return api.Err(http.StatusForbidden, "", "You are not allowed to write this process: %s", id) + } + + tid := app.ProcessID{ + ID: id, + Domain: domain, + } + + report := api.ProcessReport{} + + if err := util.ShouldBindJSON(c, &report); err != nil { + return api.Err(http.StatusBadRequest, "", "invalid JSON: %s", err.Error()) + } + + appreport := report.Marshal() + + if err := h.restream.SetProcessReport(tid, &appreport); err != nil { + return api.Err(http.StatusNotFound, "", "unknown process ID: %s", err.Error()) + } + + return c.JSON(http.StatusOK, "OK") +} + // SearchReportHistory returns a list of matching report references // @Summary Search log history of all processes // @Description Search log history of all processes by providing patterns for process IDs and references, a state and a time range. All are optional. 
@@ -1004,50 +1046,40 @@ func (h *ProcessHandler) getProcess(id app.ProcessID, filter filter) (api.Proces return api.Process{}, err } - info := api.Process{ - ID: process.ID, - Owner: process.Owner, - Domain: process.Domain, - Reference: process.Reference, - Type: "ffmpeg", - CoreID: h.restream.ID(), - CreatedAt: process.CreatedAt, - UpdatedAt: process.UpdatedAt, - } + var config *app.Config + var state *app.State + var report *app.Report + var metadata interface{} if filter.config { - info.Config = &api.ProcessConfig{} - info.Config.Unmarshal(process.Config) + config = process.Config } if filter.state { - state, err := h.restream.GetProcessState(id) + state, err = h.restream.GetProcessState(id) if err != nil { return api.Process{}, err } - - info.State = &api.ProcessState{} - info.State.Unmarshal(state) } if filter.report { - log, err := h.restream.GetProcessLog(id) + report, err = h.restream.GetProcessReport(id) if err != nil { return api.Process{}, err } - - info.Report = &api.ProcessReport{} - info.Report.Unmarshal(log) } if filter.metadata { - data, err := h.restream.GetProcessMetadata(id, "") + metadata, err = h.restream.GetProcessMetadata(id, "") if err != nil { return api.Process{}, err } + } - info.Metadata = api.NewMetadata(data) + info := api.Process{ + CoreID: h.restream.ID(), } + info.Unmarshal(process, config, state, report, metadata) return info, nil } diff --git a/http/handler/api/process_test.go b/http/handler/api/process_test.go index fd94b385..bf076af3 100644 --- a/http/handler/api/process_test.go +++ b/http/handler/api/process_test.go @@ -12,8 +12,8 @@ import ( "github.com/datarhei/core/v16/http/api" "github.com/datarhei/core/v16/http/mock" "github.com/datarhei/core/v16/iam" - "github.com/datarhei/core/v16/iam/access" "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/io/fs" "github.com/labstack/echo/v4" @@ -37,7 +37,7 @@ func getDummyRestreamHandler() (*ProcessHandler, error) { 
return nil, fmt.Errorf("failed to create memory filesystem: %w", err) } - policyAdapter, err := access.NewJSONAdapter(memfs, "./policy.json", nil) + policyAdapter, err := policy.NewJSONAdapter(memfs, "./policy.json", nil) if err != nil { return nil, err } diff --git a/http/handler/filesystem.go b/http/handler/filesystem.go index 0700e58d..88d5c151 100644 --- a/http/handler/filesystem.go +++ b/http/handler/filesystem.go @@ -115,9 +115,9 @@ func (h *FSHandler) GetFile(c echo.Context) error { } c.Response().Header().Set("Content-Range", ranges[0].contentRange(stat.Size())) - streamFile = &limitReader{ - r: streamFile, - size: int(ranges[0].length), + streamFile = &io.LimitedReader{ + R: streamFile, + N: ranges[0].length, } status = http.StatusPartialContent @@ -134,7 +134,7 @@ func (h *FSHandler) PutFile(c echo.Context) error { req := c.Request() - _, created, err := h.FS.Filesystem.WriteFileReader(path, req.Body) + _, created, err := h.FS.Filesystem.WriteFileReader(path, req.Body, -1) if err != nil { return api.Err(http.StatusBadRequest, "", "%s", err.Error()) } @@ -330,32 +330,6 @@ func (h *FSHandler) ListFiles(c echo.Context) error { return c.JSON(http.StatusOK, fileinfos) } -type limitReader struct { - r io.Reader - size int -} - -func (l *limitReader) Read(p []byte) (int, error) { - if l.size == 0 { - return 0, io.EOF - } - - len := len(p) - - if len > l.size { - p = p[:l.size] - } - - i, err := l.r.Read(p) - if err != nil { - return i, err - } - - l.size -= i - - return i, nil -} - // From: github.com/golang/go/net/http/fs.go@7dc9fcb // errNoOverlap is returned by serveContent's parseRange if first-byte-pos of diff --git a/http/middleware/iam/iam_test.go b/http/middleware/iam/iam_test.go index 621c5bee..62a495b0 100644 --- a/http/middleware/iam/iam_test.go +++ b/http/middleware/iam/iam_test.go @@ -14,8 +14,8 @@ import ( apihandler "github.com/datarhei/core/v16/http/handler/api" "github.com/datarhei/core/v16/http/validator" "github.com/datarhei/core/v16/iam" - 
iamaccess "github.com/datarhei/core/v16/iam/access" iamidentity "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/io/fs" "github.com/labstack/echo/v4" @@ -30,7 +30,7 @@ func getIAM() (iam.IAM, error) { return nil, err } - policyAdapter, err := iamaccess.NewJSONAdapter(dummyfs, "./policy.json", nil) + policyAdapter, err := policy.NewJSONAdapter(dummyfs, "./policy.json", nil) if err != nil { return nil, err } diff --git a/http/server.go b/http/server.go index f58b04d0..adde251a 100644 --- a/http/server.go +++ b/http/server.go @@ -121,7 +121,7 @@ type server struct { v3handler struct { log *api.LogHandler events *api.EventsHandler - restream *api.ProcessHandler + process *api.ProcessHandler rtmp *api.RTMPHandler srt *api.SRTHandler config *api.ConfigHandler @@ -267,7 +267,7 @@ func NewServer(config Config) (serverhandler.Server, error) { ) if config.Restream != nil { - s.v3handler.restream = api.NewProcess( + s.v3handler.process = api.NewProcess( config.Restream, config.IAM, ) @@ -618,49 +618,50 @@ func (s *server) setRoutesV3(v3 *echo.Group) { } // v3 Restreamer - if s.v3handler.restream != nil { - v3.GET("/skills", s.v3handler.restream.Skills) - v3.GET("/skills/reload", s.v3handler.restream.ReloadSkills) + if s.v3handler.process != nil { + v3.GET("/skills", s.v3handler.process.Skills) + v3.GET("/skills/reload", s.v3handler.process.ReloadSkills) - v3.GET("/process", s.v3handler.restream.GetAll) - v3.GET("/process/:id", s.v3handler.restream.Get) + v3.GET("/process", s.v3handler.process.GetAll) + v3.GET("/process/:id", s.v3handler.process.Get) - v3.GET("/process/:id/config", s.v3handler.restream.GetConfig) - v3.GET("/process/:id/state", s.v3handler.restream.GetState) - v3.GET("/process/:id/report", s.v3handler.restream.GetReport) + v3.GET("/process/:id/config", s.v3handler.process.GetConfig) + v3.GET("/process/:id/state", s.v3handler.process.GetState) + v3.GET("/process/:id/report", 
s.v3handler.process.GetReport) - v3.GET("/process/:id/metadata", s.v3handler.restream.GetProcessMetadata) - v3.GET("/process/:id/metadata/:key", s.v3handler.restream.GetProcessMetadata) + v3.GET("/process/:id/metadata", s.v3handler.process.GetProcessMetadata) + v3.GET("/process/:id/metadata/:key", s.v3handler.process.GetProcessMetadata) - v3.GET("/metadata", s.v3handler.restream.GetMetadata) - v3.GET("/metadata/:key", s.v3handler.restream.GetMetadata) + v3.GET("/metadata", s.v3handler.process.GetMetadata) + v3.GET("/metadata/:key", s.v3handler.process.GetMetadata) if !s.readOnly { - v3.POST("/process/probe", s.v3handler.restream.ProbeConfig) - v3.GET("/process/:id/probe", s.v3handler.restream.Probe) - v3.POST("/process", s.v3handler.restream.Add) - v3.PUT("/process/:id", s.v3handler.restream.Update) - v3.DELETE("/process/:id", s.v3handler.restream.Delete) - v3.PUT("/process/:id/command", s.v3handler.restream.Command) - v3.PUT("/process/:id/metadata/:key", s.v3handler.restream.SetProcessMetadata) - v3.PUT("/metadata/:key", s.v3handler.restream.SetMetadata) + v3.POST("/process/probe", s.v3handler.process.ProbeConfig) + v3.GET("/process/:id/probe", s.v3handler.process.Probe) + v3.POST("/process", s.v3handler.process.Add) + v3.PUT("/process/:id", s.v3handler.process.Update) + v3.DELETE("/process/:id", s.v3handler.process.Delete) + v3.PUT("/process/:id/command", s.v3handler.process.Command) + v3.PUT("/process/:id/report", s.v3handler.process.SetReport) + v3.PUT("/process/:id/metadata/:key", s.v3handler.process.SetProcessMetadata) + v3.PUT("/metadata/:key", s.v3handler.process.SetMetadata) } // v3 Playout - v3.GET("/process/:id/playout/:inputid/status", s.v3handler.restream.PlayoutStatus) - v3.GET("/process/:id/playout/:inputid/reopen", s.v3handler.restream.PlayoutReopenInput) - v3.GET("/process/:id/playout/:inputid/keyframe/*", s.v3handler.restream.PlayoutKeyframe) - v3.GET("/process/:id/playout/:inputid/errorframe/encode", s.v3handler.restream.PlayoutEncodeErrorframe) 
+ v3.GET("/process/:id/playout/:inputid/status", s.v3handler.process.PlayoutStatus) + v3.GET("/process/:id/playout/:inputid/reopen", s.v3handler.process.PlayoutReopenInput) + v3.GET("/process/:id/playout/:inputid/keyframe/*", s.v3handler.process.PlayoutKeyframe) + v3.GET("/process/:id/playout/:inputid/errorframe/encode", s.v3handler.process.PlayoutEncodeErrorframe) if !s.readOnly { - v3.PUT("/process/:id/playout/:inputid/errorframe/*", s.v3handler.restream.PlayoutSetErrorframe) - v3.POST("/process/:id/playout/:inputid/errorframe/*", s.v3handler.restream.PlayoutSetErrorframe) + v3.PUT("/process/:id/playout/:inputid/errorframe/*", s.v3handler.process.PlayoutSetErrorframe) + v3.POST("/process/:id/playout/:inputid/errorframe/*", s.v3handler.process.PlayoutSetErrorframe) - v3.PUT("/process/:id/playout/:inputid/stream", s.v3handler.restream.PlayoutSetStream) + v3.PUT("/process/:id/playout/:inputid/stream", s.v3handler.process.PlayoutSetStream) } // v3 Report - v3.GET("/report/process", s.v3handler.restream.SearchReportHistory) + v3.GET("/report/process", s.v3handler.process.SearchReportHistory) } // v3 Filesystems diff --git a/iam/access/access.go b/iam/access/access.go deleted file mode 100644 index 13d9c088..00000000 --- a/iam/access/access.go +++ /dev/null @@ -1,182 +0,0 @@ -package access - -import ( - "sort" - "strings" - - "github.com/datarhei/core/v16/log" - - "github.com/casbin/casbin/v2" - "github.com/casbin/casbin/v2/model" -) - -type Policy struct { - Name string - Domain string - Types []string - Resource string - Actions []string -} - -type Enforcer interface { - Enforce(name, domain, rtype, resource, action string) (bool, string) - - HasDomain(name string) bool - ListDomains() []string -} - -type Manager interface { - Enforcer - - HasPolicy(name, domain string, types []string, resource string, actions []string) bool - AddPolicy(name, domain string, types []string, resource string, actions []string) error - RemovePolicy(name, domain string, types []string, 
resource string, actions []string) error - ListPolicies(name, domain string, types []string, resource string, actions []string) []Policy - ReloadPolicies() error -} - -type access struct { - logger log.Logger - - adapter Adapter - model model.Model - enforcer *casbin.SyncedEnforcer -} - -type Config struct { - Adapter Adapter - Logger log.Logger -} - -func New(config Config) (Manager, error) { - am := &access{ - adapter: config.Adapter, - logger: config.Logger, - } - - if am.logger == nil { - am.logger = log.New("") - } - - m := model.NewModel() - m.AddDef("r", "r", "sub, dom, obj, act") - m.AddDef("p", "p", "sub, dom, obj, act") - m.AddDef("g", "g", "_, _, _") - m.AddDef("e", "e", "some(where (p.eft == allow))") - m.AddDef("m", "m", `g(r.sub, p.sub, r.dom) && r.dom == p.dom && ResourceMatch(r.obj, p.obj) && ActionMatch(r.act, p.act) || r.sub == "$superuser"`) - - e, err := casbin.NewSyncedEnforcer(m, am.adapter) - if err != nil { - return nil, err - } - - e.AddFunction("ResourceMatch", resourceMatchFunc) - e.AddFunction("ActionMatch", actionMatchFunc) - - am.enforcer = e - am.model = m - - return am, nil -} - -func (am *access) HasPolicy(name, domain string, types []string, resource string, actions []string) bool { - policy := []string{name, domain, EncodeResource(types, resource), EncodeActions(actions)} - - hasPolicy, _ := am.enforcer.HasPolicy(policy) - - return hasPolicy -} - -func (am *access) AddPolicy(name, domain string, types []string, resource string, actions []string) error { - policy := []string{name, domain, EncodeResource(types, resource), EncodeActions(actions)} - - if hasPolicy, _ := am.enforcer.HasPolicy(policy); hasPolicy { - return nil - } - - _, err := am.enforcer.AddPolicy(policy) - - return err -} - -func (am *access) RemovePolicy(name, domain string, types []string, resource string, actions []string) error { - policies, err := am.enforcer.GetFilteredPolicy(0, name, domain, EncodeResource(types, resource), EncodeActions(actions)) - if err != 
nil { - return err - } - - _, err = am.enforcer.RemovePolicies(policies) - - return err -} - -func (am *access) ListPolicies(name, domain string, types []string, resource string, actions []string) []Policy { - policies := []Policy{} - - ps, err := am.enforcer.GetFilteredPolicy(0, name, domain, EncodeResource(types, resource), EncodeActions(actions)) - if err != nil { - return policies - } - - for _, p := range ps { - types, resource := DecodeResource(p[2]) - policies = append(policies, Policy{ - Name: p[0], - Domain: p[1], - Types: types, - Resource: resource, - Actions: DecodeActions(p[3]), - }) - } - - return policies -} - -func (am *access) ReloadPolicies() error { - am.enforcer.ClearPolicy() - - return am.enforcer.LoadPolicy() -} - -func (am *access) HasDomain(name string) bool { - return am.adapter.HasDomain(name) -} - -func (am *access) ListDomains() []string { - return am.adapter.AllDomains() -} - -func (am *access) Enforce(name, domain, rtype, resource, action string) (bool, string) { - resource = rtype + ":" + resource - - ok, rule, _ := am.enforcer.EnforceEx(name, domain, resource, action) - - return ok, strings.Join(rule, ", ") -} - -func EncodeActions(actions []string) string { - return strings.Join(actions, "|") -} - -func DecodeActions(actions string) []string { - return strings.Split(actions, "|") -} - -func EncodeResource(types []string, resource string) string { - if len(types) == 0 { - return resource - } - - sort.Strings(types) - - return strings.Join(types, "|") + ":" + resource -} - -func DecodeResource(resource string) ([]string, string) { - before, after, found := strings.Cut(resource, ":") - if !found { - return []string{"$none"}, resource - } - - return strings.Split(before, "|"), after -} diff --git a/iam/access/adapter.go b/iam/access/adapter.go deleted file mode 100644 index 308bd578..00000000 --- a/iam/access/adapter.go +++ /dev/null @@ -1,588 +0,0 @@ -package access - -import ( - "errors" - "fmt" - "sort" - "strings" - "sync" - - 
"github.com/datarhei/core/v16/encoding/json" - "github.com/datarhei/core/v16/io/fs" - "github.com/datarhei/core/v16/log" - - "github.com/casbin/casbin/v2/model" - "github.com/casbin/casbin/v2/persist" -) - -// Adapter is the file adapter for Casbin. -// It can load policy from file or save policy to file. -type adapter struct { - fs fs.Filesystem - filePath string - logger log.Logger - domains []Domain - lock sync.Mutex -} - -type Adapter interface { - persist.BatchAdapter - - AllDomains() []string - HasDomain(string) bool -} - -func NewJSONAdapter(fs fs.Filesystem, filePath string, logger log.Logger) (Adapter, error) { - a := &adapter{ - fs: fs, - filePath: filePath, - logger: logger, - } - - if a.fs == nil { - return nil, fmt.Errorf("a filesystem has to be provided") - } - - if len(a.filePath) == 0 { - return nil, fmt.Errorf("invalid file path, file path cannot be empty") - } - - if a.logger == nil { - a.logger = log.New("") - } - - return a, nil -} - -// Adapter -func (a *adapter) LoadPolicy(model model.Model) error { - a.lock.Lock() - defer a.lock.Unlock() - - return a.loadPolicyFile(model) -} - -func (a *adapter) loadPolicyFile(model model.Model) error { - if _, err := a.fs.Stat(a.filePath); err != nil { - if errors.Is(err, fs.ErrNotExist) { - a.domains = []Domain{} - return nil - } - - return err - } - - data, err := a.fs.ReadFile(a.filePath) - if err != nil { - return err - } - - domains := []Domain{} - - err = json.Unmarshal(data, &domains) - if err != nil { - return err - } - - rule := [5]string{} - for _, domain := range domains { - rule[0] = "p" - rule[2] = domain.Name - for name, roles := range domain.Roles { - rule[1] = "role:" + name - for _, role := range roles { - rule[3] = role.Resource - rule[4] = formatList(role.Actions) - - if err := a.importPolicy(model, rule[0:5]); err != nil { - return err - } - } - } - - for _, policy := range domain.Policies { - rule[1] = policy.Username - rule[3] = policy.Resource - rule[4] = formatList(policy.Actions) - - 
if err := a.importPolicy(model, rule[0:5]); err != nil { - return err - } - } - - rule[0] = "g" - rule[3] = domain.Name - - for _, ug := range domain.UserRoles { - rule[1] = ug.Username - rule[2] = "role:" + ug.Role - - if err := a.importPolicy(model, rule[0:4]); err != nil { - return err - } - } - } - - a.domains = domains - - return nil -} - -func (a *adapter) importPolicy(model model.Model, rule []string) error { - copiedRule := make([]string, len(rule)) - copy(copiedRule, rule) - - a.logger.Debug().WithFields(log.Fields{ - "subject": copiedRule[1], - "domain": copiedRule[2], - "resource": copiedRule[3], - "actions": copiedRule[4], - }).Log("Imported policy") - - ok, err := model.HasPolicyEx(copiedRule[0], copiedRule[0], copiedRule[1:]) - if err != nil { - return err - } - if ok { - return nil // skip duplicated policy - } - - model.AddPolicy(copiedRule[0], copiedRule[0], copiedRule[1:]) - - return nil -} - -// Adapter -func (a *adapter) SavePolicy(model model.Model) error { - a.lock.Lock() - defer a.lock.Unlock() - - return a.savePolicyFile() -} - -func (a *adapter) savePolicyFile() error { - jsondata, err := json.MarshalIndent(a.domains, "", " ") - if err != nil { - return err - } - - _, _, err = a.fs.WriteFileSafe(a.filePath, jsondata) - - return err -} - -// Adapter (auto-save) -func (a *adapter) AddPolicy(sec, ptype string, rule []string) error { - a.lock.Lock() - defer a.lock.Unlock() - - err := a.addPolicy(ptype, rule) - if err != nil { - return err - } - - return a.savePolicyFile() -} - -// BatchAdapter (auto-save) -func (a *adapter) AddPolicies(sec string, ptype string, rules [][]string) error { - a.lock.Lock() - defer a.lock.Unlock() - - for _, rule := range rules { - err := a.addPolicy(ptype, rule) - if err != nil { - return err - } - } - - return a.savePolicyFile() -} - -func (a *adapter) addPolicy(ptype string, rule []string) error { - ok, err := a.hasPolicy(ptype, rule) - if err != nil { - return err - } - - if ok { - // the policy is already 
there, nothing to add - return nil - } - - username := "" - role := "" - domain := "" - resource := "" - actions := "" - - if ptype == "p" { - username = rule[0] - domain = rule[1] - resource = rule[2] - actions = formatList(rule[3]) - - a.logger.Debug().WithFields(log.Fields{ - "subject": username, - "domain": domain, - "resource": resource, - "actions": actions, - }).Log("Adding policy") - } else if ptype == "g" { - username = rule[0] - role = rule[1] - domain = rule[2] - - a.logger.Debug().WithFields(log.Fields{ - "subject": username, - "role": role, - "domain": domain, - }).Log("Adding role mapping") - } else { - return fmt.Errorf("unknown ptype: %s", ptype) - } - - var dom *Domain = nil - for i := range a.domains { - if a.domains[i].Name == domain { - dom = &a.domains[i] - break - } - } - - if dom == nil { - g := Domain{ - Name: domain, - Roles: map[string][]Role{}, - UserRoles: []MapUserRole{}, - Policies: []DomainPolicy{}, - } - - a.domains = append(a.domains, g) - dom = &a.domains[len(a.domains)-1] - } - - if ptype == "p" { - if strings.HasPrefix(username, "role:") { - if dom.Roles == nil { - dom.Roles = make(map[string][]Role) - } - - role := strings.TrimPrefix(username, "role:") - dom.Roles[role] = append(dom.Roles[role], Role{ - Resource: resource, - Actions: actions, - }) - } else { - dom.Policies = append(dom.Policies, DomainPolicy{ - Username: username, - Role: Role{ - Resource: resource, - Actions: actions, - }, - }) - } - } else { - dom.UserRoles = append(dom.UserRoles, MapUserRole{ - Username: username, - Role: strings.TrimPrefix(role, "role:"), - }) - } - - return nil -} - -func (a *adapter) hasPolicy(ptype string, rule []string) (bool, error) { - var username string - var role string - var domain string - var resource string - var actions string - - if ptype == "p" { - if len(rule) < 4 { - return false, fmt.Errorf("invalid rule length. 
must be 'user/role, domain, resource, actions'") - } - - username = rule[0] - domain = rule[1] - resource = rule[2] - actions = formatList(rule[3]) - } else if ptype == "g" { - if len(rule) < 3 { - return false, fmt.Errorf("invalid rule length. must be 'user, role, domain'") - } - - username = rule[0] - role = rule[1] - domain = rule[2] - } else { - return false, fmt.Errorf("unknown ptype: %s", ptype) - } - - var dom *Domain = nil - for i := range a.domains { - if a.domains[i].Name == domain { - dom = &a.domains[i] - break - } - } - - if dom == nil { - // if we can't find any domain with that name, then the policy doesn't exist - return false, nil - } - - if ptype == "p" { - isRole := false - if strings.HasPrefix(username, "role:") { - isRole = true - username = strings.TrimPrefix(username, "role:") - } - - if isRole { - roles, ok := dom.Roles[username] - if !ok { - // unknown role, policy doesn't exist - return false, nil - } - - for _, role := range roles { - if role.Resource == resource && formatList(role.Actions) == actions { - return true, nil - } - } - } else { - for _, p := range dom.Policies { - if p.Username == username && p.Resource == resource && formatList(p.Actions) == actions { - return true, nil - } - } - } - } else { - role = strings.TrimPrefix(role, "role:") - for _, user := range dom.UserRoles { - if user.Username == username && user.Role == role { - return true, nil - } - } - } - - return false, nil -} - -// Adapter (auto-save) -func (a *adapter) RemovePolicy(sec string, ptype string, rule []string) error { - a.lock.Lock() - defer a.lock.Unlock() - - err := a.removePolicy(ptype, rule) - if err != nil { - return err - } - - return a.savePolicyFile() -} - -// BatchAdapter (auto-save) -func (a *adapter) RemovePolicies(sec string, ptype string, rules [][]string) error { - a.lock.Lock() - defer a.lock.Unlock() - - for _, rule := range rules { - err := a.removePolicy(ptype, rule) - if err != nil { - return err - } - } - - return a.savePolicyFile() -} - 
-func (a *adapter) removePolicy(ptype string, rule []string) error { - ok, err := a.hasPolicy(ptype, rule) - if err != nil { - return err - } - - if !ok { - // the policy is not there, nothing to remove - return nil - } - - username := "" - role := "" - domain := "" - resource := "" - actions := "" - - if ptype == "p" { - username = rule[0] - domain = rule[1] - resource = rule[2] - actions = formatList(rule[3]) - - a.logger.Debug().WithFields(log.Fields{ - "subject": username, - "domain": domain, - "resource": resource, - "actions": actions, - }).Log("Removing policy") - } else if ptype == "g" { - username = rule[0] - role = rule[1] - domain = rule[2] - - a.logger.Debug().WithFields(log.Fields{ - "subject": username, - "role": role, - "domain": domain, - }).Log("Removing role mapping") - } else { - return fmt.Errorf("unknown ptype: %s", ptype) - } - - var dom *Domain = nil - for i := range a.domains { - if a.domains[i].Name == domain { - dom = &a.domains[i] - break - } - } - - if ptype == "p" { - isRole := false - if strings.HasPrefix(username, "role:") { - isRole = true - username = strings.TrimPrefix(username, "role:") - } - - if isRole { - roles := dom.Roles[username] - - newRoles := []Role{} - - for _, role := range roles { - if role.Resource == resource && formatList(role.Actions) == actions { - continue - } - - newRoles = append(newRoles, role) - } - - dom.Roles[username] = newRoles - } else { - policies := []DomainPolicy{} - - for _, p := range dom.Policies { - if p.Username == username && p.Resource == resource && formatList(p.Actions) == actions { - continue - } - - policies = append(policies, p) - } - - dom.Policies = policies - } - } else { - role = strings.TrimPrefix(role, "role:") - - users := []MapUserRole{} - - for _, user := range dom.UserRoles { - if user.Username == username && user.Role == role { - continue - } - - users = append(users, user) - } - - dom.UserRoles = users - } - - // Remove the group if there are no rules and policies - if 
len(dom.Roles) == 0 && len(dom.UserRoles) == 0 && len(dom.Policies) == 0 { - groups := []Domain{} - - for _, g := range a.domains { - if g.Name == dom.Name { - continue - } - - groups = append(groups, g) - } - - a.domains = groups - } - - return nil -} - -// Adapter -func (a *adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { - return fmt.Errorf("not implemented") -} - -func (a *adapter) AllDomains() []string { - a.lock.Lock() - defer a.lock.Unlock() - - names := []string{} - - for _, domain := range a.domains { - if domain.Name[0] == '$' { - continue - } - - names = append(names, domain.Name) - } - - return names -} - -func (a *adapter) HasDomain(name string) bool { - a.lock.Lock() - defer a.lock.Unlock() - - for _, domain := range a.domains { - if domain.Name[0] == '$' { - continue - } - - if domain.Name == name { - return true - } - } - - return false -} - -type Domain struct { - Name string `json:"name"` - Roles map[string][]Role `json:"roles"` - UserRoles []MapUserRole `json:"userroles"` - Policies []DomainPolicy `json:"policies"` -} - -type Role struct { - Resource string `json:"resource"` - Actions string `json:"actions"` -} - -type MapUserRole struct { - Username string `json:"username"` - Role string `json:"role"` -} - -type DomainPolicy struct { - Username string `json:"username"` - Role -} - -func formatList(list string) string { - a := strings.Split(list, "|") - - sort.Strings(a) - - return strings.Join(a, "|") -} diff --git a/iam/access/functions.go b/iam/access/functions.go deleted file mode 100644 index 07bc70e9..00000000 --- a/iam/access/functions.go +++ /dev/null @@ -1,116 +0,0 @@ -package access - -import ( - "strings" - "sync" - - "github.com/datarhei/core/v16/glob" -) - -var globcache = map[string]glob.Glob{} -var globcacheMu = sync.RWMutex{} - -func resourceMatch(request, policy string) bool { - reqPrefix, reqResource := getPrefix(request) - polPrefix, polResource := getPrefix(policy) - - var 
match bool = false - var err error = nil - - reqType := strings.ToLower(reqPrefix) - polTypes := strings.Split(strings.ToLower(polPrefix), "|") - - for _, polType := range polTypes { - if reqType != polType { - continue - } - - match = true - break - } - - if !match { - return false - } - - match = false - - key := reqType + polResource - - if reqType == "api" || reqType == "fs" || reqType == "rtmp" || reqType == "srt" { - globcacheMu.RLock() - matcher, ok := globcache[key] - globcacheMu.RUnlock() - if !ok { - matcher, err = glob.Compile(polResource, rune('/')) - if err != nil { - return false - } - globcacheMu.Lock() - globcache[key] = matcher - globcacheMu.Unlock() - } - - match = matcher.Match(reqResource) - } else { - globcacheMu.RLock() - matcher, ok := globcache[key] - globcacheMu.RUnlock() - if !ok { - matcher, err = glob.Compile(polResource) - if err != nil { - return false - } - globcacheMu.Lock() - globcache[key] = matcher - globcacheMu.Unlock() - } - - match = matcher.Match(reqResource) - } - - return match -} - -func resourceMatchFunc(args ...interface{}) (interface{}, error) { - request := args[0].(string) - policy := args[1].(string) - - return (bool)(resourceMatch(request, policy)), nil -} - -func actionMatch(request string, policy string) bool { - request = strings.ToUpper(request) - actions := strings.Split(strings.ToUpper(policy), "|") - if len(actions) == 0 { - return false - } - - if len(actions) == 1 && actions[0] == "ANY" { - return true - } - - for _, a := range actions { - if request == a { - return true - } - } - - return false -} - -func actionMatchFunc(args ...interface{}) (interface{}, error) { - request := args[0].(string) - policy := args[1].(string) - - return (bool)(actionMatch(request, policy)), nil -} - -func getPrefix(s string) (string, string) { - prefix, resource, found := strings.Cut(s, ":") - if !found { - return "", s - } - - return prefix, resource -} diff --git a/iam/iam.go b/iam/iam.go index 6b22404b..d84230e3 100644 --- 
a/iam/iam.go +++ b/iam/iam.go @@ -1,8 +1,8 @@ package iam import ( - "github.com/datarhei/core/v16/iam/access" "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/log" ) @@ -19,7 +19,7 @@ type IAM interface { HasPolicy(name, domain string, types []string, resource string, actions []string) bool AddPolicy(name, domain string, types []string, resource string, actions []string) error RemovePolicy(name, domain string, types []string, resource string, actions []string) error - ListPolicies(name, domain string, types []string, resource string, actions []string) []access.Policy + ListPolicies(name, domain string, types []string, resource string, actions []string) []policy.Policy ReloadPolicies() error Validators() []string @@ -42,13 +42,13 @@ type IAM interface { type iam struct { im identity.Manager - am access.Manager + am policy.Manager logger log.Logger } type Config struct { - PolicyAdapter access.Adapter + PolicyAdapter policy.Adapter IdentityAdapter identity.Adapter Superuser identity.User JWTRealm string @@ -68,9 +68,10 @@ func New(config Config) (IAM, error) { return nil, err } - am, err := access.New(access.Config{ - Adapter: config.PolicyAdapter, - Logger: config.Logger, + am, err := policy.New(policy.Config{ + Superuser: "$superuser", + Adapter: config.PolicyAdapter, + Logger: config.Logger, }) if err != nil { return nil, err @@ -126,16 +127,12 @@ func (i *iam) Enforce(name, domain, rtype, resource, action string) bool { name = "$superuser" } - ok, rule := i.am.Enforce(name, domain, rtype, resource, action) + ok, policy := i.am.Enforce(name, domain, rtype, resource, action) if !ok { l.Log("no match") } else { - if name == "$superuser" { - rule = "" - } - - l.WithField("rule", rule).Log("match") + l.WithField("policy", policy).Log("match") } return ok @@ -234,11 +231,11 @@ func (i *iam) RemovePolicy(name, domain string, types []string, resource string, } } - return i.am.RemovePolicy(name, domain, 
types, resource, actions) + return i.am.RemovePolicy(name, domain) } -func (i *iam) ListPolicies(name, domain string, types []string, resource string, actions []string) []access.Policy { - return i.am.ListPolicies(name, domain, types, resource, actions) +func (i *iam) ListPolicies(name, domain string, types []string, resource string, actions []string) []policy.Policy { + return i.am.ListPolicies(name, domain) } func (i *iam) ReloadPolicies() error { diff --git a/iam/policy/access.go b/iam/policy/access.go new file mode 100644 index 00000000..1d69bfce --- /dev/null +++ b/iam/policy/access.go @@ -0,0 +1,35 @@ +package policy + +import ( + "fmt" + "strings" +) + +type Policy struct { + Name string + Domain string + Types []string + Resource string + Actions []string +} + +func (p Policy) String() string { + return fmt.Sprintf("%s@%s (%s):%s %s", p.Name, p.Domain, strings.Join(p.Types, "|"), p.Resource, strings.Join(p.Actions, "|")) +} + +type Enforcer interface { + Enforce(name, domain, rtype, resource, action string) (bool, Policy) + + HasDomain(name string) bool + ListDomains() []string +} + +type Manager interface { + Enforcer + + HasPolicy(name, domain string, types []string, resource string, actions []string) bool + AddPolicy(name, domain string, types []string, resource string, actions []string) error + RemovePolicy(name, domain string) error + ListPolicies(name, domain string) []Policy + ReloadPolicies() error +} diff --git a/iam/policy/adapter.go b/iam/policy/adapter.go new file mode 100644 index 00000000..ccc0c21c --- /dev/null +++ b/iam/policy/adapter.go @@ -0,0 +1,418 @@ +package policy + +import ( + "encoding/json" + "errors" + "fmt" + "sort" + "strings" + "sync" + + "github.com/datarhei/core/v16/io/fs" + "github.com/datarhei/core/v16/log" +) + +type policyadapter struct { + fs fs.Filesystem + filePath string + logger log.Logger + domains []Domain + lock sync.Mutex +} + +type Adapter interface { + AddPolicy(policy Policy) error + LoadPolicy(model Model) 
error + RemovePolicy(policy Policy) error + SavePolicy(model Model) error + AddPolicies(policies []Policy) error + RemovePolicies(policies []Policy) error + + AllDomains() []string + HasDomain(string) bool +} + +func NewJSONAdapter(fs fs.Filesystem, filePath string, logger log.Logger) (Adapter, error) { + a := &policyadapter{ + fs: fs, + filePath: filePath, + logger: logger, + } + + if a.fs == nil { + return nil, fmt.Errorf("a filesystem has to be provided") + } + + if len(a.filePath) == 0 { + return nil, fmt.Errorf("invalid file path, file path cannot be empty") + } + + if a.logger == nil { + a.logger = log.New("") + } + + return a, nil +} + +// Adapter +func (a *policyadapter) LoadPolicy(model Model) error { + a.lock.Lock() + defer a.lock.Unlock() + + return a.loadPolicyFile(model) +} + +func (a *policyadapter) loadPolicyFile(model Model) error { + if _, err := a.fs.Stat(a.filePath); err != nil { + if errors.Is(err, fs.ErrNotExist) { + a.domains = []Domain{} + return nil + } + + return err + } + + data, err := a.fs.ReadFile(a.filePath) + if err != nil { + return err + } + + domains := []Domain{} + + err = json.Unmarshal(data, &domains) + if err != nil { + return err + } + + for _, domain := range domains { + for _, policy := range domain.Policies { + rtypes, resource := DecodeResource(policy.Resource) + p := normalizePolicy(Policy{ + Name: policy.Username, + Domain: domain.Name, + Types: rtypes, + Resource: resource, + Actions: DecodeActions(policy.Actions), + }) + + if err := a.importPolicy(model, p); err != nil { + return err + } + } + } + + a.domains = domains + + return nil +} + +func (a *policyadapter) importPolicy(model Model, policy Policy) error { + a.logger.Debug().WithFields(log.Fields{ + "subject": policy.Name, + "domain": policy.Domain, + "types": policy.Types, + "resource": policy.Resource, + "actions": policy.Actions, + }).Log("Imported policy") + + model.AddPolicy(policy) + + return nil +} + +// Adapter +func (a *policyadapter) SavePolicy(model 
Model) error { + a.lock.Lock() + defer a.lock.Unlock() + + return a.savePolicyFile() +} + +func (a *policyadapter) savePolicyFile() error { + jsondata, err := json.MarshalIndent(a.domains, "", " ") + if err != nil { + return err + } + + _, _, err = a.fs.WriteFileSafe(a.filePath, jsondata) + + return err +} + +// Adapter (auto-save) +func (a *policyadapter) AddPolicy(policy Policy) error { + a.lock.Lock() + defer a.lock.Unlock() + + err := a.addPolicy(policy) + if err != nil { + return err + } + + return a.savePolicyFile() +} + +// BatchAdapter (auto-save) +func (a *policyadapter) AddPolicies(policies []Policy) error { + a.lock.Lock() + defer a.lock.Unlock() + + for _, policy := range policies { + err := a.addPolicy(policy) + if err != nil { + return err + } + } + + return a.savePolicyFile() +} + +func (a *policyadapter) addPolicy(policy Policy) error { + ok, err := a.hasPolicy(policy) + if err != nil { + return err + } + + if ok { + // the policy is already there, nothing to add + return nil + } + + policy = normalizePolicy(policy) + + username := policy.Name + domain := policy.Domain + resource := EncodeResource(policy.Types, policy.Resource) + actions := EncodeActions(policy.Actions) + + a.logger.Debug().WithFields(log.Fields{ + "subject": username, + "domain": domain, + "resource": resource, + "actions": actions, + }).Log("Adding policy") + + var dom *Domain = nil + for i := range a.domains { + if a.domains[i].Name == domain { + dom = &a.domains[i] + break + } + } + + if dom == nil { + g := Domain{ + Name: domain, + Policies: []DomainPolicy{}, + } + + a.domains = append(a.domains, g) + dom = &a.domains[len(a.domains)-1] + } + + dom.Policies = append(dom.Policies, DomainPolicy{ + Username: username, + Resource: resource, + Actions: actions, + }) + + return nil +} + +func (a *policyadapter) hasPolicy(policy Policy) (bool, error) { + policy = normalizePolicy(policy) + + username := policy.Name + domain := policy.Domain + resource := EncodeResource(policy.Types, 
policy.Resource) + actions := EncodeActions(policy.Actions) + + var dom *Domain = nil + for i := range a.domains { + if a.domains[i].Name == domain { + dom = &a.domains[i] + break + } + } + + if dom == nil { + // if we can't find any domain with that name, then the policy doesn't exist + return false, nil + } + + for _, p := range dom.Policies { + if p.Username == username && p.Resource == resource && p.Actions == actions { + return true, nil + } + } + + return false, nil +} + +// Adapter (auto-save) +func (a *policyadapter) RemovePolicy(policy Policy) error { + a.lock.Lock() + defer a.lock.Unlock() + + err := a.removePolicy(policy) + if err != nil { + return err + } + + return a.savePolicyFile() +} + +// BatchAdapter (auto-save) +func (a *policyadapter) RemovePolicies(policies []Policy) error { + a.lock.Lock() + defer a.lock.Unlock() + + for _, policy := range policies { + err := a.removePolicy(policy) + if err != nil { + return err + } + } + + return a.savePolicyFile() +} + +func (a *policyadapter) removePolicy(policy Policy) error { + ok, err := a.hasPolicy(policy) + if err != nil { + return err + } + + if !ok { + // the policy is not there, nothing to remove + return nil + } + + policy = normalizePolicy(policy) + + username := policy.Name + domain := policy.Domain + resource := EncodeResource(policy.Types, policy.Resource) + actions := EncodeActions(policy.Actions) + + a.logger.Debug().WithFields(log.Fields{ + "subject": username, + "domain": domain, + "resource": resource, + "actions": actions, + }).Log("Removing policy") + + var dom *Domain = nil + for i := range a.domains { + if a.domains[i].Name == domain { + dom = &a.domains[i] + break + } + } + + policies := []DomainPolicy{} + + for _, p := range dom.Policies { + if p.Username == username && p.Resource == resource && p.Actions == actions { + continue + } + + policies = append(policies, p) + } + + dom.Policies = policies + + // Remove the group if there are no rules and policies + if len(dom.Policies) == 0 
{ + groups := []Domain{} + + for _, g := range a.domains { + if g.Name == dom.Name { + continue + } + + groups = append(groups, g) + } + + a.domains = groups + } + + return nil +} + +// Adapter +func (a *policyadapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { + return fmt.Errorf("not implemented") +} + +func (a *policyadapter) AllDomains() []string { + a.lock.Lock() + defer a.lock.Unlock() + + names := []string{} + + for _, domain := range a.domains { + if domain.Name[0] == '$' { + continue + } + + names = append(names, domain.Name) + } + + return names +} + +func (a *policyadapter) HasDomain(name string) bool { + a.lock.Lock() + defer a.lock.Unlock() + + for _, domain := range a.domains { + if domain.Name[0] == '$' { + continue + } + + if domain.Name == name { + return true + } + } + + return false +} + +type Domain struct { + Name string `json:"name"` + Policies []DomainPolicy `json:"policies"` +} + +type DomainPolicy struct { + Username string `json:"username"` + Resource string `json:"resource"` + Actions string `json:"actions"` +} + +func EncodeActions(actions []string) string { + return strings.Join(actions, "|") +} + +func DecodeActions(actions string) []string { + return strings.Split(actions, "|") +} + +func EncodeResource(types []string, resource string) string { + if len(types) == 0 { + return resource + } + + sort.Strings(types) + + return strings.Join(types, "|") + ":" + resource +} + +func DecodeResource(resource string) ([]string, string) { + before, after, found := strings.Cut(resource, ":") + if !found { + return []string{"$none"}, resource + } + + return strings.Split(before, "|"), after +} diff --git a/iam/access/adapter_test.go b/iam/policy/adapter_test.go similarity index 61% rename from iam/access/adapter_test.go rename to iam/policy/adapter_test.go index fbc72806..16a0ae89 100644 --- a/iam/access/adapter_test.go +++ b/iam/policy/adapter_test.go @@ -1,4 +1,4 @@ -package access +package 
policy import ( "testing" @@ -16,10 +16,16 @@ func TestAddPolicy(t *testing.T) { ai, err := NewJSONAdapter(memfs, "/policy.json", nil) require.NoError(t, err) - a, ok := ai.(*adapter) + a, ok := ai.(*policyadapter) require.True(t, ok) - err = a.AddPolicy("p", "p", []string{"foobar", "group", "resource", "action"}) + err = a.AddPolicy(Policy{ + Name: "foobar", + Domain: "group", + Types: []string{}, + Resource: "resource", + Actions: []string{"action"}, + }) require.NoError(t, err) require.Equal(t, 1, len(a.domains)) @@ -35,24 +41,11 @@ func TestAddPolicy(t *testing.T) { require.Equal(t, 1, len(g[0].Policies)) require.Equal(t, DomainPolicy{ Username: "foobar", - Role: Role{ - Resource: "resource", - Actions: "action", - }, + Resource: "resource", + Actions: "action", }, g[0].Policies[0]) } -func TestFormatActions(t *testing.T) { - data := [][]string{ - {"a|b|c", "a|b|c"}, - {"b|c|a", "a|b|c"}, - } - - for _, d := range data { - require.Equal(t, d[1], formatList(d[0]), d[0]) - } -} - func TestRemovePolicy(t *testing.T) { memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) require.NoError(t, err) @@ -60,25 +53,49 @@ func TestRemovePolicy(t *testing.T) { ai, err := NewJSONAdapter(memfs, "/policy.json", nil) require.NoError(t, err) - a, ok := ai.(*adapter) + a, ok := ai.(*policyadapter) require.True(t, ok) - err = a.AddPolicies("p", "p", [][]string{ - {"foobar1", "group", "resource1", "action1"}, - {"foobar2", "group", "resource2", "action2"}, + err = a.AddPolicies([]Policy{ + { + Name: "foobar1", + Domain: "group", + Types: []string{}, + Resource: "resource1", + Actions: []string{"action1"}, + }, + { + Name: "foobar2", + Domain: "group", + Types: []string{}, + Resource: "resource2", + Actions: []string{"action2"}, + }, }) require.NoError(t, err) require.Equal(t, 1, len(a.domains)) require.Equal(t, 2, len(a.domains[0].Policies)) - err = a.RemovePolicy("p", "p", []string{"foobar1", "group", "resource1", "action1"}) + err = a.RemovePolicy(Policy{ + Name: "foobar1", + 
Domain: "group", + Types: []string{}, + Resource: "resource1", + Actions: []string{"action1"}, + }) require.NoError(t, err) require.Equal(t, 1, len(a.domains)) require.Equal(t, 1, len(a.domains[0].Policies)) - err = a.RemovePolicy("p", "p", []string{"foobar2", "group", "resource2", "action2"}) + err = a.RemovePolicy(Policy{ + Name: "foobar2", + Domain: "group", + Types: []string{}, + Resource: "resource2", + Actions: []string{"action2"}, + }) require.NoError(t, err) require.Equal(t, 0, len(a.domains)) diff --git a/iam/policy/enforcer.go b/iam/policy/enforcer.go new file mode 100644 index 00000000..ea3e6e9d --- /dev/null +++ b/iam/policy/enforcer.go @@ -0,0 +1,93 @@ +package policy + +import ( + "github.com/puzpuzpuz/xsync/v3" +) + +type PolicyEnforcer struct { + model Model + adapter Adapter + lock *xsync.RBMutex +} + +func NewEnforcer(model Model, adapter Adapter) *PolicyEnforcer { + e := &PolicyEnforcer{ + model: model, + adapter: adapter, + lock: xsync.NewRBMutex(), + } + + e.ReloadPolicies() + + return e +} + +func (e *PolicyEnforcer) HasPolicy(policy Policy) bool { + token := e.lock.RLock() + defer e.lock.RUnlock(token) + + return e.model.HasPolicy(policy) +} + +func (e *PolicyEnforcer) AddPolicy(policy Policy) error { + token := e.lock.RLock() + defer e.lock.RUnlock(token) + + err := e.model.AddPolicy(policy) + if err != nil { + return err + } + + if e.adapter != nil { + e.adapter.AddPolicy(policy) + } + + return nil +} + +func (e *PolicyEnforcer) RemovePolicies(policies []Policy) error { + token := e.lock.RLock() + defer e.lock.RUnlock(token) + + err := e.model.RemovePolicies(policies) + if err != nil { + return err + } + + if e.adapter != nil { + e.adapter.RemovePolicies(policies) + } + + return nil +} + +func (e *PolicyEnforcer) GetFilteredPolicy(name, domain string) []Policy { + token := e.lock.RLock() + defer e.lock.RUnlock(token) + + return e.model.GetFilteredPolicy(name, domain) +} + +func (e *PolicyEnforcer) ReloadPolicies() error { + e.lock.Lock() + 
defer e.lock.Unlock() + + e.model.ClearPolicy() + + return e.adapter.LoadPolicy(e.model) +} + +func (e *PolicyEnforcer) Enforce(name, domain, rtype, resource, action string) (bool, Policy) { + token := e.lock.RLock() + defer e.lock.RUnlock(token) + + return e.model.Enforce(name, domain, rtype, resource, action) +} + +func (e *PolicyEnforcer) HasDomain(name string) bool { + return e.adapter.HasDomain(name) +} + +func (e *PolicyEnforcer) ListDomains() []string { + return e.adapter.AllDomains() +} diff --git a/iam/access/fixtures/policy.json b/iam/policy/fixtures/policy.json similarity index 100% rename from iam/access/fixtures/policy.json rename to iam/policy/fixtures/policy.json diff --git a/iam/policy/manager.go b/iam/policy/manager.go new file mode 100644 index 00000000..ffb83636 --- /dev/null +++ b/iam/policy/manager.go @@ -0,0 +1,92 @@ +package policy + +import ( + "fmt" + + "github.com/datarhei/core/v16/log" +) + +type policyaccess struct { + logger log.Logger + + adapter Adapter + model Model + enforcer *PolicyEnforcer +} + +type Config struct { + Adapter Adapter + Logger log.Logger + Superuser string +} + +func New(config Config) (Manager, error) { + am := &policyaccess{ + adapter: config.Adapter, + logger: config.Logger, + } + + if am.adapter == nil { + return nil, fmt.Errorf("missing adapter") + } + + if am.logger == nil { + am.logger = log.New("") + } + + m := NewModel(config.Superuser) + e := NewEnforcer(m, am.adapter) + + am.enforcer = e + am.model = m + + return am, nil +} + +func (am *policyaccess) Enforce(name, domain, rtype, resource, action string) (bool, Policy) { + return am.enforcer.Enforce(name, domain, rtype, resource, action) +} + +func (am *policyaccess) HasPolicy(name, domain string, types []string, resource string, actions []string) bool { + return am.enforcer.HasPolicy(Policy{ + Name: name, + Domain: domain, + Types: types, + Resource: resource, + Actions: actions, + }) +} + +func (am *policyaccess) AddPolicy(name, domain string, types 
[]string, resource string, actions []string) error { + policy := Policy{ + Name: name, + Domain: domain, + Types: types, + Resource: resource, + Actions: actions, + } + + return am.enforcer.AddPolicy(policy) +} + +func (am *policyaccess) RemovePolicy(name, domain string) error { + policies := am.enforcer.GetFilteredPolicy(name, domain) + + return am.enforcer.RemovePolicies(policies) +} + +func (am *policyaccess) ListPolicies(name, domain string) []Policy { + return am.enforcer.GetFilteredPolicy(name, domain) +} + +func (am *policyaccess) ReloadPolicies() error { + return am.enforcer.ReloadPolicies() +} + +func (am *policyaccess) HasDomain(name string) bool { + return am.adapter.HasDomain(name) +} + +func (am *policyaccess) ListDomains() []string { + return am.adapter.AllDomains() +} diff --git a/iam/access/access_test.go b/iam/policy/manager_test.go similarity index 66% rename from iam/access/access_test.go rename to iam/policy/manager_test.go index b857dd69..830fb3ad 100644 --- a/iam/access/access_test.go +++ b/iam/policy/manager_test.go @@ -1,9 +1,13 @@ -package access +package policy import ( + "fmt" + "math/rand/v2" + "sync" "testing" "github.com/datarhei/core/v16/io/fs" + "github.com/stretchr/testify/require" ) @@ -26,7 +30,7 @@ func TestAccessManager(t *testing.T) { }) require.NoError(t, err) - policies := am.ListPolicies("", "", nil, "", nil) + policies := am.ListPolicies("", "") require.ElementsMatch(t, []Policy{ { Name: "ingo", @@ -46,7 +50,7 @@ func TestAccessManager(t *testing.T) { am.AddPolicy("foobar", "group", []string{"bla", "blubb"}, "/", []string{"write"}) - policies = am.ListPolicies("", "", nil, "", nil) + policies = am.ListPolicies("", "") require.ElementsMatch(t, []Policy{ { Name: "ingo", @@ -75,9 +79,9 @@ func TestAccessManager(t *testing.T) { require.True(t, am.HasDomain("group")) require.False(t, am.HasDomain("$none")) - am.RemovePolicy("ingo", "", nil, "", nil) + am.RemovePolicy("ingo", "") - policies = am.ListPolicies("", "", nil, "", nil) 
+ policies = am.ListPolicies("", "") require.ElementsMatch(t, []Policy{ { Name: "foobar", @@ -112,12 +116,57 @@ func BenchmarkEnforce(b *testing.B) { }) require.NoError(b, err) - am.AddPolicy("$anon", "$none", []string{"foobar"}, "**", []string{"ANY"}) + names := []string{} + + for i := 0; i < 1000; i++ { + name := fmt.Sprintf("user%d", i) + names = append(names, name) + am.AddPolicy(name, "$none", []string{"foobar"}, "**", []string{"ANY"}) + } b.ResetTimer() for i := 0; i < b.N; i++ { - ok, _ := am.Enforce("$anon", "$none", "foobar", "baz", "read") + name := names[rand.IntN(1000)] + ok, _ := am.Enforce(name, "$none", "foobar", "baz", "read") require.True(b, ok) } } + +func BenchmarkConcurrentEnforce(b *testing.B) { + adapter, err := createAdapter() + require.NoError(b, err) + + am, err := New(Config{ + Adapter: adapter, + Logger: nil, + }) + require.NoError(b, err) + + names := []string{} + + for i := 0; i < 1000; i++ { + name := fmt.Sprintf("user%d", i) + names = append(names, name) + am.AddPolicy(name, "$none", []string{"foobar"}, "**", []string{"ANY"}) + } + + b.ResetTimer() + + readerWg := sync.WaitGroup{} + + for i := 0; i < 1000; i++ { + readerWg.Add(1) + go func() { + defer readerWg.Done() + + for i := 0; i < b.N; i++ { + name := names[rand.IntN(1000)] + ok, _ := am.Enforce(name, "$none", "foobar", "baz", "read") + require.True(b, ok) + } + }() + } + + readerWg.Wait() +} diff --git a/iam/policy/matcher.go b/iam/policy/matcher.go new file mode 100644 index 00000000..799ca7b5 --- /dev/null +++ b/iam/policy/matcher.go @@ -0,0 +1,72 @@ +package policy + +import ( + "slices" + "sync" + + "github.com/datarhei/core/v16/glob" +) + +var globcache = map[string]glob.Glob{} +var globcacheMu = sync.RWMutex{} + +func resourceMatch(requestType, requestResource string, policyTypes []string, policyResource string) bool { + var match bool = false + var err error = nil + + if !slices.Contains(policyTypes, requestType) { + return false + } + + key := requestType + 
policyResource + + if requestType == "api" || requestType == "fs" || requestType == "rtmp" || requestType == "srt" { + globcacheMu.RLock() + matcher, ok := globcache[key] + globcacheMu.RUnlock() + if !ok { + matcher, err = glob.Compile(policyResource, rune('/')) + if err != nil { + return false + } + globcacheMu.Lock() + globcache[key] = matcher + globcacheMu.Unlock() + } + + match = matcher.Match(requestResource) + } else { + globcacheMu.RLock() + matcher, ok := globcache[key] + globcacheMu.RUnlock() + if !ok { + matcher, err = glob.Compile(policyResource) + if err != nil { + return false + } + globcacheMu.Lock() + globcache[key] = matcher + globcacheMu.Unlock() + } + + match = matcher.Match(requestResource) + } + + return match +} + +func actionMatch(requestAction string, policyActions []string, wildcard string) bool { + if len(policyActions) == 0 { + return false + } + + if len(policyActions) == 1 && policyActions[0] == wildcard { + return true + } + + if slices.Contains(policyActions, requestAction) { + return true + } + + return false +} diff --git a/iam/policy/matcher_test.go b/iam/policy/matcher_test.go new file mode 100644 index 00000000..520bb72f --- /dev/null +++ b/iam/policy/matcher_test.go @@ -0,0 +1,35 @@ +package policy + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestResourceMatcher(t *testing.T) { + ok := resourceMatch("fs", "/", []string{"api", "fs"}, "/") + require.True(t, ok) + + ok = resourceMatch("bla", "/", []string{"api", "fs"}, "/") + require.False(t, ok) + + ok = resourceMatch("fs", "/foo", []string{"api", "fs"}, "/") + require.False(t, ok) + + ok = resourceMatch("fs", "/foo", []string{"api", "fs"}, "/*") + require.True(t, ok) + + ok = resourceMatch("fs", "/foo/boz", []string{"api", "fs"}, "/*") + require.False(t, ok) + + ok = resourceMatch("fs", "/foo/boz", []string{"api", "fs"}, "/**") + require.True(t, ok) +} + +func TestActionMatcher(t *testing.T) { + ok := actionMatch("get", []string{"any"}, "any") + 
require.True(t, ok) + + ok = actionMatch("get", []string{"get", "head"}, "any") + require.True(t, ok) +} diff --git a/iam/policy/model.go b/iam/policy/model.go new file mode 100644 index 00000000..9f9e36f6 --- /dev/null +++ b/iam/policy/model.go @@ -0,0 +1,238 @@ +package policy + +import ( + "slices" + "strings" + + "github.com/puzpuzpuz/xsync/v3" +) + +type Model interface { + Enforce(name, domain, rtype, resource, action string) (bool, Policy) + HasPolicy(policy Policy) bool + AddPolicy(policy Policy) error + AddPolicies(policies []Policy) error + RemovePolicy(policy Policy) error + RemovePolicies(policies []Policy) error + GetFilteredPolicy(name, domain string) []Policy + ClearPolicy() +} + +type model struct { + superuser string + policies map[string][]Policy // user@domain + lock *xsync.RBMutex +} + +func NewModel(superuser string) Model { + m := &model{ + superuser: superuser, + policies: map[string][]Policy{}, + lock: xsync.NewRBMutex(), + } + + return m +} + +func (m *model) HasPolicy(policy Policy) bool { + token := m.lock.RLock() + defer m.lock.RUnlock(token) + + return m.hasPolicy(policy) +} + +func (m *model) hasPolicy(policy Policy) bool { + key := policy.Name + "@" + policy.Domain + + policies, hasKey := m.policies[key] + if !hasKey { + return false + } + + policy = normalizePolicy(policy) + + for _, p := range policies { + if slices.Equal(p.Types, policy.Types) && p.Resource == policy.Resource && slices.Equal(p.Actions, policy.Actions) { + return true + } + } + + return false +} + +func (m *model) AddPolicy(policy Policy) error { + m.lock.Lock() + defer m.lock.Unlock() + + return m.addPolicy(policy) +} + +func (m *model) AddPolicies(policies []Policy) error { + m.lock.Lock() + defer m.lock.Unlock() + + for _, policy := range policies { + m.addPolicy(policy) + } + + return nil +} + +func (m *model) addPolicy(policy Policy) error { + if m.hasPolicy(policy) { + return nil + } + + policy = normalizePolicy(policy) + key := policy.Name + "@" + 
policy.Domain + + policies, hasKey := m.policies[key] + if !hasKey { + policies = []Policy{} + } + + policies = append(policies, policy) + m.policies[key] = policies + + return nil +} + +func (m *model) RemovePolicy(policy Policy) error { + m.lock.Lock() + defer m.lock.Unlock() + + return m.removePolicy(policy) +} + +func (m *model) RemovePolicies(policies []Policy) error { + m.lock.Lock() + defer m.lock.Unlock() + + for _, policy := range policies { + m.removePolicy(policy) + } + + return nil +} + +func (m *model) removePolicy(policy Policy) error { + if !m.hasPolicy(policy) { + return nil + } + + policy = normalizePolicy(policy) + key := policy.Name + "@" + policy.Domain + + policies := m.policies[key] + + newPolicies := []Policy{} + + for _, p := range policies { + if slices.Equal(p.Types, policy.Types) && p.Resource == policy.Resource && slices.Equal(p.Actions, policy.Actions) { + continue + } + + newPolicies = append(newPolicies, p) + } + + if len(newPolicies) != 0 { + m.policies[key] = newPolicies + } else { + delete(m.policies, key) + } + + return nil +} + +func (m *model) GetFilteredPolicy(name, domain string) []Policy { + token := m.lock.RLock() + defer m.lock.RUnlock(token) + + filteredPolicies := []Policy{} + + if len(name) == 0 && len(domain) == 0 { + for _, policies := range m.policies { + filteredPolicies = append(filteredPolicies, policies...) + } + } else if len(name) != 0 && len(domain) == 0 { + for key, policies := range m.policies { + if !strings.HasPrefix(key, name+"@") { + continue + } + + filteredPolicies = append(filteredPolicies, policies...) + } + } else if len(name) == 0 && len(domain) != 0 { + for key, policies := range m.policies { + if !strings.HasSuffix(key, "@"+domain) { + continue + } + + filteredPolicies = append(filteredPolicies, policies...) 
+ } + } else { + for key, policies := range m.policies { + before, after, _ := strings.Cut(key, "@") + + if name != before || domain != after { + continue + } + + filteredPolicies = append(filteredPolicies, policies...) + } + } + + return filteredPolicies +} + +func (m *model) ClearPolicy() { + m.lock.Lock() + defer m.lock.Unlock() + + m.policies = map[string][]Policy{} +} + +func (m *model) Enforce(name, domain, rtype, resource, action string) (bool, Policy) { + token := m.lock.RLock() + defer m.lock.RUnlock(token) + + if name == m.superuser { + return true, Policy{ + Name: m.superuser, + } + } + + key := name + "@" + domain + + policies, hasKey := m.policies[key] + if !hasKey { + return false, Policy{} + } + + rtype = strings.ToLower(rtype) + action = strings.ToLower(action) + + for _, p := range policies { + if resourceMatch(rtype, resource, p.Types, p.Resource) && actionMatch(action, p.Actions, "any") { + return true, p + } + } + + return false, Policy{} +} + +func normalizePolicy(p Policy) Policy { + for i, t := range p.Types { + p.Types[i] = strings.ToLower(t) + } + + slices.Sort(p.Types) + + for i, a := range p.Actions { + p.Actions[i] = strings.ToLower(a) + } + + slices.Sort(p.Actions) + + return p +} diff --git a/iam/policy/model_test.go b/iam/policy/model_test.go new file mode 100644 index 00000000..fe1b1392 --- /dev/null +++ b/iam/policy/model_test.go @@ -0,0 +1,354 @@ +package policy + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNormalizePolicy(t *testing.T) { + p := Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET"}, + } + + p = normalizePolicy(p) + + require.Equal(t, Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/foo/**", + Actions: []string{"get", "head", "options"}, + }, p) +} + +func TestModelNew(t *testing.T) { + m := 
NewModel("$superuser").(*model) + require.Equal(t, m.superuser, "$superuser") + require.NotNil(t, m.policies) + require.NotNil(t, m.lock) +} + +func TestModelAddPolicy(t *testing.T) { + p := Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET"}, + } + + m := NewModel("$superuser").(*model) + + err := m.AddPolicy(p) + require.NoError(t, err) + + require.Equal(t, 1, len(m.policies)) + require.Equal(t, 1, len(m.policies["foobar@domain"])) + require.Equal(t, Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/foo/**", + Actions: []string{"get", "head", "options"}, + }, m.policies["foobar@domain"][0]) + + m.AddPolicies([]Policy{p, p, p}) + + require.Equal(t, 1, len(m.policies)) + require.Equal(t, 1, len(m.policies["foobar@domain"])) + + p.Resource = "/bar/*" + m.AddPolicy(p) + + require.Equal(t, 2, len(m.policies["foobar@domain"])) + + p.Name = "foobaz" + m.AddPolicy(p) + + require.Equal(t, 2, len(m.policies)) + require.Equal(t, 1, len(m.policies["foobaz@domain"])) +} + +func TestModelHasPolicy(t *testing.T) { + p := Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET"}, + } + + m := NewModel("$superuser").(*model) + + ok := m.HasPolicy(p) + require.False(t, ok) + + m.AddPolicy(p) + + ok = m.HasPolicy(p) + require.True(t, ok) + + ok = m.HasPolicy(Policy{ + Name: "foobaz", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET", "put"}, + }) + require.False(t, ok) + + ok = m.HasPolicy(Policy{ + Name: "foobar", + Domain: "domaim", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET", "put"}, + }) + require.False(t, ok) + + ok = m.HasPolicy(Policy{ + Name: "foobar", + 
Domain: "domain", + Types: []string{"API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET", "put"}, + }) + require.False(t, ok) + + ok = m.HasPolicy(Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/*", + Actions: []string{"Head", "OPtionS", "GET", "put"}, + }) + require.False(t, ok) + + ok = m.HasPolicy(Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET", "pot"}, + }) + require.False(t, ok) +} + +func TestModelRemovePolicy(t *testing.T) { + p := Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET"}, + } + + m := NewModel("$superuser").(*model) + m.AddPolicy(p) + + p.Resource = "/bar/*" + m.AddPolicy(p) + + require.Equal(t, 2, len(m.policies["foobar@domain"])) + + err := m.RemovePolicy(Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET", "put"}, + }) + require.NoError(t, err) + + require.Equal(t, 2, len(m.policies["foobar@domain"])) + + err = m.RemovePolicy(p) + require.NoError(t, err) + + require.Equal(t, 1, len(m.policies)) + require.Equal(t, 1, len(m.policies["foobar@domain"])) +} + +func TestModelListPolicies(t *testing.T) { + p := Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET"}, + } + + m := NewModel("$superuser").(*model) + + policies := m.GetFilteredPolicy("", "") + require.Equal(t, 0, len(policies)) + + m.addPolicy(p) + + p.Resource = "/bar/*" + m.addPolicy(p) + + p.Name = "foobaz" + m.addPolicy(p) + + p.Domain = "group" + m.addPolicy(p) + + policies = m.GetFilteredPolicy("", "") + require.Equal(t, 4, len(policies)) + 
require.ElementsMatch(t, []Policy{ + { + Name: "foobar", + Domain: "domain", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/foo/**", + Actions: []string{"get", "head", "options"}, + }, + { + Name: "foobar", + Domain: "domain", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/bar/*", + Actions: []string{"get", "head", "options"}, + }, + { + Name: "foobaz", + Domain: "domain", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/bar/*", + Actions: []string{"get", "head", "options"}, + }, + { + Name: "foobaz", + Domain: "group", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/bar/*", + Actions: []string{"get", "head", "options"}, + }, + }, policies) + + policies = m.GetFilteredPolicy("foobar", "") + require.Equal(t, 2, len(policies)) + require.ElementsMatch(t, []Policy{ + { + Name: "foobar", + Domain: "domain", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/foo/**", + Actions: []string{"get", "head", "options"}, + }, + { + Name: "foobar", + Domain: "domain", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/bar/*", + Actions: []string{"get", "head", "options"}, + }, + }, policies) + + policies = m.GetFilteredPolicy("", "group") + require.Equal(t, 1, len(policies)) + require.ElementsMatch(t, []Policy{ + { + Name: "foobaz", + Domain: "group", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/bar/*", + Actions: []string{"get", "head", "options"}, + }, + }, policies) + + policies = m.GetFilteredPolicy("foobaz", "domain") + require.Equal(t, 1, len(policies)) + require.ElementsMatch(t, []Policy{ + { + Name: "foobaz", + Domain: "domain", + Types: []string{"api", "fs", "rtmp", "srt"}, + Resource: "/bar/*", + Actions: []string{"get", "head", "options"}, + }, + }, policies) + + policies = m.GetFilteredPolicy("foobar", "group") + require.Equal(t, 0, len(policies)) +} + +func TestModelEnforce(t *testing.T) { + p := Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", 
"srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET", "play"}, + } + + m := NewModel("$superuser").(*model) + + policies := m.GetFilteredPolicy("", "") + require.Equal(t, 0, len(policies)) + + m.addPolicy(p) + + ok, _ := m.Enforce("$superuser", "xxx", "something", "/nothing", "anything") + require.True(t, ok) + + ok, _ = m.Enforce("foobar", "domain", "rtmp", "/foo/bar/baz", "play") + require.True(t, ok) + + ok, _ = m.Enforce("foobar", "domain", "rtmp", "/foo/bar/baz", "publish") + require.False(t, ok) + + ok, _ = m.Enforce("foobar", "domain", "rtmp", "/fo/bar/baz", "play") + require.False(t, ok) + + ok, _ = m.Enforce("foobar", "domain", "rtsp", "/foo/bar/baz", "play") + require.False(t, ok) + + ok, _ = m.Enforce("foobar", "group", "rtmp", "/foo/bar/baz", "play") + require.False(t, ok) + + ok, _ = m.Enforce("foobaz", "domain", "rtmp", "/foo/bar/baz", "play") + require.False(t, ok) +} + +func TestModelClear(t *testing.T) { + p := Policy{ + Name: "foobar", + Domain: "domain", + Types: []string{"fs", "API", "rtMp", "srt"}, + Resource: "/foo/**", + Actions: []string{"Head", "OPtionS", "GET"}, + } + + m := NewModel("$superuser").(*model) + + policies := m.GetFilteredPolicy("", "") + require.Equal(t, 0, len(policies)) + + m.addPolicy(p) + + p.Resource = "/bar/*" + m.addPolicy(p) + + p.Name = "foobaz" + m.addPolicy(p) + + p.Domain = "group" + m.addPolicy(p) + + policies = m.GetFilteredPolicy("", "") + require.Equal(t, 4, len(policies)) + + m.ClearPolicy() + + policies = m.GetFilteredPolicy("", "") + require.Equal(t, 0, len(policies)) + + require.Empty(t, m.policies) +} diff --git a/io/fs/disk.go b/io/fs/disk.go index d4f11cc0..6cac9d40 100644 --- a/io/fs/disk.go +++ b/io/fs/disk.go @@ -330,7 +330,7 @@ func (fs *diskFilesystem) ReadFile(path string) ([]byte, error) { return os.ReadFile(path) } -func (fs *diskFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) { +func (fs *diskFilesystem) WriteFileReader(path string, r io.Reader, 
sizeHint int) (int64, bool, error) { path = fs.cleanPath(path) replace := true @@ -366,7 +366,7 @@ func (fs *diskFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool } func (fs *diskFilesystem) WriteFile(path string, data []byte) (int64, bool, error) { - return fs.WriteFileReader(path, bytes.NewReader(data)) + return fs.WriteFileReader(path, bytes.NewReader(data), len(data)) } func (fs *diskFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { diff --git a/io/fs/fs.go b/io/fs/fs.go index 3d162533..4cc8e201 100644 --- a/io/fs/fs.go +++ b/io/fs/fs.go @@ -93,8 +93,9 @@ type WriteFilesystem interface { // WriteFileReader adds a file to the filesystem. Returns the size of the data that has been // stored in bytes and whether the file is new. The size is negative if there was - // an error adding the file and error is not nil. - WriteFileReader(path string, r io.Reader) (int64, bool, error) + // an error adding the file and error is not nil. The size parameter is to suggest a size + // for the file to write. Use a negative value if the size is unknown. + WriteFileReader(path string, r io.Reader, size int) (int64, bool, error) // WriteFile adds a file to the filesystem. Returns the size of the data that has been // stored in bytes and whether the file is new. 
The size is negative if there was diff --git a/io/fs/fs_test.go b/io/fs/fs_test.go index 20a14662..9d52dddb 100644 --- a/io/fs/fs_test.go +++ b/io/fs/fs_test.go @@ -186,7 +186,7 @@ func testWriteFileSafe(t *testing.T, fs Filesystem) { func testWriteFileReader(t *testing.T, fs Filesystem) { data := strings.NewReader("xxxxx") - size, created, err := fs.WriteFileReader("/foobar", data) + size, created, err := fs.WriteFileReader("/foobar", data, -1) require.Nil(t, err) require.Equal(t, int64(5), size) @@ -211,7 +211,7 @@ func testOpen(t *testing.T, fs Filesystem) { file := fs.Open("/foobar") require.Nil(t, file) - _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx")) + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx"), -1) require.NoError(t, err) file = fs.Open("/foobar") @@ -232,7 +232,7 @@ func testRemove(t *testing.T, fs Filesystem) { data := strings.NewReader("xxxxx") - fs.WriteFileReader("/foobar", data) + fs.WriteFileReader("/foobar", data, -1) size = fs.Remove("/foobar") @@ -251,7 +251,7 @@ func testRemove(t *testing.T, fs Filesystem) { func testFiles(t *testing.T, fs Filesystem) { require.Equal(t, int64(0), fs.Files()) - fs.WriteFileReader("/foobar.txt", strings.NewReader("bar")) + fs.WriteFileReader("/foobar.txt", strings.NewReader("bar"), -1) require.Equal(t, int64(1), fs.Files()) @@ -267,7 +267,7 @@ func testFiles(t *testing.T, fs Filesystem) { func testReplace(t *testing.T, fs Filesystem) { data := strings.NewReader("xxxxx") - size, created, err := fs.WriteFileReader("/foobar", data) + size, created, err := fs.WriteFileReader("/foobar", data, -1) require.Nil(t, err) require.Equal(t, int64(5), size) @@ -284,7 +284,7 @@ func testReplace(t *testing.T, fs Filesystem) { data = strings.NewReader("yyy") - size, created, err = fs.WriteFileReader("/foobar", data) + size, created, err = fs.WriteFileReader("/foobar", data, -1) require.Nil(t, err) require.Equal(t, int64(3), size) @@ -301,12 +301,12 @@ func testReplace(t *testing.T, 
fs Filesystem) { } func testList(t *testing.T, fs Filesystem) { - fs.WriteFileReader("/foobar1", strings.NewReader("a")) - fs.WriteFileReader("/foobar2", strings.NewReader("bb")) - fs.WriteFileReader("/foobar3", strings.NewReader("ccc")) - fs.WriteFileReader("/foobar4", strings.NewReader("dddd")) - fs.WriteFileReader("/path/foobar3", strings.NewReader("ccc")) - fs.WriteFileReader("/path/to/foobar4", strings.NewReader("dddd")) + fs.WriteFileReader("/foobar1", strings.NewReader("a"), -1) + fs.WriteFileReader("/foobar2", strings.NewReader("bb"), -1) + fs.WriteFileReader("/foobar3", strings.NewReader("ccc"), -1) + fs.WriteFileReader("/foobar4", strings.NewReader("dddd"), -1) + fs.WriteFileReader("/path/foobar3", strings.NewReader("ccc"), -1) + fs.WriteFileReader("/path/to/foobar4", strings.NewReader("dddd"), -1) cur, max := fs.Size() @@ -337,10 +337,10 @@ func testList(t *testing.T, fs Filesystem) { } func testListGlob(t *testing.T, fs Filesystem) { - fs.WriteFileReader("/foobar1", strings.NewReader("a")) - fs.WriteFileReader("/path/foobar2", strings.NewReader("a")) - fs.WriteFileReader("/path/to/foobar3", strings.NewReader("a")) - fs.WriteFileReader("/foobar4", strings.NewReader("a")) + fs.WriteFileReader("/foobar1", strings.NewReader("a"), -1) + fs.WriteFileReader("/path/foobar2", strings.NewReader("a"), -1) + fs.WriteFileReader("/path/to/foobar3", strings.NewReader("a"), -1) + fs.WriteFileReader("/foobar4", strings.NewReader("a"), -1) cur := fs.Files() @@ -376,10 +376,10 @@ func testListGlob(t *testing.T, fs Filesystem) { } func testListSize(t *testing.T, fs Filesystem) { - fs.WriteFileReader("/a", strings.NewReader("a")) - fs.WriteFileReader("/aa", strings.NewReader("aa")) - fs.WriteFileReader("/aaa", strings.NewReader("aaa")) - fs.WriteFileReader("/aaaa", strings.NewReader("aaaa")) + fs.WriteFileReader("/a", strings.NewReader("a"), -1) + fs.WriteFileReader("/aa", strings.NewReader("aa"), -1) + fs.WriteFileReader("/aaa", strings.NewReader("aaa"), -1) + 
fs.WriteFileReader("/aaaa", strings.NewReader("aaaa"), -1) cur := fs.Files() @@ -411,13 +411,13 @@ func testListSize(t *testing.T, fs Filesystem) { } func testListModified(t *testing.T, fs Filesystem) { - fs.WriteFileReader("/a", strings.NewReader("a")) + fs.WriteFileReader("/a", strings.NewReader("a"), -1) time.Sleep(500 * time.Millisecond) - fs.WriteFileReader("/b", strings.NewReader("b")) + fs.WriteFileReader("/b", strings.NewReader("b"), -1) time.Sleep(500 * time.Millisecond) - fs.WriteFileReader("/c", strings.NewReader("c")) + fs.WriteFileReader("/c", strings.NewReader("c"), -1) time.Sleep(500 * time.Millisecond) - fs.WriteFileReader("/d", strings.NewReader("d")) + fs.WriteFileReader("/d", strings.NewReader("d"), -1) cur := fs.Files() @@ -463,10 +463,10 @@ func testListModified(t *testing.T, fs Filesystem) { } func testRemoveAll(t *testing.T, fs Filesystem) { - fs.WriteFileReader("/foobar1", strings.NewReader("abc")) - fs.WriteFileReader("/path/foobar2", strings.NewReader("abc")) - fs.WriteFileReader("/path/to/foobar3", strings.NewReader("abc")) - fs.WriteFileReader("/foobar4", strings.NewReader("abc")) + fs.WriteFileReader("/foobar1", strings.NewReader("abc"), -1) + fs.WriteFileReader("/path/foobar2", strings.NewReader("abc"), -1) + fs.WriteFileReader("/path/to/foobar3", strings.NewReader("abc"), -1) + fs.WriteFileReader("/foobar4", strings.NewReader("abc"), -1) cur := fs.Files() @@ -483,10 +483,10 @@ func testRemoveAll(t *testing.T, fs Filesystem) { } func testRemoveList(t *testing.T, fs Filesystem) { - fs.WriteFileReader("/foobar1", strings.NewReader("abc")) - fs.WriteFileReader("/path/foobar2", strings.NewReader("abc")) - fs.WriteFileReader("/path/to/foobar3", strings.NewReader("abc")) - fs.WriteFileReader("/foobar4", strings.NewReader("abc")) + fs.WriteFileReader("/foobar1", strings.NewReader("abc"), -1) + fs.WriteFileReader("/path/foobar2", strings.NewReader("abc"), -1) + fs.WriteFileReader("/path/to/foobar3", strings.NewReader("abc"), -1) + 
fs.WriteFileReader("/foobar4", strings.NewReader("abc"), -1) cur := fs.Files() @@ -513,7 +513,7 @@ func testData(t *testing.T, fs Filesystem) { data1 := strings.NewReader(data) - _, _, err = fs.WriteFileReader("/foobar", data1) + _, _, err = fs.WriteFileReader("/foobar", data1, -1) require.NoError(t, err) file = fs.Open("/foobar") @@ -542,7 +542,7 @@ func testStatDir(t *testing.T, fs Filesystem) { require.NotNil(t, info) require.Equal(t, true, info.IsDir()) - fs.WriteFileReader("/these/are/some/directories/foobar", strings.NewReader("gduwotoxqb")) + fs.WriteFileReader("/these/are/some/directories/foobar", strings.NewReader("gduwotoxqb"), -1) info, err = fs.Stat("/foobar") require.Error(t, err) @@ -614,7 +614,7 @@ func testMkdirAll(t *testing.T, fs Filesystem) { require.Equal(t, int64(0), info.Size()) require.Equal(t, true, info.IsDir()) - _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb"), -1) require.NoError(t, err) err = fs.MkdirAll("/foobar", 0755) @@ -631,7 +631,7 @@ func testRename(t *testing.T, fs Filesystem) { _, err = fs.Stat("/foobaz") require.Error(t, err) - _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb"), -1) require.NoError(t, err) _, err = fs.Stat("/foobar") @@ -654,10 +654,10 @@ func testRenameOverwrite(t *testing.T, fs Filesystem) { _, err = fs.Stat("/foobaz") require.Error(t, err) - _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar"), -1) require.NoError(t, err) - _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz"), -1) require.NoError(t, err) _, err = fs.Stat("/foobar") @@ -688,7 +688,7 @@ func testSymlink(t *testing.T, fs Filesystem) { err := fs.Symlink("/foobar", 
"/foobaz") require.Error(t, err) - _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar"), -1) require.NoError(t, err) err = fs.Symlink("/foobar", "/foobaz") @@ -729,7 +729,7 @@ func testSymlinkOpenStat(t *testing.T, fs Filesystem) { return } - _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar"), -1) require.NoError(t, err) err = fs.Symlink("/foobar", "/foobaz") @@ -756,7 +756,7 @@ func testSymlinkOpenStat(t *testing.T, fs Filesystem) { } func testStat(t *testing.T, fs Filesystem) { - _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar"), -1) require.NoError(t, err) file := fs.Open("/foobar") @@ -781,7 +781,7 @@ func testCopy(t *testing.T, fs Filesystem) { _, err = fs.Stat("/foobaz") require.Error(t, err) - _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb"), -1) require.NoError(t, err) _, err = fs.Stat("/foobar") @@ -804,10 +804,10 @@ func testCopyOverwrite(t *testing.T, fs Filesystem) { _, err = fs.Stat("/foobaz") require.Error(t, err) - _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar"), -1) require.NoError(t, err) - _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz"), -1) require.NoError(t, err) _, err = fs.Stat("/foobar") @@ -838,10 +838,10 @@ func testSymlinkErrors(t *testing.T, fs Filesystem) { err := fs.Symlink("/foobar", "/foobaz") require.Error(t, err) - _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar"), -1) 
require.NoError(t, err) - _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz"), -1) require.NoError(t, err) err = fs.Symlink("/foobar", "/foobaz") diff --git a/io/fs/mem.go b/io/fs/mem.go index 6085bc7d..2033d349 100644 --- a/io/fs/mem.go +++ b/io/fs/mem.go @@ -2,6 +2,7 @@ package fs import ( "bytes" + "errors" "fmt" "io" "io/fs" @@ -127,14 +128,76 @@ type memFilesystem struct { // Storage backend storage memStorage + dirs *dirStorage +} + +type dirStorage struct { + dirs map[string]uint64 + lock sync.RWMutex +} + +func newDirStorage() *dirStorage { + s := &dirStorage{ + dirs: map[string]uint64{}, + } + + s.dirs["/"] = 1 + + return s +} + +func (s *dirStorage) Has(path string) bool { + s.lock.RLock() + defer s.lock.RUnlock() + + _, hasDir := s.dirs[path] + + return hasDir +} + +func (s *dirStorage) Add(path string) { + dir := filepath.Dir(path) + elements := strings.Split(dir, "/") + + s.lock.Lock() + defer s.lock.Unlock() + + p := "/" + for _, e := range elements { + p = filepath.Join(p, e) + n := s.dirs[p] + n++ + s.dirs[p] = n + } +} + +func (s *dirStorage) Remove(path string) { + dir := filepath.Dir(path) + elements := strings.Split(dir, "/") + + s.lock.Lock() + defer s.lock.Unlock() + + p := "/" + for _, e := range elements { + p = filepath.Join(p, e) + n := s.dirs[p] + n-- + if n == 0 { + delete(s.dirs, p) + } else { + s.dirs[p] = n + } + } } // NewMemFilesystem creates a new filesystem in memory that implements // the Filesystem interface. 
func NewMemFilesystem(config MemConfig) (Filesystem, error) { fs := &memFilesystem{ - metadata: make(map[string]string), + metadata: map[string]string{}, logger: config.Logger, + dirs: newDirStorage(), } if fs.logger == nil { @@ -187,7 +250,7 @@ func NewMemFilesystemFromDir(dir string, config MemConfig) (Filesystem, error) { defer file.Close() - _, _, err = mem.WriteFileReader(strings.TrimPrefix(path, dir), file) + _, _, err = mem.WriteFileReader(strings.TrimPrefix(path, dir), file, int(info.Size())) if err != nil { return fmt.Errorf("can't copy %s", path) } @@ -327,12 +390,16 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error { }, } - oldFile, loaded := fs.storage.Store(newname, newFile) + oldFile, replaced := fs.storage.Store(newname, newFile) + + if !replaced { + fs.dirs.Add(newname) + } fs.sizeLock.Lock() defer fs.sizeLock.Unlock() - if loaded { + if replaced { oldFile.Close() fs.currentSize -= oldFile.size } @@ -342,7 +409,44 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error { return nil } -func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) { +var chunkPool = sync.Pool{ + New: func() interface{} { + chunk := make([]byte, 128*1024) + return &chunk + }, +} + +func copyToBufferFromReader(buf *bytes.Buffer, r io.Reader, _ int) (int64, error) { + chunkPtr := chunkPool.Get().(*[]byte) + chunk := *chunkPtr + defer chunkPool.Put(chunkPtr) + + size := int64(0) + + for { + n, err := r.Read(chunk) + if n != 0 { + buf.Write(chunk[:n]) + size += int64(n) + } + + if err != nil { + if errors.Is(err, io.EOF) { + return size, nil + } + + return size, err + } + + if n == 0 { + break + } + } + + return size, nil +} + +func (fs *memFilesystem) WriteFileReader(path string, r io.Reader, sizeHint int) (int64, bool, error) { path = fs.cleanPath(path) isdir := fs.isDir(path) @@ -360,7 +464,11 @@ func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, data: &bytes.Buffer{}, } - size, err := 
newFile.data.ReadFrom(r) + if sizeHint > 0 { + newFile.data.Grow(sizeHint) + } + + size, err := copyToBufferFromReader(newFile.data, r, 8*1024) if err != nil { fs.logger.WithFields(log.Fields{ "path": path, @@ -377,6 +485,10 @@ func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, oldFile, replace := fs.storage.Store(path, newFile) + if !replace { + fs.dirs.Add(path) + } + fs.sizeLock.Lock() defer fs.sizeLock.Unlock() @@ -404,11 +516,11 @@ func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, } func (fs *memFilesystem) WriteFile(path string, data []byte) (int64, bool, error) { - return fs.WriteFileReader(path, bytes.NewReader(data)) + return fs.WriteFileReader(path, bytes.NewReader(data), len(data)) } func (fs *memFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { - return fs.WriteFileReader(path, bytes.NewReader(data)) + return fs.WriteFileReader(path, bytes.NewReader(data), len(data)) } func (fs *memFilesystem) Purge(size int64) int64 { @@ -430,6 +542,8 @@ func (fs *memFilesystem) Purge(size int64) int64 { size -= f.size freed += f.size + fs.dirs.Remove(f.name) + fs.sizeLock.Lock() fs.currentSize -= f.size fs.sizeLock.Unlock() @@ -464,16 +578,7 @@ func (fs *memFilesystem) MkdirAll(path string, perm os.FileMode) error { return ErrExist } - f := &memFile{ - memFileInfo: memFileInfo{ - name: path, - size: 0, - dir: true, - lastMod: time.Now(), - }, - } - - fs.storage.Store(path, f) + fs.dirs.Add(filepath.Join(path, "x")) return nil } @@ -494,6 +599,11 @@ func (fs *memFilesystem) Rename(src, dst string) error { dstFile, replace := fs.storage.Store(dst, srcFile) fs.storage.Delete(src) + fs.dirs.Remove(src) + if !replace { + fs.dirs.Add(dst) + } + fs.sizeLock.Lock() defer fs.sizeLock.Unlock() @@ -540,6 +650,10 @@ func (fs *memFilesystem) Copy(src, dst string) error { f, replace := fs.storage.Store(dst, dstFile) + if !replace { + fs.dirs.Add(dst) + } + fs.sizeLock.Lock() defer 
fs.sizeLock.Unlock() @@ -600,31 +714,7 @@ func (fs *memFilesystem) stat(path string) (FileInfo, error) { } func (fs *memFilesystem) isDir(path string) bool { - file, ok := fs.storage.Load(path) - if ok { - return file.dir - } - - if !strings.HasSuffix(path, "/") { - path = path + "/" - } - - if path == "/" { - return true - } - - found := false - - fs.storage.Range(func(k string, _ *memFile) bool { - if strings.HasPrefix(k, path) { - found = true - return false - } - - return true - }) - - return found + return fs.dirs.Has(path) } func (fs *memFilesystem) Remove(path string) int64 { @@ -638,6 +728,8 @@ func (fs *memFilesystem) remove(path string) int64 { if ok { file.Close() + fs.dirs.Remove(path) + fs.sizeLock.Lock() defer fs.sizeLock.Unlock() @@ -722,6 +814,8 @@ func (fs *memFilesystem) RemoveList(path string, options ListOptions) ([]string, size += file.size names = append(names, file.name) + fs.dirs.Remove(file.name) + file.Close() } diff --git a/io/fs/mem_test.go b/io/fs/mem_test.go index 08df7d12..0f8c1758 100644 --- a/io/fs/mem_test.go +++ b/io/fs/mem_test.go @@ -1,6 +1,7 @@ package fs import ( + "bytes" "context" "fmt" "io" @@ -107,6 +108,25 @@ func TestWriteWhileRead(t *testing.T) { require.Equal(t, []byte("xxxxx"), data) } +func BenchmarkMemWriteFile(b *testing.B) { + mem, err := NewMemFilesystem(MemConfig{}) + require.NoError(b, err) + + nFiles := 50000 + + for i := 0; i < nFiles; i++ { + path := fmt.Sprintf("/%d.dat", i) + mem.WriteFile(path, []byte(rand.StringAlphanumeric(1))) + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + path := fmt.Sprintf("/%d.dat", i%nFiles) + mem.WriteFile(path, []byte(rand.StringAlphanumeric(1))) + } +} + func BenchmarkMemReadFileWhileWriting(b *testing.B) { mem, err := NewMemFilesystem(MemConfig{}) require.NoError(b, err) @@ -171,3 +191,36 @@ func BenchmarkMemReadFileWhileWriting(b *testing.B) { readerWg.Wait() } + +func BenchmarkBufferReadFrom(b *testing.B) { + data := []byte(rand.StringAlphanumeric(1024 * 1024)) + + 
for i := 0; i < b.N; i++ { + r := bytes.NewReader(data) + buf := &bytes.Buffer{} + buf.ReadFrom(r) + } +} + +func TestBufferReadChunks(t *testing.T) { + data := []byte(rand.StringAlphanumeric(1024 * 1024)) + + r := bytes.NewReader(data) + buf := &bytes.Buffer{} + + copyToBufferFromReader(buf, r, 32*1024) + + res := bytes.Compare(data, buf.Bytes()) + require.Equal(t, 0, res) +} + +func BenchmarkBufferReadChunks(b *testing.B) { + data := []byte(rand.StringAlphanumeric(1024 * 1024)) + + for i := 0; i < b.N; i++ { + r := bytes.NewReader(data) + buf := &bytes.Buffer{} + + copyToBufferFromReader(buf, r, 32*1024) + } +} diff --git a/io/fs/readonly.go b/io/fs/readonly.go index 2991944c..cf42d911 100644 --- a/io/fs/readonly.go +++ b/io/fs/readonly.go @@ -21,7 +21,7 @@ func (r *readOnlyFilesystem) Symlink(oldname, newname string) error { return os.ErrPermission } -func (r *readOnlyFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) { +func (r *readOnlyFilesystem) WriteFileReader(path string, rd io.Reader, size int) (int64, bool, error) { return -1, false, os.ErrPermission } diff --git a/io/fs/readonly_test.go b/io/fs/readonly_test.go index 3b3b3dfb..6a784299 100644 --- a/io/fs/readonly_test.go +++ b/io/fs/readonly_test.go @@ -20,7 +20,7 @@ func TestReadOnly(t *testing.T) { _, _, err = ro.WriteFile("/readonly.go", []byte("foobar")) require.Error(t, err) - _, _, err = ro.WriteFileReader("/readonly.go", strings.NewReader("foobar")) + _, _, err = ro.WriteFileReader("/readonly.go", strings.NewReader("foobar"), -1) require.Error(t, err) _, _, err = ro.WriteFileSafe("/readonly.go", []byte("foobar")) diff --git a/io/fs/s3.go b/io/fs/s3.go index 6d7807d8..c75162ed 100644 --- a/io/fs/s3.go +++ b/io/fs/s3.go @@ -347,17 +347,17 @@ func (fs *s3Filesystem) write(path string, r io.Reader) (int64, bool, error) { return info.Size, !overwrite, nil } -func (fs *s3Filesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) { +func (fs *s3Filesystem) 
WriteFileReader(path string, r io.Reader, size int) (int64, bool, error) { path = fs.cleanPath(path) return fs.write(path, r) } func (fs *s3Filesystem) WriteFile(path string, data []byte) (int64, bool, error) { - return fs.WriteFileReader(path, bytes.NewReader(data)) + return fs.WriteFileReader(path, bytes.NewReader(data), len(data)) } func (fs *s3Filesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { - return fs.WriteFileReader(path, bytes.NewReader(data)) + return fs.WriteFileReader(path, bytes.NewReader(data), len(data)) } func (fs *s3Filesystem) Rename(src, dst string) error { diff --git a/io/fs/sized.go b/io/fs/sized.go index 366ef6f5..aa6b552b 100644 --- a/io/fs/sized.go +++ b/io/fs/sized.go @@ -65,14 +65,14 @@ func (r *sizedFilesystem) Resize(size int64) error { return nil } -func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) { +func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader, sizeHint int) (int64, bool, error) { currentSize, maxSize := r.Size() if maxSize <= 0 { - return r.Filesystem.WriteFileReader(path, rd) + return r.Filesystem.WriteFileReader(path, rd, sizeHint) } data := bytes.Buffer{} - size, err := data.ReadFrom(rd) + size, err := copyToBufferFromReader(&data, rd, 8*1024) if err != nil { return -1, false, err } @@ -97,11 +97,11 @@ func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader) (int64, boo } } - return r.Filesystem.WriteFileReader(path, &data) + return r.Filesystem.WriteFileReader(path, &data, int(size)) } func (r *sizedFilesystem) WriteFile(path string, data []byte) (int64, bool, error) { - return r.WriteFileReader(path, bytes.NewBuffer(data)) + return r.WriteFileReader(path, bytes.NewBuffer(data), len(data)) } func (r *sizedFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { diff --git a/io/fs/sized_test.go b/io/fs/sized_test.go index e158c422..b5eebe6e 100644 --- a/io/fs/sized_test.go +++ b/io/fs/sized_test.go @@ -52,7 +52,7 
@@ func TestSizedResizePurge(t *testing.T) { require.Equal(t, int64(0), cur) require.Equal(t, int64(10), max) - fs.WriteFileReader("/foobar", strings.NewReader("xxxxxxxxxx")) + fs.WriteFileReader("/foobar", strings.NewReader("xxxxxxxxxx"), -1) cur, max = fs.Size() @@ -76,7 +76,7 @@ func TestSizedWrite(t *testing.T) { require.Equal(t, int64(0), cur) require.Equal(t, int64(10), max) - size, created, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx")) + size, created, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx"), -1) require.NoError(t, err) require.Equal(t, int64(5), size) require.Equal(t, true, created) @@ -89,7 +89,7 @@ func TestSizedWrite(t *testing.T) { _, _, err = fs.WriteFile("/foobaz", []byte("xxxxxx")) require.Error(t, err) - _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("xxxxxx")) + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("xxxxxx"), -1) require.Error(t, err) _, _, err = fs.WriteFileSafe("/foobaz", []byte("xxxxxx")) @@ -101,7 +101,7 @@ func TestSizedReplaceNoPurge(t *testing.T) { data := strings.NewReader("xxxxx") - size, created, err := fs.WriteFileReader("/foobar", data) + size, created, err := fs.WriteFileReader("/foobar", data, -1) require.Nil(t, err) require.Equal(t, int64(5), size) @@ -118,7 +118,7 @@ func TestSizedReplaceNoPurge(t *testing.T) { data = strings.NewReader("yyy") - size, created, err = fs.WriteFileReader("/foobar", data) + size, created, err = fs.WriteFileReader("/foobar", data, -1) require.Nil(t, err) require.Equal(t, int64(3), size) @@ -141,9 +141,9 @@ func TestSizedReplacePurge(t *testing.T) { data2 := strings.NewReader("yyy") data3 := strings.NewReader("zzz") - fs.WriteFileReader("/foobar1", data1) - fs.WriteFileReader("/foobar2", data2) - fs.WriteFileReader("/foobar3", data3) + fs.WriteFileReader("/foobar1", data1, -1) + fs.WriteFileReader("/foobar2", data2, -1) + fs.WriteFileReader("/foobar3", data3, -1) cur, max := fs.Size() @@ -156,7 +156,7 @@ func 
TestSizedReplacePurge(t *testing.T) { data4 := strings.NewReader("zzzzz") - size, _, _ := fs.WriteFileReader("/foobar1", data4) + size, _, _ := fs.WriteFileReader("/foobar1", data4, -1) require.Equal(t, int64(5), size) @@ -175,7 +175,7 @@ func TestSizedReplaceUnlimited(t *testing.T) { data := strings.NewReader("xxxxx") - size, created, err := fs.WriteFileReader("/foobar", data) + size, created, err := fs.WriteFileReader("/foobar", data, -1) require.Nil(t, err) require.Equal(t, int64(5), size) @@ -192,7 +192,7 @@ func TestSizedReplaceUnlimited(t *testing.T) { data = strings.NewReader("yyy") - size, created, err = fs.WriteFileReader("/foobar", data) + size, created, err = fs.WriteFileReader("/foobar", data, -1) require.Nil(t, err) require.Equal(t, int64(3), size) @@ -213,7 +213,7 @@ func TestSizedTooBigNoPurge(t *testing.T) { data := strings.NewReader("xxxxxyyyyyz") - size, _, err := fs.WriteFileReader("/foobar", data) + size, _, err := fs.WriteFileReader("/foobar", data, -1) require.Error(t, err) require.Equal(t, int64(-1), size) } @@ -224,12 +224,12 @@ func TestSizedTooBigPurge(t *testing.T) { data1 := strings.NewReader("xxxxx") data2 := strings.NewReader("yyyyy") - fs.WriteFileReader("/foobar1", data1) - fs.WriteFileReader("/foobar2", data2) + fs.WriteFileReader("/foobar1", data1, -1) + fs.WriteFileReader("/foobar2", data2, -1) data := strings.NewReader("xxxxxyyyyyz") - size, _, err := fs.WriteFileReader("/foobar", data) + size, _, err := fs.WriteFileReader("/foobar", data, -1) require.Error(t, err) require.Equal(t, int64(-1), size) @@ -242,8 +242,8 @@ func TestSizedFullSpaceNoPurge(t *testing.T) { data1 := strings.NewReader("xxxxx") data2 := strings.NewReader("yyyyy") - fs.WriteFileReader("/foobar1", data1) - fs.WriteFileReader("/foobar2", data2) + fs.WriteFileReader("/foobar1", data1, -1) + fs.WriteFileReader("/foobar2", data2, -1) cur, max := fs.Size() @@ -256,7 +256,7 @@ func TestSizedFullSpaceNoPurge(t *testing.T) { data3 := strings.NewReader("zzzzz") - size, 
_, err := fs.WriteFileReader("/foobar3", data3) + size, _, err := fs.WriteFileReader("/foobar3", data3, -1) require.Error(t, err) require.Equal(t, int64(-1), size) } @@ -267,8 +267,8 @@ func TestSizedFullSpacePurge(t *testing.T) { data1 := strings.NewReader("xxxxx") data2 := strings.NewReader("yyyyy") - fs.WriteFileReader("/foobar1", data1) - fs.WriteFileReader("/foobar2", data2) + fs.WriteFileReader("/foobar1", data1, -1) + fs.WriteFileReader("/foobar2", data2, -1) cur, max := fs.Size() @@ -281,7 +281,7 @@ func TestSizedFullSpacePurge(t *testing.T) { data3 := strings.NewReader("zzzzz") - size, _, _ := fs.WriteFileReader("/foobar3", data3) + size, _, _ := fs.WriteFileReader("/foobar3", data3, -1) require.Equal(t, int64(5), size) @@ -302,9 +302,9 @@ func TestSizedFullSpacePurgeMulti(t *testing.T) { data2 := strings.NewReader("yyy") data3 := strings.NewReader("zzz") - fs.WriteFileReader("/foobar1", data1) - fs.WriteFileReader("/foobar2", data2) - fs.WriteFileReader("/foobar3", data3) + fs.WriteFileReader("/foobar1", data1, -1) + fs.WriteFileReader("/foobar2", data2, -1) + fs.WriteFileReader("/foobar3", data3, -1) cur, max := fs.Size() @@ -317,7 +317,7 @@ func TestSizedFullSpacePurgeMulti(t *testing.T) { data4 := strings.NewReader("zzzzz") - size, _, _ := fs.WriteFileReader("/foobar4", data4) + size, _, _ := fs.WriteFileReader("/foobar4", data4, -1) require.Equal(t, int64(5), size) @@ -338,11 +338,11 @@ func TestSizedPurgeOrder(t *testing.T) { data2 := strings.NewReader("yyyyy") data3 := strings.NewReader("zzzzz") - fs.WriteFileReader("/foobar1", data1) + fs.WriteFileReader("/foobar1", data1, -1) time.Sleep(1 * time.Second) - fs.WriteFileReader("/foobar2", data2) + fs.WriteFileReader("/foobar2", data2, -1) time.Sleep(1 * time.Second) - fs.WriteFileReader("/foobar3", data3) + fs.WriteFileReader("/foobar3", data3, -1) file := fs.Open("/foobar1") diff --git a/process/limiter.go b/process/limiter.go index 5a11869c..79f791e3 100644 --- a/process/limiter.go +++ 
b/process/limiter.go @@ -91,10 +91,13 @@ type limiter struct { ncpu float64 ncpuFactor float64 proc psutil.Process - lock sync.Mutex + lock sync.RWMutex cancel context.CancelFunc onLimit LimitFunc + lastUsage Usage + lastUsageLock sync.RWMutex + cpu float64 // CPU limit cpuCurrent float64 // Current CPU load of this process cpuLast float64 // Last CPU load of this process @@ -150,6 +153,10 @@ func NewLimiter(config LimiterConfig) Limiter { l.ncpu = ncpu } + l.lastUsage.CPU.NCPU = l.ncpu + l.lastUsage.CPU.Limit = l.cpu * l.ncpu + l.lastUsage.Memory.Limit = l.memory + l.ncpuFactor = 1 mode := "hard" @@ -208,7 +215,7 @@ func (l *limiter) Start(process psutil.Process) error { ctx, cancel := context.WithCancel(context.Background()) l.cancel = cancel - go l.ticker(ctx, 1000*time.Millisecond) + go l.ticker(ctx, time.Second) if l.mode == LimitModeSoft { ctx, cancel = context.WithCancel(context.Background()) @@ -255,15 +262,21 @@ func (l *limiter) ticker(ctx context.Context, interval time.Duration) { } } -func (l *limiter) collect(t time.Time) { +func (l *limiter) collect(_ time.Time) { l.lock.Lock() - defer l.lock.Unlock() + proc := l.proc + l.lock.Unlock() - if l.proc == nil { + if proc == nil { return } - if mstat, err := l.proc.VirtualMemory(); err == nil { + mstat, merr := proc.VirtualMemory() + cpustat, cerr := proc.CPUPercent() + + l.lock.Lock() + + if merr == nil { l.memoryLast, l.memoryCurrent = l.memoryCurrent, mstat if l.memoryCurrent > l.memoryMax { @@ -281,7 +294,7 @@ func (l *limiter) collect(t time.Time) { l.memoryAvg = ((l.memoryAvg * float64(l.memoryAvgCounter-1)) + float64(l.memoryCurrent)) / float64(l.memoryAvgCounter) } - if cpustat, err := l.proc.CPUPercent(); err == nil { + if cerr == nil { l.cpuLast, l.cpuCurrent = l.cpuCurrent, (cpustat.System+cpustat.User+cpustat.Other)/100 if l.cpuCurrent > l.cpuMax { @@ -354,6 +367,19 @@ func (l *limiter) collect(t time.Time) { if isLimitExceeded { go l.onLimit(l.cpuCurrent*l.ncpuFactor*100, l.memoryCurrent) } + + 
l.lastUsageLock.Lock() + l.lastUsage.CPU.Current = l.cpuCurrent * l.ncpu * 100 + l.lastUsage.CPU.Average = l.cpuAvg * l.ncpu * 100 + l.lastUsage.CPU.Max = l.cpuMax * l.ncpu * 100 + l.lastUsage.CPU.IsThrottling = l.cpuThrottling + + l.lastUsage.Memory.Current = l.memoryCurrent + l.lastUsage.Memory.Average = l.memoryAvg + l.lastUsage.Memory.Max = l.memoryMax + l.lastUsageLock.Unlock() + + l.lock.Unlock() } func (l *limiter) Limit(cpu, memory bool) error { @@ -430,10 +456,12 @@ func (l *limiter) limitCPU(ctx context.Context, limit float64, interval time.Dur if factorTopLimit > 0 { factorTopLimit -= 10 } else { - if l.proc != nil { - l.proc.Resume() + if l.cpuThrottling { + if l.proc != nil { + l.proc.Resume() + } + l.cpuThrottling = false } - l.cpuThrottling = false l.lock.Unlock() time.Sleep(100 * time.Millisecond) continue @@ -498,34 +526,20 @@ func (l *limiter) limitCPU(ctx context.Context, limit float64, interval time.Dur } func (l *limiter) Current() (cpu float64, memory uint64) { - l.lock.Lock() - defer l.lock.Unlock() + l.lastUsageLock.RLock() + defer l.lastUsageLock.RUnlock() - cpu = l.cpuCurrent * 100 - memory = l.memoryCurrent * 100 + cpu = l.lastUsage.CPU.Current / l.ncpu + memory = l.lastUsage.Memory.Current return } func (l *limiter) Usage() Usage { - l.lock.Lock() - defer l.lock.Unlock() - - usage := Usage{} - - usage.CPU.NCPU = l.ncpu - usage.CPU.Limit = l.cpu * l.ncpu * 100 - usage.CPU.Current = l.cpuCurrent * l.ncpu * 100 - usage.CPU.Average = l.cpuAvg * l.ncpu * 100 - usage.CPU.Max = l.cpuMax * l.ncpu * 100 - usage.CPU.IsThrottling = l.cpuThrottling - - usage.Memory.Limit = l.memory - usage.Memory.Current = l.memoryCurrent - usage.Memory.Average = l.memoryAvg - usage.Memory.Max = l.memoryMax + l.lastUsageLock.RLock() + defer l.lastUsageLock.RUnlock() - return usage + return l.lastUsage } func (l *limiter) Limits() (cpu float64, memory uint64) { diff --git a/process/parser.go b/process/parser.go index 1eeeb1b6..c7ad38ed 100644 --- a/process/parser.go 
+++ b/process/parser.go @@ -10,7 +10,7 @@ type Parser interface { // Parse parses the given line and returns an indicator // for progress (e.g. based on the contents of the line, // or previous line, ...) - Parse(line string) uint64 + Parse(line []byte) uint64 // Stop tells the parser that the process stopped and provides // its exit state. @@ -50,12 +50,12 @@ func NewNullParser() Parser { var _ Parser = &nullParser{} -func (p *nullParser) Parse(string) uint64 { return 1 } -func (p *nullParser) Stop(string, Usage) {} -func (p *nullParser) ResetStats() {} -func (p *nullParser) ResetLog() {} -func (p *nullParser) Log() []Line { return []Line{} } -func (p *nullParser) IsRunning() bool { return true } +func (p *nullParser) Parse(line []byte) uint64 { return 1 } +func (p *nullParser) Stop(string, Usage) {} +func (p *nullParser) ResetStats() {} +func (p *nullParser) ResetLog() {} +func (p *nullParser) Log() []Line { return []Line{} } +func (p *nullParser) IsRunning() bool { return true } type bufferParser struct { log []Line @@ -69,10 +69,10 @@ func NewBufferParser() Parser { var _ Parser = &bufferParser{} -func (p *bufferParser) Parse(line string) uint64 { +func (p *bufferParser) Parse(line []byte) uint64 { p.log = append(p.log, Line{ Timestamp: time.Now(), - Data: line, + Data: string(line), }) return 1 } diff --git a/process/process.go b/process/process.go index 44e66516..c6bc01c8 100644 --- a/process/process.go +++ b/process/process.go @@ -167,17 +167,16 @@ type States struct { // Process represents a ffmpeg process type process struct { - binary string - args []string - cmd *exec.Cmd - pid int32 - stdout io.ReadCloser - lastLine string - state struct { + binary string + args []string + cmd *exec.Cmd + pid int32 + stdout io.ReadCloser + state struct { state stateType time time.Time states States - lock sync.Mutex + lock sync.RWMutex } order struct { order string @@ -401,8 +400,8 @@ func (p *process) setState(state stateType) (stateType, error) { } func (p *process) 
getState() stateType { - p.state.lock.Lock() - defer p.state.lock.Unlock() + p.state.lock.RLock() + defer p.state.lock.RUnlock() return p.state.state } @@ -431,15 +430,15 @@ func (p *process) setOrder(order string) bool { } func (p *process) isRunning() bool { - p.state.lock.Lock() - defer p.state.lock.Unlock() + p.state.lock.RLock() + defer p.state.lock.RUnlock() return p.state.state.IsRunning() } func (p *process) getStateString() string { - p.state.lock.Lock() - defer p.state.lock.Unlock() + p.state.lock.RLock() + defer p.state.lock.RUnlock() return p.state.state.String() } @@ -448,11 +447,11 @@ func (p *process) getStateString() string { func (p *process) Status() Status { usage := p.limits.Usage() - p.state.lock.Lock() + p.state.lock.RLock() stateTime := p.state.time state := p.state.state states := p.state.states - p.state.lock.Unlock() + p.state.lock.RUnlock() if state == stateRunning && !p.parser.IsRunning() { state = stateStarting @@ -575,7 +574,7 @@ func (p *process) start() error { if err != nil { p.setState(stateFailed) - p.parser.Parse(err.Error()) + p.parser.Parse([]byte(err.Error())) p.logger.WithError(err).Error().Log("Command failed") p.reconnect(p.delay(stateFailed)) @@ -587,7 +586,7 @@ func (p *process) start() error { if err := p.callbacks.onBeforeStart(); err != nil { p.setState(stateFailed) - p.parser.Parse(err.Error()) + p.parser.Parse([]byte(err.Error())) p.logger.WithError(err).Error().Log("Starting failed") p.reconnect(p.delay(stateFailed)) @@ -599,7 +598,7 @@ func (p *process) start() error { if err := p.cmd.Start(); err != nil { p.setState(stateFailed) - p.parser.Parse(err.Error()) + p.parser.Parse([]byte(err.Error())) p.logger.WithError(err).Error().Log("Command failed") p.reconnect(p.delay(stateFailed)) @@ -770,7 +769,7 @@ func (p *process) stop(wait bool, reason string) error { } if err != nil { - p.parser.Parse(err.Error()) + p.parser.Parse([]byte(err.Error())) p.debuglogger.WithFields(log.Fields{ "state": p.getStateString(), 
"order": p.getOrder(), @@ -857,7 +856,7 @@ func (p *process) staler(ctx context.Context) { // may kick in. func (p *process) reader() { scanner := bufio.NewScanner(p.stdout) - scanner.Split(scanLine) + scanner.Split(scanLines) // Reset the parser statistics p.parser.ResetStats() @@ -868,9 +867,7 @@ func (p *process) reader() { var n uint64 = 0 for scanner.Scan() { - line := scanner.Text() - - p.lastLine = line + line := scanner.Bytes() // Parse the output line from ffmpeg n = p.parser.Parse(line) @@ -886,12 +883,12 @@ func (p *process) reader() { if err := scanner.Err(); err != nil { p.logger.Debug().WithError(err).Log("") - p.parser.Parse(err.Error()) + p.parser.Parse([]byte(err.Error())) } p.stopReasonLock.Lock() if len(p.stopReason) != 0 { - p.parser.Parse(p.stopReason) + p.parser.Parse([]byte(p.stopReason)) p.stopReason = "" } p.stopReasonLock.Unlock() @@ -1062,7 +1059,7 @@ func (p *process) delay(state stateType) time.Duration { } // scanLine splits the data on \r, \n, or \r\n line endings -func scanLine(data []byte, atEOF bool) (advance int, token []byte, err error) { +func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { // Skip leading spaces. start := 0 for width := 0; start < len(data); start += width { diff --git a/process/process_test.go b/process/process_test.go index 1f6041eb..11c669b9 100644 --- a/process/process_test.go +++ b/process/process_test.go @@ -1,12 +1,15 @@ package process import ( + "bufio" + "bytes" "fmt" "sync" "testing" "time" "github.com/datarhei/core/v16/internal/testhelper" + "github.com/datarhei/core/v16/math/rand" "github.com/stretchr/testify/require" ) @@ -701,3 +704,63 @@ func TestProcessCallbacksOnBeforeStart(t *testing.T) { require.Equal(t, 1, len(lines)) require.Equal(t, "no, not now", lines[0].Data) } + +func BenchmarkScannerText(b *testing.B) { + data := []byte{} + + for i := 0; i < 1000; i++ { + line := rand.String(100) + "\n" + data = append(data, []byte(line)...) 
+ } + + b.ResetTimer() + + lastline := "" + + for i := 0; i < b.N; i++ { + r := bytes.NewReader(data) + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanLines) + + for scanner.Scan() { + line := scanner.Text() + + lastline = line + } + + err := scanner.Err() + require.NoError(b, err) + } + + fmt.Printf("%s\n", lastline) +} + +func BenchmarkScannerBytes(b *testing.B) { + data := []byte{} + + for i := 0; i < 1000; i++ { + line := rand.String(100) + "\n" + data = append(data, []byte(line)...) + } + + b.ResetTimer() + + lastline := []byte{} + + for i := 0; i < b.N; i++ { + r := bytes.NewReader(data) + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanLines) + + for scanner.Scan() { + line := scanner.Bytes() + + lastline = line + } + + err := scanner.Err() + require.NoError(b, err) + } + + fmt.Printf("%s\n", lastline) +} diff --git a/psutil/process.go b/psutil/process.go index 7580d3bf..0789f553 100644 --- a/psutil/process.go +++ b/psutil/process.go @@ -41,6 +41,7 @@ type process struct { statPrevious cpuTimesStat statPreviousTime time.Time nTicks uint64 + memRSS uint64 } func (u *util) Process(pid int32) (Process, error) { @@ -60,7 +61,8 @@ func (u *util) Process(pid int32) (Process, error) { ctx, cancel := context.WithCancel(context.Background()) p.stopTicker = cancel - go p.tick(ctx, 1000*time.Millisecond) + go p.tickCPU(ctx, time.Second) + go p.tickMemory(ctx, time.Second) return p, nil } @@ -69,7 +71,7 @@ func NewProcess(pid int32, limit bool) (Process, error) { return DefaultUtil.Process(pid) } -func (p *process) tick(ctx context.Context, interval time.Duration) { +func (p *process) tickCPU(ctx context.Context, interval time.Duration) { ticker := time.NewTicker(interval) defer ticker.Stop() @@ -78,7 +80,7 @@ func (p *process) tick(ctx context.Context, interval time.Duration) { case <-ctx.Done(): return case t := <-ticker.C: - stat := p.collect() + stat := p.collectCPU() p.lock.Lock() p.statPrevious, p.statCurrent = p.statCurrent, stat @@ -89,7 +91,7 
@@ func (p *process) tick(ctx context.Context, interval time.Duration) { } } -func (p *process) collect() cpuTimesStat { +func (p *process) collectCPU() cpuTimesStat { stat, err := p.cpuTimes() if err != nil { return cpuTimesStat{ @@ -101,6 +103,33 @@ func (p *process) collect() cpuTimesStat { return *stat } +func (p *process) tickMemory(ctx context.Context, interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + rss := p.collectMemory() + + p.lock.Lock() + p.memRSS = rss + p.lock.Unlock() + } + } +} + +func (p *process) collectMemory() uint64 { + info, err := p.proc.MemoryInfo() + if err != nil { + return 0 + } + + return info.RSS +} + func (p *process) Stop() { p.stopTicker() } @@ -113,26 +142,6 @@ func (p *process) Resume() error { return p.proc.Resume() } -func (p *process) cpuTimes() (*cpuTimesStat, error) { - times, err := p.proc.Times() - if err != nil { - return nil, err - } - - s := &cpuTimesStat{ - total: cpuTotal(times), - system: times.System, - user: times.User, - } - - s.other = s.total - s.system - s.user - if s.other < 0.0001 { - s.other = 0 - } - - return s, nil -} - func (p *process) CPUPercent() (*CPUInfoStat, error) { var diff float64 @@ -178,10 +187,8 @@ func (p *process) CPUPercent() (*CPUInfoStat, error) { } func (p *process) VirtualMemory() (uint64, error) { - info, err := p.proc.MemoryInfo() - if err != nil { - return 0, err - } + p.lock.RLock() + defer p.lock.RUnlock() - return info.RSS, nil + return p.memRSS, nil } diff --git a/psutil/process_linux.go b/psutil/process_linux.go new file mode 100644 index 00000000..e5b8894a --- /dev/null +++ b/psutil/process_linux.go @@ -0,0 +1,97 @@ +//go:build linux +// +build linux + +package psutil + +import ( + "bytes" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/tklauser/go-sysconf" +) + +// Extracted from "github.com/shirou/gopsutil/v3/process/process_linux.go" +// We only need the 
CPU times. p.proc.Times() calls a function that is +// doing more than we actually need. + +var clockTicks = 100 // default value + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + clockTicks = int(clkTck) + } +} + +func (p *process) cpuTimes() (*cpuTimesStat, error) { + value := os.Getenv("HOST_PROC") + if value == "" { + value = "/proc" + } + + path := filepath.Join(value, strconv.FormatInt(int64(p.pid), 10), "stat") + + contents, err := os.ReadFile(path) + if err != nil { + return nil, err + } + // Indexing from one, as described in `man proc` about the file /proc/[pid]/stat + fields := splitProcStat(contents) + + utime, err := strconv.ParseFloat(fields[14], 64) + if err != nil { + return nil, err + } + + stime, err := strconv.ParseFloat(fields[15], 64) + if err != nil { + return nil, err + } + + // There is no such thing as iotime in stat file. As an approximation, we + // will use delayacct_blkio_ticks (aggregated block I/O delays, as per Linux + // docs). Note: I am assuming at least Linux 2.6.18 + var iotime float64 + if len(fields) > 42 { + iotime, err = strconv.ParseFloat(fields[42], 64) + if err != nil { + iotime = 0 // Ancient linux version, most likely + } + } else { + iotime = 0 // e.g. 
SmartOS containers + } + + userTime := utime / float64(clockTicks) + systemTime := stime / float64(clockTicks) + iowaitTime := iotime / float64(clockTicks) + + s := &cpuTimesStat{ + total: userTime + systemTime + iowaitTime, + system: systemTime, + user: userTime, + other: iowaitTime, + } + + if s.other < 0.0001 { + s.other = 0 + } + + return s, nil +} + +func splitProcStat(content []byte) []string { + nameStart := bytes.IndexByte(content, '(') + nameEnd := bytes.LastIndexByte(content, ')') + restFields := strings.Fields(string(content[nameEnd+2:])) // +2 skip ') ' + name := content[nameStart+1 : nameEnd] + pid := strings.TrimSpace(string(content[:nameStart])) + fields := make([]string, 3, len(restFields)+3) + fields[1] = string(pid) + fields[2] = string(name) + fields = append(fields, restFields...) + return fields +} diff --git a/psutil/process_other.go b/psutil/process_other.go new file mode 100644 index 00000000..f5464906 --- /dev/null +++ b/psutil/process_other.go @@ -0,0 +1,24 @@ +//go:build !linux +// +build !linux + +package psutil + +func (p *process) cpuTimes() (*cpuTimesStat, error) { + times, err := p.proc.Times() + if err != nil { + return nil, err + } + + s := &cpuTimesStat{ + total: cpuTotal(times), + system: times.System, + user: times.User, + } + + s.other = s.total - s.system - s.user + if s.other < 0.0001 { + s.other = 0 + } + + return s, nil +} diff --git a/psutil/psutil.go b/psutil/psutil.go index f6b95934..0af65387 100644 --- a/psutil/psutil.go +++ b/psutil/psutil.go @@ -102,6 +102,7 @@ type util struct { statPrevious cpuTimesStat statPreviousTime time.Time nTicks uint64 + mem MemoryInfoStat } // New returns a new util, it will be started automatically @@ -127,6 +128,13 @@ func New(root string) (Util, error) { } } + mem, err := u.virtualMemory() + if err != nil { + return nil, fmt.Errorf("unable to determine system memory: %w", err) + } + + u.mem = *mem + u.stopOnce.Do(func() {}) u.Start() @@ -139,7 +147,8 @@ func (u *util) Start() { ctx, 
cancel := context.WithCancel(context.Background()) u.stopTicker = cancel - go u.tick(ctx, 1000*time.Millisecond) + go u.tickCPU(ctx, time.Second) + go u.tickMemory(ctx, time.Second) }) } @@ -233,7 +242,7 @@ func (u *util) cgroupCPULimit(version int) (uint64, float64) { return 0, 0 } -func (u *util) tick(ctx context.Context, interval time.Duration) { +func (u *util) tickCPU(ctx context.Context, interval time.Duration) { ticker := time.NewTicker(interval) defer ticker.Stop() @@ -242,7 +251,7 @@ func (u *util) tick(ctx context.Context, interval time.Duration) { case <-ctx.Done(): return case t := <-ticker.C: - stat := u.collect() + stat := u.collectCPU() u.lock.Lock() u.statPrevious, u.statCurrent = u.statCurrent, stat @@ -253,7 +262,7 @@ func (u *util) tick(ctx context.Context, interval time.Duration) { } } -func (u *util) collect() cpuTimesStat { +func (u *util) collectCPU() cpuTimesStat { stat, err := u.cpuTimes() if err != nil { return cpuTimesStat{ @@ -265,6 +274,34 @@ func (u *util) collect() cpuTimesStat { return *stat } +func (u *util) tickMemory(ctx context.Context, interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + stat := u.collectMemory() + if stat != nil { + u.lock.Lock() + u.mem = *stat + u.lock.Unlock() + } + } + } +} + +func (u *util) collectMemory() *MemoryInfoStat { + stat, err := u.virtualMemory() + if err != nil { + return nil + } + + return stat +} + func (u *util) CPUCounts(logical bool) (float64, error) { if u.hasCgroup && u.ncpu > 0 { return u.ncpu, nil @@ -409,7 +446,7 @@ func DiskUsage(path string) (*disk.UsageStat, error) { return DefaultUtil.DiskUsage(path) } -func (u *util) VirtualMemory() (*MemoryInfoStat, error) { +func (u *util) virtualMemory() (*MemoryInfoStat, error) { info, err := mem.VirtualMemory() if err != nil { return nil, err @@ -431,6 +468,19 @@ func (u *util) VirtualMemory() (*MemoryInfoStat, error) { }, nil } +func (u 
*util) VirtualMemory() (*MemoryInfoStat, error) { + u.lock.RLock() + defer u.lock.RUnlock() + + stat := &MemoryInfoStat{ + Total: u.mem.Total, + Available: u.mem.Available, + Used: u.mem.Used, + } + + return stat, nil +} + func VirtualMemory() (*MemoryInfoStat, error) { return DefaultUtil.VirtualMemory() } diff --git a/resources/resources.go b/resources/resources.go index 5d419824..d7255f05 100644 --- a/resources/resources.go +++ b/resources/resources.go @@ -3,6 +3,7 @@ package resources import ( "context" "fmt" + "os" "sync" "time" @@ -20,6 +21,7 @@ type MemoryInfo struct { Available uint64 // bytes Used uint64 // bytes Limit uint64 // bytes + Core uint64 // bytes Throttling bool Error error } @@ -31,6 +33,7 @@ type CPUInfo struct { Idle float64 // percent 0-100 Other float64 // percent 0-100 Limit float64 // percent 0-100 + Core float64 // percent 0-100 Throttling bool Error error } @@ -46,6 +49,8 @@ type resources struct { isCPULimiting bool isMemoryLimiting bool + self psutil.Process + cancelObserver context.CancelFunc lock sync.RWMutex @@ -137,6 +142,11 @@ func New(config Config) (Resources, error) { "max_memory": r.maxMemory, }) + r.self, err = psutil.NewProcess(int32(os.Getpid()), false) + if err != nil { + return nil, fmt.Errorf("unable to create process observer for self: %w", err) + } + r.logger.Debug().Log("Created") r.stopOnce.Do(func() {}) @@ -160,6 +170,7 @@ func (r *resources) Start() { func (r *resources) Stop() { r.stopOnce.Do(func() { r.cancelObserver() + r.self.Stop() r.startOnce = sync.Once{} @@ -326,6 +337,8 @@ func (r *resources) Info() Info { cpustat, cpuerr := r.psutil.CPUPercent() memstat, memerr := r.psutil.VirtualMemory() + selfcpu, _ := r.self.CPUPercent() + selfmem, _ := r.self.VirtualMemory() cpuinfo := CPUInfo{ NCPU: r.ncpu, @@ -334,6 +347,7 @@ func (r *resources) Info() Info { Idle: cpustat.Idle, Other: cpustat.Other, Limit: cpulimit, + Core: selfcpu.System + selfcpu.User + selfcpu.Other, Throttling: cputhrottling, Error: cpuerr, } 
@@ -343,6 +357,7 @@ func (r *resources) Info() Info { Available: memstat.Available, Used: memstat.Used, Limit: memlimit, + Core: selfmem, Throttling: memthrottling, Error: memerr, } diff --git a/restream/app/avstream.go b/restream/app/avstream.go index 3a780b7a..2a5deb5e 100644 --- a/restream/app/avstream.go +++ b/restream/app/avstream.go @@ -1,5 +1,7 @@ package app +import "github.com/datarhei/core/v16/ffmpeg/parse" + type AVstreamIO struct { State string Packet uint64 // counter @@ -7,6 +9,24 @@ type AVstreamIO struct { Size uint64 // bytes } +func (a *AVstreamIO) UnmarshalParser(p *parse.AVstreamIO) { + a.State = p.State + a.Packet = p.Packet + a.Time = p.Time + a.Size = p.Size +} + +func (a *AVstreamIO) MarshalParser() parse.AVstreamIO { + p := parse.AVstreamIO{ + State: a.State, + Packet: a.Packet, + Time: a.Time, + Size: a.Size, + } + + return p +} + type AVStreamSwap struct { URL string Status string @@ -14,6 +34,24 @@ type AVStreamSwap struct { LastError string } +func (a *AVStreamSwap) UnmarshalParser(p *parse.AVStreamSwap) { + a.URL = p.URL + a.Status = p.Status + a.LastURL = p.LastURL + a.LastError = p.LastError +} + +func (a *AVStreamSwap) MarshalParser() parse.AVStreamSwap { + p := parse.AVStreamSwap{ + URL: a.URL, + Status: a.Status, + LastURL: a.LastURL, + LastError: a.LastError, + } + + return p +} + type AVstream struct { Input AVstreamIO Output AVstreamIO @@ -30,3 +68,44 @@ type AVstream struct { Debug interface{} Swap AVStreamSwap } + +func (a *AVstream) UnmarshalParser(p *parse.AVstream) { + if p == nil { + return + } + + a.Aqueue = p.Aqueue + a.Queue = p.Queue + a.Dup = p.Dup + a.Drop = p.Drop + a.Enc = p.Enc + a.Looping = p.Looping + a.LoopingRuntime = p.LoopingRuntime + a.Duplicating = p.Duplicating + a.GOP = p.GOP + a.Mode = p.Mode + a.Swap.UnmarshalParser(&p.Swap) + a.Input.UnmarshalParser(&p.Input) + a.Output.UnmarshalParser(&p.Output) +} + +func (a *AVstream) MarshalParser() *parse.AVstream { + p := &parse.AVstream{ + Input: 
a.Input.MarshalParser(), + Output: a.Output.MarshalParser(), + Aqueue: a.Aqueue, + Queue: a.Queue, + Dup: a.Dup, + Drop: a.Drop, + Enc: a.Enc, + Looping: a.Looping, + LoopingRuntime: a.LoopingRuntime, + Duplicating: a.Duplicating, + GOP: a.GOP, + Mode: a.Mode, + Debug: a.Debug, + Swap: a.Swap.MarshalParser(), + } + + return p +} diff --git a/restream/app/avstream_test.go b/restream/app/avstream_test.go new file mode 100644 index 00000000..834aad0a --- /dev/null +++ b/restream/app/avstream_test.go @@ -0,0 +1,78 @@ +package app + +import ( + "testing" + + "github.com/datarhei/core/v16/ffmpeg/parse" + "github.com/stretchr/testify/require" +) + +func TestAVstreamIO(t *testing.T) { + original := parse.AVstreamIO{ + State: "running", + Packet: 484, + Time: 4373, + Size: 4783, + } + + p := AVstreamIO{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestAVstreamSwap(t *testing.T) { + original := parse.AVStreamSwap{ + URL: "ffdsjhhj", + Status: "none", + LastURL: "fjfd", + LastError: "none", + } + + p := AVStreamSwap{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestAVstream(t *testing.T) { + original := parse.AVstream{ + Input: parse.AVstreamIO{ + State: "running", + Packet: 484, + Time: 4373, + Size: 4783, + }, + Output: parse.AVstreamIO{ + State: "idle", + Packet: 4843, + Time: 483, + Size: 34, + }, + Aqueue: 8574, + Queue: 5877, + Dup: 473, + Drop: 463, + Enc: 474, + Looping: true, + LoopingRuntime: 347, + Duplicating: true, + GOP: "xxx", + Mode: "yyy", + Debug: nil, + Swap: parse.AVStreamSwap{ + URL: "ffdsjhhj", + Status: "none", + LastURL: "fjfd", + LastError: "none", + }, + } + + p := AVstream{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, &original, restored) +} diff --git a/restream/app/probe.go b/restream/app/probe.go index e2e9f3da..b49a5d5d 100644 --- a/restream/app/probe.go +++ 
b/restream/app/probe.go @@ -1,5 +1,7 @@ package app +import "github.com/datarhei/core/v16/ffmpeg/probe" + type ProbeIO struct { Address string @@ -26,7 +28,38 @@ type ProbeIO struct { Channels uint64 } +func (p *ProbeIO) UnmarshalProber(pp *probe.ProbeIO) { + p.Address = pp.Address + p.Index = pp.Index + p.Stream = pp.Stream + p.Language = pp.Language + p.Format = pp.Format + p.Type = pp.Type + p.Codec = pp.Codec + p.Coder = pp.Coder + p.Bitrate = pp.Bitrate + p.Duration = pp.Duration + p.Pixfmt = pp.Pixfmt + p.Width = pp.Width + p.Height = pp.Height + p.FPS = pp.FPS + p.Sampling = pp.Sampling + p.Layout = pp.Layout + p.Channels = pp.Channels +} + type Probe struct { Streams []ProbeIO Log []string } + +func (p *Probe) UnmarshalProber(pp *probe.Probe) { + p.Log = make([]string, len(pp.Log)) + copy(p.Log, pp.Log) + + p.Streams = make([]ProbeIO, len(pp.Streams)) + + for i, s := range pp.Streams { + p.Streams[i].UnmarshalProber(&s) + } +} diff --git a/restream/app/process.go b/restream/app/process.go index f48bec38..8d58d746 100644 --- a/restream/app/process.go +++ b/restream/app/process.go @@ -6,7 +6,9 @@ import ( "encoding/json" "strconv" "strings" + "sync" + "github.com/datarhei/core/v16/ffmpeg/parse" "github.com/datarhei/core/v16/process" ) @@ -79,7 +81,7 @@ type Config struct { StaleTimeout uint64 // seconds Timeout uint64 // seconds Scheduler string // crontab pattern or RFC3339 timestamp - LogPatterns []string // will we interpreted as regular expressions + LogPatterns []string // will be interpreted as regular expressions LimitCPU float64 // percent LimitMemory uint64 // bytes LimitWaitFor uint64 // seconds @@ -196,6 +198,37 @@ func (c *Config) ProcessID() ProcessID { } } +type order struct { + order string + lock sync.RWMutex +} + +func NewOrder(o string) order { + return order{ + order: o, + } +} + +func (o *order) Clone() order { + return order{ + order: o.order, + } +} + +func (o *order) String() string { + o.lock.RLock() + defer o.lock.RUnlock() + + return 
o.order +} + +func (o *order) Set(order string) { + o.lock.Lock() + defer o.lock.Unlock() + + o.order = order +} + type Process struct { ID string Owner string @@ -204,7 +237,7 @@ type Process struct { Config *Config CreatedAt int64 UpdatedAt int64 - Order string + Order order } func (process *Process) Clone() *Process { @@ -216,7 +249,7 @@ func (process *Process) Clone() *Process { Config: process.Config.Clone(), CreatedAt: process.CreatedAt, UpdatedAt: process.UpdatedAt, - Order: process.Order, + Order: process.Order.Clone(), } return clone @@ -272,6 +305,24 @@ type ProcessUsageCPU struct { IsThrottling bool } +func (p *ProcessUsageCPU) UnmarshalParser(pp *parse.UsageCPU) { + p.NCPU = pp.NCPU + p.Average = pp.Average + p.Max = pp.Max + p.Limit = pp.Limit +} + +func (p *ProcessUsageCPU) MarshalParser() parse.UsageCPU { + pp := parse.UsageCPU{ + NCPU: p.NCPU, + Average: p.Average, + Max: p.Max, + Limit: p.Limit, + } + + return pp +} + type ProcessUsageMemory struct { Current uint64 // bytes Average float64 // bytes @@ -279,11 +330,41 @@ type ProcessUsageMemory struct { Limit uint64 // bytes } +func (p *ProcessUsageMemory) UnmarshalParser(pp *parse.UsageMemory) { + p.Average = pp.Average + p.Max = pp.Max + p.Limit = pp.Limit +} + +func (p *ProcessUsageMemory) MarshalParser() parse.UsageMemory { + pp := parse.UsageMemory{ + Average: p.Average, + Max: p.Max, + Limit: p.Limit, + } + + return pp +} + type ProcessUsage struct { CPU ProcessUsageCPU Memory ProcessUsageMemory } +func (p *ProcessUsage) UnmarshalParser(pp *parse.Usage) { + p.CPU.UnmarshalParser(&pp.CPU) + p.Memory.UnmarshalParser(&pp.Memory) +} + +func (p *ProcessUsage) MarshalParser() parse.Usage { + pp := parse.Usage{ + CPU: p.CPU.MarshalParser(), + Memory: p.Memory.MarshalParser(), + } + + return pp +} + type ProcessID struct { ID string Domain string diff --git a/restream/app/process_test.go b/restream/app/process_test.go index 61f6ca1f..96889697 100644 --- a/restream/app/process_test.go +++ 
b/restream/app/process_test.go @@ -4,6 +4,7 @@ import ( "bytes" "testing" + "github.com/datarhei/core/v16/ffmpeg/parse" "github.com/stretchr/testify/require" ) @@ -58,3 +59,54 @@ func TestConfigHash(t *testing.T) { require.False(t, bytes.Equal(hash1, hash2)) } + +func TestProcessUsageCPU(t *testing.T) { + original := parse.UsageCPU{ + NCPU: 1.5, + Average: 0.9, + Max: 1.3, + Limit: 100, + } + + p := ProcessUsageCPU{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestProcessUsageMemory(t *testing.T) { + original := parse.UsageMemory{ + Average: 72, + Max: 150, + Limit: 200, + } + + p := ProcessUsageMemory{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestProcessUsage(t *testing.T) { + original := parse.Usage{ + CPU: parse.UsageCPU{ + NCPU: 1.5, + Average: 0.9, + Max: 1.3, + Limit: 100, + }, + Memory: parse.UsageMemory{ + Average: 72, + Max: 150, + Limit: 200, + }, + } + + p := ProcessUsage{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} diff --git a/restream/app/progress.go b/restream/app/progress.go index b59b5db8..d747ba67 100644 --- a/restream/app/progress.go +++ b/restream/app/progress.go @@ -1,5 +1,13 @@ package app +import "github.com/datarhei/core/v16/ffmpeg/parse" + +type ProgressIOFramerate struct { + Min float64 + Max float64 + Average float64 +} + type ProgressIO struct { ID string Address string @@ -13,11 +21,7 @@ type ProgressIO struct { Coder string Frame uint64 // counter Keyframe uint64 // counter - Framerate struct { - Min float64 - Max float64 - Average float64 - } + Framerate ProgressIOFramerate FPS float64 // rate, frames per second Packet uint64 // counter PPS float64 // rate, packets per second @@ -40,6 +44,73 @@ type ProgressIO struct { AVstream *AVstream } +func (p *ProgressIO) UnmarshalParser(pp *parse.ProgressIO) { + p.Address = pp.Address + 
p.Index = pp.Index + p.Stream = pp.Stream + p.Format = pp.Format + p.Type = pp.Type + p.Codec = pp.Codec + p.Coder = pp.Coder + p.Frame = pp.Frame + p.Keyframe = pp.Keyframe + p.Framerate = pp.Framerate + p.FPS = pp.FPS + p.Packet = pp.Packet + p.PPS = pp.PPS + p.Size = pp.Size + p.Bitrate = pp.Bitrate + p.Extradata = pp.Extradata + p.Pixfmt = pp.Pixfmt + p.Quantizer = pp.Quantizer + p.Width = pp.Width + p.Height = pp.Height + p.Sampling = pp.Sampling + p.Layout = pp.Layout + p.Channels = pp.Channels + + if pp.AVstream != nil { + p.AVstream = &AVstream{} + p.AVstream.UnmarshalParser(pp.AVstream) + } else { + p.AVstream = nil + } +} + +func (p *ProgressIO) MarshalParser() parse.ProgressIO { + pp := parse.ProgressIO{ + Address: p.Address, + Index: p.Index, + Stream: p.Stream, + Format: p.Format, + Type: p.Type, + Codec: p.Codec, + Coder: p.Coder, + Frame: p.Frame, + Keyframe: p.Keyframe, + Framerate: p.Framerate, + FPS: p.FPS, + Packet: p.Packet, + PPS: p.PPS, + Size: p.Size, + Bitrate: p.Bitrate, + Extradata: p.Extradata, + Pixfmt: p.Pixfmt, + Quantizer: p.Quantizer, + Width: p.Width, + Height: p.Height, + Sampling: p.Sampling, + Layout: p.Layout, + Channels: p.Channels, + } + + if p.AVstream != nil { + pp.AVstream = p.AVstream.MarshalParser() + } + + return pp +} + type Progress struct { Started bool Input []ProgressIO @@ -58,6 +129,69 @@ type Progress struct { Dup uint64 // counter } +func (p *Progress) UnmarshalParser(pp *parse.Progress) { + p.Started = pp.Started + p.Frame = pp.Frame + p.Packet = pp.Packet + p.FPS = pp.FPS + p.PPS = pp.PPS + p.Quantizer = pp.Quantizer + p.Size = pp.Size + p.Time = pp.Time + p.Bitrate = pp.Bitrate + p.Speed = pp.Speed + p.Drop = pp.Drop + p.Dup = pp.Dup + + p.Input = make([]ProgressIO, len(pp.Input)) + + for i, pinput := range pp.Input { + p.Input[i].UnmarshalParser(&pinput) + } + + p.Output = make([]ProgressIO, len(pp.Output)) + + for i, poutput := range pp.Output { + p.Output[i].UnmarshalParser(&poutput) + } + + 
p.Mapping.UnmarshalParser(&pp.Mapping) +} + +func (p *Progress) MarshalParser() parse.Progress { + pp := parse.Progress{ + Started: p.Started, + Input: []parse.ProgressIO{}, + Output: []parse.ProgressIO{}, + Mapping: p.Mapping.MarshalParser(), + Frame: p.Frame, + Packet: p.Packet, + FPS: p.FPS, + PPS: p.PPS, + Quantizer: p.Quantizer, + Size: p.Size, + Time: p.Time, + Bitrate: p.Bitrate, + Speed: p.Speed, + Drop: p.Drop, + Dup: p.Dup, + } + + pp.Input = make([]parse.ProgressIO, len(p.Input)) + + for i, pinput := range p.Input { + pp.Input[i] = pinput.MarshalParser() + } + + pp.Output = make([]parse.ProgressIO, len(p.Output)) + + for i, poutput := range p.Output { + pp.Output[i] = poutput.MarshalParser() + } + + return pp +} + type GraphElement struct { Index int Name string @@ -75,6 +209,44 @@ type GraphElement struct { Height uint64 } +func (g *GraphElement) UnmarshalParser(p *parse.GraphElement) { + g.Index = p.Index + g.Name = p.Name + g.Filter = p.Filter + g.DstName = p.DstName + g.DstFilter = p.DstFilter + g.Inpad = p.Inpad + g.Outpad = p.Outpad + g.Timebase = p.Timebase + g.Type = p.Type + g.Format = p.Format + g.Sampling = p.Sampling + g.Layout = p.Layout + g.Width = p.Width + g.Height = p.Height +} + +func (g *GraphElement) MarshalParser() parse.GraphElement { + p := parse.GraphElement{ + Index: g.Index, + Name: g.Name, + Filter: g.Filter, + DstName: g.DstName, + DstFilter: g.DstFilter, + Inpad: g.Inpad, + Outpad: g.Outpad, + Timebase: g.Timebase, + Type: g.Type, + Format: g.Format, + Sampling: g.Sampling, + Layout: g.Layout, + Width: g.Width, + Height: g.Height, + } + + return p +} + type GraphMapping struct { Input int // Index of input stream, negative if output element Output int // Index of output stream, negative if input element @@ -83,7 +255,58 @@ type GraphMapping struct { Copy bool // Whether it's a streamcopy i.e. 
there's no graph } +func (g *GraphMapping) UnmarshalParser(p *parse.GraphMapping) { + g.Input = p.Input + g.Output = p.Output + g.Index = p.Index + g.Name = p.Name + g.Copy = p.Copy +} + +func (g *GraphMapping) MarshalParser() parse.GraphMapping { + p := parse.GraphMapping{ + Input: g.Input, + Output: g.Output, + Index: g.Index, + Name: g.Name, + Copy: g.Copy, + } + + return p +} + type StreamMapping struct { Graphs []GraphElement Mapping []GraphMapping } + +func (s *StreamMapping) UnmarshalParser(p *parse.StreamMapping) { + s.Graphs = make([]GraphElement, len(p.Graphs)) + + for i, graph := range p.Graphs { + s.Graphs[i].UnmarshalParser(&graph) + } + + s.Mapping = make([]GraphMapping, len(p.Mapping)) + + for i, mapping := range p.Mapping { + s.Mapping[i].UnmarshalParser(&mapping) + } +} + +func (s *StreamMapping) MarshalParser() parse.StreamMapping { + p := parse.StreamMapping{ + Graphs: make([]parse.GraphElement, len(s.Graphs)), + Mapping: make([]parse.GraphMapping, len(s.Mapping)), + } + + for i, graph := range s.Graphs { + p.Graphs[i] = graph.MarshalParser() + } + + for i, mapping := range s.Mapping { + p.Mapping[i] = mapping.MarshalParser() + } + + return p +} diff --git a/restream/app/progress_test.go b/restream/app/progress_test.go new file mode 100644 index 00000000..bfffa293 --- /dev/null +++ b/restream/app/progress_test.go @@ -0,0 +1,390 @@ +package app + +import ( + "testing" + + "github.com/datarhei/core/v16/ffmpeg/parse" + "github.com/stretchr/testify/require" +) + +func TestProgressIO(t *testing.T) { + original := parse.ProgressIO{ + Address: "fhdj", + Index: 2, + Stream: 4, + Format: "yuv420p", + Type: "video", + Codec: "h264", + Coder: "libx264", + Frame: 39, + Keyframe: 433, + Framerate: struct { + Min float64 + Max float64 + Average float64 + }{ + Min: 47.0, + Max: 97.8, + Average: 463.9, + }, + FPS: 34.8, + Packet: 4737, + PPS: 473.8, + Size: 48474, + Bitrate: 38473, + Extradata: 4874, + Pixfmt: "none", + Quantizer: 2.3, + Width: 4848, + Height: 
9373, + Sampling: 4733, + Layout: "atmos", + Channels: 83, + } + + p := ProgressIO{ + AVstream: nil, + } + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestProgressIOWithAVstream(t *testing.T) { + original := parse.ProgressIO{ + Address: "fhdj", + Index: 2, + Stream: 4, + Format: "yuv420p", + Type: "video", + Codec: "h264", + Coder: "libx264", + Frame: 39, + Keyframe: 433, + Framerate: struct { + Min float64 + Max float64 + Average float64 + }{ + Min: 47.0, + Max: 97.8, + Average: 463.9, + }, + FPS: 34.8, + Packet: 4737, + PPS: 473.8, + Size: 48474, + Bitrate: 38473, + Extradata: 4874, + Pixfmt: "none", + Quantizer: 2.3, + Width: 4848, + Height: 9373, + Sampling: 4733, + Layout: "atmos", + Channels: 83, + AVstream: &parse.AVstream{ + Input: parse.AVstreamIO{ + State: "running", + Packet: 484, + Time: 4373, + Size: 4783, + }, + Output: parse.AVstreamIO{ + State: "idle", + Packet: 4843, + Time: 483, + Size: 34, + }, + Aqueue: 8574, + Queue: 5877, + Dup: 473, + Drop: 463, + Enc: 474, + Looping: true, + LoopingRuntime: 347, + Duplicating: true, + GOP: "xxx", + Mode: "yyy", + Debug: nil, + Swap: parse.AVStreamSwap{ + URL: "ffdsjhhj", + Status: "none", + LastURL: "fjfd", + LastError: "none", + }, + }, + } + + p := ProgressIO{ + AVstream: nil, + } + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestGraphMapping(t *testing.T) { + original := parse.GraphMapping{ + Input: 1, + Output: 3, + Index: 39, + Name: "foobar", + Copy: true, + } + + p := GraphMapping{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestGraphElement(t *testing.T) { + original := parse.GraphElement{ + Index: 5, + Name: "foobar", + Filter: "infilter", + DstName: "outfilter_", + DstFilter: "outfilter", + Inpad: "inpad", + Outpad: "outpad", + Timebase: "100", + Type: "video", + Format: "yuv420p", + 
Sampling: 39944, + Layout: "atmos", + Width: 1029, + Height: 463, + } + + p := GraphElement{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestStreamMapping(t *testing.T) { + original := parse.StreamMapping{ + Graphs: []parse.GraphElement{ + { + Index: 5, + Name: "foobar", + Filter: "infilter", + DstName: "outfilter_", + DstFilter: "outfilter", + Inpad: "inpad", + Outpad: "outpad", + Timebase: "100", + Type: "video", + Format: "yuv420p", + Sampling: 39944, + Layout: "atmos", + Width: 1029, + Height: 463, + }, + }, + Mapping: []parse.GraphMapping{ + { + Input: 1, + Output: 3, + Index: 39, + Name: "foobar", + Copy: true, + }, + }, + } + + p := StreamMapping{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} + +func TestProgress(t *testing.T) { + original := parse.Progress{ + Started: false, + Input: []parse.ProgressIO{ + { + Address: "fhd873487j", + Index: 2, + Stream: 4, + Format: "yuv420p", + Type: "video", + Codec: "h264", + Coder: "libx264", + Frame: 39, + Keyframe: 433, + Framerate: struct { + Min float64 + Max float64 + Average float64 + }{ + Min: 47.0, + Max: 97.8, + Average: 463.9, + }, + FPS: 34.8, + Packet: 4737, + PPS: 473.8, + Size: 48474, + Bitrate: 38473, + Extradata: 4874, + Pixfmt: "none", + Quantizer: 2.3, + Width: 4848, + Height: 9373, + Sampling: 4733, + Layout: "atmos", + Channels: 83, + AVstream: &parse.AVstream{ + Input: parse.AVstreamIO{ + State: "running", + Packet: 484, + Time: 4373, + Size: 4783, + }, + Output: parse.AVstreamIO{ + State: "idle", + Packet: 4843, + Time: 483, + Size: 34, + }, + Aqueue: 8574, + Queue: 5877, + Dup: 473, + Drop: 463, + Enc: 474, + Looping: true, + LoopingRuntime: 347, + Duplicating: true, + GOP: "xxx", + Mode: "yyy", + Debug: nil, + Swap: parse.AVStreamSwap{ + URL: "ffdsjhhj", + Status: "none", + LastURL: "fjfd", + LastError: "none", + }, + }, + }, + }, + Output: []parse.ProgressIO{ + { 
+ Address: "fhdj", + Index: 2, + Stream: 4, + Format: "yuv420p", + Type: "video", + Codec: "h264", + Coder: "libx264", + Frame: 39, + Keyframe: 433, + Framerate: struct { + Min float64 + Max float64 + Average float64 + }{ + Min: 47.0, + Max: 97.8, + Average: 463.9, + }, + FPS: 34.8, + Packet: 4737, + PPS: 473.8, + Size: 48474, + Bitrate: 38473, + Extradata: 4874, + Pixfmt: "none", + Quantizer: 2.3, + Width: 4848, + Height: 9373, + Sampling: 4733, + Layout: "atmos", + Channels: 83, + AVstream: &parse.AVstream{ + Input: parse.AVstreamIO{ + State: "running", + Packet: 484, + Time: 4373, + Size: 4783, + }, + Output: parse.AVstreamIO{ + State: "idle", + Packet: 4843, + Time: 483, + Size: 34, + }, + Aqueue: 8574, + Queue: 5877, + Dup: 473, + Drop: 463, + Enc: 474, + Looping: true, + LoopingRuntime: 347, + Duplicating: true, + GOP: "xxx", + Mode: "yyy", + Debug: nil, + Swap: parse.AVStreamSwap{ + URL: "ffdsjhhj", + Status: "none", + LastURL: "fjfd", + LastError: "none", + }, + }, + }, + }, + Mapping: parse.StreamMapping{ + Graphs: []parse.GraphElement{ + { + Index: 5, + Name: "foobar", + Filter: "infilter", + DstName: "outfilter_", + DstFilter: "outfilter", + Inpad: "inpad", + Outpad: "outpad", + Timebase: "100", + Type: "video", + Format: "yuv420p", + Sampling: 39944, + Layout: "atmos", + Width: 1029, + Height: 463, + }, + }, + Mapping: []parse.GraphMapping{ + { + Input: 1, + Output: 3, + Index: 39, + Name: "foobar", + Copy: true, + }, + }, + }, + Frame: 0, + Packet: 0, + FPS: 0, + PPS: 0, + Quantizer: 0, + Size: 0, + Time: 0, + Bitrate: 0, + Speed: 0, + Drop: 0, + Dup: 0, + } + + p := Progress{} + p.UnmarshalParser(&original) + restored := p.MarshalParser() + + require.Equal(t, original, restored) +} diff --git a/restream/app/report.go b/restream/app/report.go index 8bc80848..3bf69500 100644 --- a/restream/app/report.go +++ b/restream/app/report.go @@ -2,6 +2,10 @@ package app import ( "time" + + "github.com/datarhei/core/v16/ffmpeg/parse" + 
"github.com/datarhei/core/v16/process" + "github.com/datarhei/core/v16/slices" ) type LogLine struct { @@ -9,6 +13,20 @@ type LogLine struct { Data string } +func (l *LogLine) UnmarshalProcess(p *process.Line) { + l.Timestamp = p.Timestamp + l.Data = p.Data +} + +func (l *LogLine) MarshalProcess() process.Line { + p := process.Line{ + Timestamp: l.Timestamp, + Data: l.Data, + } + + return p +} + type ReportEntry struct { CreatedAt time.Time Prelude []string @@ -16,6 +34,32 @@ type ReportEntry struct { Matches []string } +func (r *ReportEntry) UnmarshalParser(p *parse.Report) { + r.CreatedAt = p.CreatedAt + r.Prelude = slices.Copy(p.Prelude) + r.Matches = slices.Copy(p.Matches) + + r.Log = make([]LogLine, len(p.Log)) + for i, line := range p.Log { + r.Log[i].UnmarshalProcess(&line) + } +} + +func (r *ReportEntry) MarshalParser() parse.Report { + p := parse.Report{ + CreatedAt: r.CreatedAt, + Prelude: slices.Copy(r.Prelude), + Matches: slices.Copy(r.Matches), + } + + p.Log = make([]process.Line, len(r.Log)) + for i, line := range r.Log { + p.Log[i] = line.MarshalProcess() + } + + return p +} + type ReportHistoryEntry struct { ReportEntry @@ -25,11 +69,47 @@ type ReportHistoryEntry struct { Usage ProcessUsage } +func (r *ReportHistoryEntry) UnmarshalParser(p *parse.ReportHistoryEntry) { + r.ReportEntry.UnmarshalParser(&p.Report) + + r.ExitedAt = p.ExitedAt + r.ExitState = p.ExitState + r.Usage.UnmarshalParser(&p.Usage) + r.Progress.UnmarshalParser(&p.Progress) +} + +func (r *ReportHistoryEntry) MarshalParser() parse.ReportHistoryEntry { + p := parse.ReportHistoryEntry{ + Report: r.ReportEntry.MarshalParser(), + ExitedAt: r.ExitedAt, + ExitState: r.ExitState, + Progress: r.Progress.MarshalParser(), + Usage: r.Usage.MarshalParser(), + } + + return p +} + type Report struct { ReportEntry History []ReportHistoryEntry } +func (r *Report) UnmarshalParser(p *parse.Report) { + r.ReportEntry.UnmarshalParser(p) +} + +func (r *Report) MarshalParser() (parse.Report, 
[]parse.ReportHistoryEntry) { + report := r.ReportEntry.MarshalParser() + history := make([]parse.ReportHistoryEntry, 0, len(r.History)) + + for _, h := range r.History { + history = append(history, h.MarshalParser()) + } + + return report, history +} + type ReportHistorySearchResult struct { ProcessID string Reference string diff --git a/restream/restream.go b/restream/core.go similarity index 68% rename from restream/restream.go rename to restream/core.go index 365959c4..e9effb7a 100644 --- a/restream/restream.go +++ b/restream/core.go @@ -12,22 +12,18 @@ import ( "time" "github.com/datarhei/core/v16/ffmpeg" - "github.com/datarhei/core/v16/ffmpeg/parse" - "github.com/datarhei/core/v16/ffmpeg/probe" "github.com/datarhei/core/v16/ffmpeg/skills" "github.com/datarhei/core/v16/glob" "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/net/url" - "github.com/datarhei/core/v16/process" "github.com/datarhei/core/v16/resources" "github.com/datarhei/core/v16/restream/app" rfs "github.com/datarhei/core/v16/restream/fs" "github.com/datarhei/core/v16/restream/replace" "github.com/datarhei/core/v16/restream/rewrite" "github.com/datarhei/core/v16/restream/store" - jsonstore "github.com/datarhei/core/v16/restream/store/json" "github.com/Masterminds/semver/v3" ) @@ -55,7 +51,8 @@ type Restreamer interface { ReloadProcess(id app.ProcessID) error // Reload a process GetProcess(id app.ProcessID) (*app.Process, error) // Get a process GetProcessState(id app.ProcessID) (*app.State, error) // Get the state of a process - GetProcessLog(id app.ProcessID) (*app.Report, error) // Get the logs of a process + GetProcessReport(id app.ProcessID) (*app.Report, error) // Get the logs of a process + SetProcessReport(id app.ProcessID, report *app.Report) error // Set the log history of a process SearchProcessLogHistory(idpattern, refpattern, state string, from, to *time.Time) []app.ReportHistorySearchResult // 
Search the log history of all processes GetPlayout(id app.ProcessID, inputid string) (string, error) // Get the URL of the playout API for a process SetProcessMetadata(id app.ProcessID, key string, data interface{}) error // Set metatdata to a process @@ -78,34 +75,6 @@ type Config struct { Logger log.Logger } -type task struct { - valid bool - id string // ID of the task/process - owner string - domain string - reference string - process *app.Process - config *app.Config // Process config with replaced static placeholders - command []string // The actual command parameter for ffmpeg - ffmpeg process.Process - parser parse.Parser - playout map[string]int - logger log.Logger - usesDisk bool // Whether this task uses the disk - metadata map[string]interface{} -} - -func (t *task) ID() app.ProcessID { - return app.ProcessID{ - ID: t.id, - Domain: t.domain, - } -} - -func (t *task) String() string { - return t.ID().String() -} - type restream struct { id string name string @@ -119,8 +88,8 @@ type restream struct { } replace replace.Replacer rewrite rewrite.Rewriter - tasks map[app.ProcessID]*task // domain:ProcessID - metadata map[string]interface{} // global metadata + tasks *Storage // domain:ProcessID + metadata map[string]interface{} // global metadata logger log.Logger resources resources.Resources @@ -144,23 +113,14 @@ func New(config Config) (Restreamer, error) { replace: config.Replace, rewrite: config.Rewrite, logger: config.Logger, + tasks: NewStorage(), + metadata: map[string]interface{}{}, } if r.logger == nil { r.logger = log.New("") } - if r.store == nil { - dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) - s, err := jsonstore.New(jsonstore.Config{ - Filesystem: dummyfs, - }) - if err != nil { - return nil, err - } - r.store = s - } - if len(config.Filesystems) == 0 { return nil, fmt.Errorf("at least one filesystem must be provided") } @@ -223,14 +183,14 @@ func (r *restream) Start() { go r.resourceObserver(ctx, r.resources, time.Second) } - for id, t 
:= range r.tasks { - if t.process.Order == "start" { - r.startProcess(id) - } + r.tasks.Range(func(id app.ProcessID, t *task) bool { + t.Restore() // The filesystem cleanup rules can be set - r.setCleanup(id, t.config) - } + r.setCleanup(id, t.Config()) + + return true + }) for _, fs := range r.fs.list { fs.Start() @@ -249,28 +209,27 @@ func (r *restream) Stop() { r.lock.Lock() defer r.lock.Unlock() - // Stop the currently running processes without altering their order such that on a subsequent - // Start() they will get restarted. - wg := sync.WaitGroup{} - for _, t := range r.tasks { - if t.ffmpeg == nil { - continue - } + // Stop the currently running processes without altering their order such that on a subsequent + // Start() they will get restarted. + r.tasks.Range(func(_ app.ProcessID, t *task) bool { wg.Add(1) - go func(p process.Process) { + go func(t *task) { defer wg.Done() - p.Stop(true) - }(t.ffmpeg) - } + t.Kill() + }(t) + + return true + }) wg.Wait() - for id := range r.tasks { + r.tasks.Range(func(id app.ProcessID, _ *task) bool { r.unsetCleanup(id) - } + return true + }) r.cancelObserver() @@ -300,24 +259,16 @@ func (r *restream) filesystemObserver(ctx context.Context, fs fs.Filesystem, int if isFull { // Stop all tasks that write to this filesystem - r.lock.Lock() - for id, t := range r.tasks { - if !t.valid { - continue + r.tasks.Range(func(id app.ProcessID, t *task) bool { + if !t.UsesDisk() { + return true } - if !t.usesDisk { - continue - } + r.logger.Warn().WithField("id", id).Log("Shutting down because filesystem is full") + t.Stop() - if t.process.Order != "start" { - continue - } - - r.logger.Warn().Log("Shutting down because filesystem is full") - r.stopProcess(id) - } - r.lock.Unlock() + return true + }) } } } @@ -352,34 +303,37 @@ func (r *restream) resourceObserver(ctx context.Context, rsc resources.Resources break } - r.lock.RLock() - for id, t := range r.tasks { - if !t.valid { - continue + r.tasks.Range(func(id app.ProcessID, t 
*task) bool { + if t.Limit(limitCPU, limitMemory) { + r.logger.Debug().WithFields(log.Fields{ + "limit_cpu": limitCPU, + "limit_memory": limitMemory, + "id": id, + }).Log("Limiting process CPU and memory consumption") } - r.logger.Debug().WithFields(log.Fields{ - "limit_cpu": limitCPU, - "limit_memory": limitMemory, - "id": id, - }).Log("Limiting process CPU and memory consumption") - t.ffmpeg.Limit(limitCPU, limitMemory) - } - r.lock.RUnlock() + return true + }) } } } func (r *restream) load() error { + if r.store == nil { + return nil + } + data, err := r.store.Load() + if err != nil { return err } - tasks := make(map[app.ProcessID]*task) + tasks := NewStorage() skills := r.ffmpeg.Skills() ffversion := skills.FFmpeg.Version + if v, err := semver.NewVersion(ffversion); err == nil { // Remove the patch level for the constraint ffversion = fmt.Sprintf("%d.%d.0", v.Major(), v.Minor()) @@ -391,36 +345,27 @@ func (r *restream) load() error { p.Process.Config.FFVersion = "^" + ffversion } - t := &task{ - id: p.Process.ID, - owner: p.Process.Owner, - domain: p.Process.Domain, - reference: p.Process.Reference, - process: p.Process, - config: p.Process.Config.Clone(), - logger: r.logger.WithFields(log.Fields{ - "id": p.Process.ID, - "owner": p.Process.Owner, - "domain": p.Process.Domain, - "reference": p.Process.Reference, - }), - } + t := NewTask(p.Process, r.logger.WithFields(log.Fields{ + "id": p.Process.ID, + "owner": p.Process.Owner, + "domain": p.Process.Domain, + "reference": p.Process.Reference, + })) - t.metadata = p.Metadata + t.ImportMetadata(p.Metadata) // Replace all placeholders in the config resolveStaticPlaceholders(t.config, r.replace) - tasks[t.ID()] = t + tasks.LoadOrStore(t.ID(), t) } } // Now that all tasks are defined and all placeholders are // replaced, we can resolve references and validate the // inputs and outputs. 
- for _, t := range tasks { - t := t + tasks.Range(func(_ app.ProcessID, t *task) bool { // Just warn if the ffmpeg version constraint doesn't match the available ffmpeg version if c, err := semver.NewConstraint(t.config.FFVersion); err == nil { if v, err := semver.NewVersion(skills.FFmpeg.Version); err == nil { @@ -440,7 +385,7 @@ func (r *restream) load() error { err := r.resolveAddresses(tasks, t.config) if err != nil { t.logger.Warn().WithError(err).Log("Ignoring") - continue + return true } // Validate config with all placeholders replaced. However, we need to take care @@ -451,13 +396,13 @@ func (r *restream) load() error { t.usesDisk, err = validateConfig(config, r.fs.list, r.ffmpeg) if err != nil { t.logger.Warn().WithError(err).Log("Ignoring") - continue + return true } err = r.setPlayoutPorts(t) if err != nil { t.logger.Warn().WithError(err).Log("Ignoring") - continue + return true } t.command = t.config.CreateCommand() @@ -495,23 +440,31 @@ func (r *restream) load() error { }, }) if err != nil { - return err + return true } t.ffmpeg = ffmpeg - t.valid = true - } + t.Valid(true) + return true + }) + + r.tasks.Clear() r.tasks = tasks + r.metadata = data.Metadata return nil } func (r *restream) save() { + if r.store == nil { + return + } + data := store.NewData() - for tid, t := range r.tasks { + r.tasks.Range(func(tid app.ProcessID, t *task) bool { domain := data.Process[tid.Domain] if domain == nil { domain = map[string]store.Process{} @@ -523,7 +476,9 @@ func (r *restream) save() { } data.Process[tid.Domain] = domain - } + + return true + }) data.Metadata = r.metadata @@ -548,35 +503,26 @@ var ErrProcessExists = errors.New("process already exists") var ErrForbidden = errors.New("forbidden") func (r *restream) AddProcess(config *app.Config) error { - r.lock.RLock() t, err := r.createTask(config) - r.lock.RUnlock() if err != nil { return err } - r.lock.Lock() - defer r.lock.Unlock() - tid := t.ID() - _, ok := r.tasks[tid] + _, ok := 
r.tasks.LoadOrStore(tid, t) if ok { return ErrProcessExists } - r.tasks[tid] = t - // set filesystem cleanup rules r.setCleanup(tid, t.config) - if t.process.Order == "start" { - err := r.startProcess(tid) - if err != nil { - delete(r.tasks, tid) - return err - } + err = t.Restore() + if err != nil { + r.tasks.Delete(tid) + return err } r.save() @@ -604,30 +550,22 @@ func (r *restream) createTask(config *app.Config) (*task, error) { Domain: config.Domain, Reference: config.Reference, Config: config.Clone(), - Order: "stop", + Order: app.NewOrder("stop"), CreatedAt: time.Now().Unix(), } process.UpdatedAt = process.CreatedAt if config.Autostart { - process.Order = "start" + process.Order.Set("start") } - t := &task{ - id: config.ID, - owner: config.Owner, - domain: config.Domain, - reference: process.Reference, - process: process, - config: process.Config.Clone(), - logger: r.logger.WithFields(log.Fields{ - "id": process.ID, - "owner": process.Owner, - "reference": process.Reference, - "domain": process.Domain, - }), - } + t := NewTask(process, r.logger.WithFields(log.Fields{ + "id": process.ID, + "owner": process.Owner, + "reference": process.Reference, + "domain": process.Domain, + })) resolveStaticPlaceholders(t.config, r.replace) @@ -693,7 +631,7 @@ func (r *restream) createTask(config *app.Config) (*task, error) { t.ffmpeg = ffmpeg - t.valid = true + t.Valid(true) return t, nil } @@ -1018,7 +956,7 @@ func validateOutputAddress(address, basedir string, ffmpeg ffmpeg.FFmpeg) (strin } // resolveAddresses replaces the addresse reference from each input in a config with the actual address. 
-func (r *restream) resolveAddresses(tasks map[app.ProcessID]*task, config *app.Config) error { +func (r *restream) resolveAddresses(tasks *Storage, config *app.Config) error { for i, input := range config.Input { // Resolve any references address, err := r.resolveAddress(tasks, config.ID, input.Address) @@ -1035,7 +973,7 @@ func (r *restream) resolveAddresses(tasks map[app.ProcessID]*task, config *app.C } // resolveAddress replaces the address reference with the actual address. -func (r *restream) resolveAddress(tasks map[app.ProcessID]*task, id, address string) (string, error) { +func (r *restream) resolveAddress(tasks *Storage, id, address string) (string, error) { matches, err := parseAddressReference(address) if err != nil { return address, err @@ -1052,12 +990,14 @@ func (r *restream) resolveAddress(tasks map[app.ProcessID]*task, id, address str var t *task = nil - for _, tsk := range tasks { + tasks.Range(func(_ app.ProcessID, tsk *task) bool { if tsk.id == matches["id"] && tsk.domain == matches["domain"] { t = tsk - break + return false } - } + + return true + }) if t == nil { return address, fmt.Errorf("unknown process '%s' in domain '%s' (%s)", matches["id"], matches["domain"], address) @@ -1169,16 +1109,24 @@ func parseAddressReference(address string) (map[string]string, error) { } func (r *restream) UpdateProcess(id app.ProcessID, config *app.Config) error { - r.lock.Lock() - defer r.lock.Unlock() + err := r.updateProcess(id, config) + if err != nil { + return err + } + + r.save() + + return nil +} - task, ok := r.tasks[id] +func (r *restream) updateProcess(id app.ProcessID, config *app.Config) error { + task, ok := r.tasks.Load(id) if !ok { return ErrUnknownProcess } // If the new config has the same hash as the current config, do nothing. 
- if task.process.Config.Equal(config) { + if task.Equal(config) { return nil } @@ -1190,153 +1138,102 @@ func (r *restream) UpdateProcess(id app.ProcessID, config *app.Config) error { tid := t.ID() if !tid.Equal(id) { - _, ok := r.tasks[tid] + _, ok := r.tasks.Load(tid) if ok { return ErrProcessExists } } - t.process.Order = task.process.Order + t.process.Order.Set(task.Order()) if err := r.stopProcess(id); err != nil { return fmt.Errorf("stop process: %w", err) } - if err := r.deleteProcess(id); err != nil { - return fmt.Errorf("delete process: %w", err) - } - // This would require a major version jump //t.process.CreatedAt = task.process.CreatedAt - t.process.UpdatedAt = time.Now().Unix() // Transfer the report history to the new process - task.parser.TransferReportHistory(t.parser) + history := task.ExportParserReportHistory() + t.ImportParserReportHistory(history) // Transfer the metadata to the new process - t.metadata = task.metadata + metadata := task.ExportMetadata() + t.ImportMetadata(metadata) - r.tasks[tid] = t + if err := r.deleteProcess(id); err != nil { + return fmt.Errorf("delete process: %w", err) + } - // set filesystem cleanup rules - r.setCleanup(tid, t.config) + r.tasks.Store(tid, t) - if t.process.Order == "start" { - r.startProcess(tid) - } + // set filesystem cleanup rules + r.setCleanup(tid, t.Config()) - r.save() + t.Restore() return nil } func (r *restream) GetProcessIDs(idpattern, refpattern, ownerpattern, domainpattern string) []app.ProcessID { - count := 0 - - var idglob glob.Glob - var refglob glob.Glob - var ownerglob glob.Glob - var domainglob glob.Glob + var idglob glob.Glob = nil + var refglob glob.Glob = nil + var ownerglob glob.Glob = nil + var domainglob glob.Glob = nil if len(idpattern) != 0 { - count++ idglob, _ = glob.Compile(idpattern) } if len(refpattern) != 0 { - count++ refglob, _ = glob.Compile(refpattern) } if len(ownerpattern) != 0 { - count++ ownerglob, _ = glob.Compile(ownerpattern) } if len(domainpattern) != 0 { - 
count++ domainglob, _ = glob.Compile(domainpattern) } var ids []app.ProcessID - r.lock.RLock() - defer r.lock.RUnlock() + if idglob == nil && refglob == nil && ownerglob == nil && domainglob == nil { + ids = make([]app.ProcessID, 0, r.tasks.Size()) - if count == 0 { - ids = make([]app.ProcessID, 0, len(r.tasks)) + r.tasks.Range(func(id app.ProcessID, t *task) bool { + ids = append(ids, id) - for _, t := range r.tasks { - tid := app.ProcessID{ - ID: t.id, - Domain: t.domain, - } - - ids = append(ids, tid) - } + return true + }) } else { ids = []app.ProcessID{} - for _, t := range r.tasks { - matches := 0 - if idglob != nil { - if match := idglob.Match(t.id); match { - matches++ - } + r.tasks.Range(func(id app.ProcessID, t *task) bool { + if !t.Match(idglob, refglob, ownerglob, domainglob) { + return true } - if refglob != nil { - if match := refglob.Match(t.reference); match { - matches++ - } - } + ids = append(ids, id) - if ownerglob != nil { - if match := ownerglob.Match(t.owner); match { - matches++ - } - } - - if domainglob != nil { - if match := domainglob.Match(t.domain); match { - matches++ - } - } - - if count != matches { - continue - } - - tid := app.ProcessID{ - ID: t.id, - Domain: t.domain, - } - - ids = append(ids, tid) - } + return true + }) } return ids } func (r *restream) GetProcess(id app.ProcessID) (*app.Process, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - task, ok := r.tasks[id] + task, ok := r.tasks.Load(id) if !ok { return &app.Process{}, ErrUnknownProcess } - process := task.process.Clone() - - return process, nil + return task.Process(), nil } func (r *restream) DeleteProcess(id app.ProcessID) error { - r.lock.Lock() - defer r.lock.Unlock() - err := r.deleteProcess(id) if err != nil { return err @@ -1348,27 +1245,24 @@ func (r *restream) DeleteProcess(id app.ProcessID) error { } func (r *restream) deleteProcess(tid app.ProcessID) error { - task, ok := r.tasks[tid] + task, ok := r.tasks.Load(tid) if !ok { return ErrUnknownProcess } 
- if task.process.Order != "stop" { + if task.Order() != "stop" { return fmt.Errorf("the process with the ID '%s' is still running", tid) } r.unsetPlayoutPorts(task) r.unsetCleanup(tid) - delete(r.tasks, tid) + r.tasks.Delete(tid) return nil } func (r *restream) StartProcess(id app.ProcessID) error { - r.lock.Lock() - defer r.lock.Unlock() - err := r.startProcess(id) if err != nil { return err @@ -1380,40 +1274,22 @@ } func (r *restream) startProcess(tid app.ProcessID) error { - task, ok := r.tasks[tid] + task, ok := r.tasks.Load(tid) if !ok { return ErrUnknownProcess } - if !task.valid { - return fmt.Errorf("invalid process definition") - } - - if task.ffmpeg != nil { - status := task.ffmpeg.Status() - - if task.process.Order == "start" && status.Order == "start" { - return nil - } - } - - if r.maxProc > 0 && r.nProc >= r.maxProc { - return fmt.Errorf("max. number of running processes (%d) reached", r.maxProc) + err := task.Start() + if err != nil { + return err } - task.process.Order = "start" - - task.ffmpeg.Start() - r.nProc++ return nil } func (r *restream) StopProcess(id app.ProcessID) error { - r.lock.Lock() - defer r.lock.Unlock() - err := r.stopProcess(id) if err != nil { return err @@ -1425,63 +1301,38 @@ } func (r *restream) stopProcess(tid app.ProcessID) error { - task, ok := r.tasks[tid] + task, ok := r.tasks.Load(tid) if !ok { return ErrUnknownProcess } - if task.ffmpeg == nil { - return nil - } - - status := task.ffmpeg.Status() - - if task.process.Order == "stop" && status.Order == "stop" { - return nil + // TODO: be careful with nProc and a nil error. Introduce an error in task.Stop() for the case that the process is not running.
+ err := task.Stop() + if err != nil { + return err } - task.process.Order = "stop" - - task.ffmpeg.Stop(true) - r.nProc-- return nil } func (r *restream) RestartProcess(id app.ProcessID) error { - r.lock.RLock() - defer r.lock.RUnlock() - return r.restartProcess(id) } func (r *restream) restartProcess(tid app.ProcessID) error { - task, ok := r.tasks[tid] + task, ok := r.tasks.Load(tid) if !ok { return ErrUnknownProcess } - if !task.valid { - return fmt.Errorf("invalid process definition") - } - - if task.process.Order == "stop" { - return nil - } - - if task.ffmpeg != nil { - task.ffmpeg.Stop(true) - task.ffmpeg.Start() - } + task.Restart() return nil } func (r *restream) ReloadProcess(id app.ProcessID) error { - r.lock.Lock() - defer r.lock.Unlock() - err := r.reloadProcess(id) if err != nil { return err @@ -1492,91 +1343,44 @@ func (r *restream) ReloadProcess(id app.ProcessID) error { return nil } -func (r *restream) reloadProcess(tid app.ProcessID) error { - t, ok := r.tasks[tid] +func (r *restream) reloadProcess(id app.ProcessID) error { + task, ok := r.tasks.Load(id) if !ok { return ErrUnknownProcess } - t.valid = false - - t.config = t.process.Config.Clone() - - resolveStaticPlaceholders(t.config, r.replace) - - err := r.resolveAddresses(r.tasks, t.config) + t, err := r.createTask(task.Config()) if err != nil { return err } - // Validate config with all placeholders replaced. However, we need to take care - // that the config with the task keeps its dynamic placeholders for process starts. 
- config := t.config.Clone() - resolveDynamicPlaceholder(config, r.replace) + tid := t.ID() - t.usesDisk, err = validateConfig(config, r.fs.list, r.ffmpeg) - if err != nil { - return err - } + t.process.Order.Set(task.Order()) - err = r.setPlayoutPorts(t) - if err != nil { - return err + if err := task.Stop(); err != nil { + return fmt.Errorf("stop process: %w", err) } - t.command = t.config.CreateCommand() - - order := "stop" - if t.process.Order == "start" { - order = "start" - r.stopProcess(tid) - } + // Transfer the report history to the new process + history := task.parser.ReportHistory() + t.parser.ImportReportHistory(history) - parser := r.ffmpeg.NewProcessParser(t.logger, t.String(), t.reference, t.config.LogPatterns) - t.parser.TransferReportHistory(parser) - t.parser = parser + // Transfer the metadata to the new process + t.metadata = task.metadata - limitMode := "hard" - if r.enableSoftLimit { - limitMode = "soft" + if err := r.deleteProcess(id); err != nil { + return fmt.Errorf("delete process: %w", err) } - ffmpeg, err := r.ffmpeg.New(ffmpeg.ProcessConfig{ - Reconnect: t.config.Reconnect, - ReconnectDelay: time.Duration(t.config.ReconnectDelay) * time.Second, - StaleTimeout: time.Duration(t.config.StaleTimeout) * time.Second, - Timeout: time.Duration(t.config.Timeout) * time.Second, - LimitCPU: t.config.LimitCPU, - LimitMemory: t.config.LimitMemory, - LimitDuration: time.Duration(t.config.LimitWaitFor) * time.Second, - LimitMode: limitMode, - Scheduler: t.config.Scheduler, - Args: t.command, - Parser: t.parser, - Logger: t.logger, - OnArgs: r.onArgs(t.config.Clone()), - OnBeforeStart: func() error { - if !r.enableSoftLimit { - return nil - } - - if err := r.resources.Request(t.config.LimitCPU, t.config.LimitMemory); err != nil { - return err - } + r.tasks.Store(tid, t) - return nil - }, - }) - if err != nil { - return err - } + // set filesystem cleanup rules + r.setCleanup(tid, t.Config()) - t.ffmpeg = ffmpeg - t.valid = true + t.Restore() - if order 
== "start" { - r.startProcess(tid) - } + r.save() return nil } @@ -1584,283 +1388,32 @@ func (r *restream) reloadProcess(tid app.ProcessID) error { func (r *restream) GetProcessState(id app.ProcessID) (*app.State, error) { state := &app.State{} - r.lock.RLock() - defer r.lock.RUnlock() - - task, ok := r.tasks[id] + task, ok := r.tasks.Load(id) if !ok { return state, ErrUnknownProcess } - if !task.valid { - return state, nil - } - - status := task.ffmpeg.Status() - - state.Order = task.process.Order - state.State = status.State - state.States.Marshal(status.States) - state.Time = status.Time.Unix() - state.Memory = status.Memory.Current - state.CPU = status.CPU.Current / status.CPU.NCPU - state.LimitMode = status.LimitMode - state.Resources.CPU = status.CPU - state.Resources.Memory = status.Memory - state.Duration = status.Duration.Round(10 * time.Millisecond).Seconds() - state.Reconnect = -1 - state.Command = status.CommandArgs - state.LastLog = task.parser.LastLogline() - - if status.Reconnect >= time.Duration(0) { - state.Reconnect = status.Reconnect.Round(10 * time.Millisecond).Seconds() - } - - convertProgressFromParser(&state.Progress, task.parser.Progress()) - - for i, p := range state.Progress.Input { - if int(p.Index) >= len(task.process.Config.Input) { - continue - } - - state.Progress.Input[i].ID = task.process.Config.Input[p.Index].ID - } - - for i, p := range state.Progress.Output { - if int(p.Index) >= len(task.process.Config.Output) { - continue - } - - state.Progress.Output[i].ID = task.process.Config.Output[p.Index].ID - } - - return state, nil + return task.State() } -// convertProgressFromParser converts a ffmpeg/parse.Progress type into a restream/app.Progress type. 
-func convertProgressFromParser(progress *app.Progress, pprogress parse.Progress) { - progress.Started = pprogress.Started - progress.Frame = pprogress.Frame - progress.Packet = pprogress.Packet - progress.FPS = pprogress.FPS - progress.PPS = pprogress.PPS - progress.Quantizer = pprogress.Quantizer - progress.Size = pprogress.Size - progress.Time = pprogress.Time - progress.Bitrate = pprogress.Bitrate - progress.Speed = pprogress.Speed - progress.Drop = pprogress.Drop - progress.Dup = pprogress.Dup - - for _, pinput := range pprogress.Input { - input := app.ProgressIO{ - Address: pinput.Address, - Index: pinput.Index, - Stream: pinput.Stream, - Format: pinput.Format, - Type: pinput.Type, - Codec: pinput.Codec, - Coder: pinput.Coder, - Frame: pinput.Frame, - Keyframe: pinput.Keyframe, - Framerate: pinput.Framerate, - FPS: pinput.FPS, - Packet: pinput.Packet, - PPS: pinput.PPS, - Size: pinput.Size, - Bitrate: pinput.Bitrate, - Extradata: pinput.Extradata, - Pixfmt: pinput.Pixfmt, - Quantizer: pinput.Quantizer, - Width: pinput.Width, - Height: pinput.Height, - Sampling: pinput.Sampling, - Layout: pinput.Layout, - Channels: pinput.Channels, - AVstream: nil, - } +func (r *restream) GetProcessReport(id app.ProcessID) (*app.Report, error) { + report := &app.Report{} - if pinput.AVstream != nil { - avstream := &app.AVstream{ - Input: app.AVstreamIO{ - State: pinput.AVstream.Input.State, - Packet: pinput.AVstream.Input.Packet, - Time: pinput.AVstream.Input.Time, - Size: pinput.AVstream.Input.Size, - }, - Output: app.AVstreamIO{ - State: pinput.AVstream.Output.State, - Packet: pinput.AVstream.Output.Packet, - Time: pinput.AVstream.Output.Time, - Size: pinput.AVstream.Output.Size, - }, - Aqueue: pinput.AVstream.Aqueue, - Queue: pinput.AVstream.Queue, - Dup: pinput.AVstream.Dup, - Drop: pinput.AVstream.Drop, - Enc: pinput.AVstream.Enc, - Looping: pinput.AVstream.Looping, - LoopingRuntime: pinput.AVstream.LoopingRuntime, - Duplicating: pinput.AVstream.Duplicating, - GOP: 
pinput.AVstream.GOP, - Mode: pinput.AVstream.Mode, - } - - input.AVstream = avstream - } - - progress.Input = append(progress.Input, input) - } - - for _, poutput := range pprogress.Output { - output := app.ProgressIO{ - Address: poutput.Address, - Index: poutput.Index, - Stream: poutput.Stream, - Format: poutput.Format, - Type: poutput.Type, - Codec: poutput.Codec, - Coder: poutput.Coder, - Frame: poutput.Frame, - Keyframe: poutput.Keyframe, - Framerate: poutput.Framerate, - FPS: poutput.FPS, - Packet: poutput.Packet, - PPS: poutput.PPS, - Size: poutput.Size, - Bitrate: poutput.Bitrate, - Extradata: poutput.Extradata, - Pixfmt: poutput.Pixfmt, - Quantizer: poutput.Quantizer, - Width: poutput.Width, - Height: poutput.Height, - Sampling: poutput.Sampling, - Layout: poutput.Layout, - Channels: poutput.Channels, - AVstream: nil, - } - - progress.Output = append(progress.Output, output) - } - - for _, pgraph := range pprogress.Mapping.Graphs { - graph := app.GraphElement{ - Index: pgraph.Index, - Name: pgraph.Name, - Filter: pgraph.Filter, - DstName: pgraph.DstName, - DstFilter: pgraph.DstFilter, - Inpad: pgraph.Inpad, - Outpad: pgraph.Outpad, - Timebase: pgraph.Timebase, - Type: pgraph.Type, - Format: pgraph.Format, - Sampling: pgraph.Sampling, - Layout: pgraph.Layout, - Width: pgraph.Width, - Height: pgraph.Height, - } - - progress.Mapping.Graphs = append(progress.Mapping.Graphs, graph) + task, ok := r.tasks.Load(id) + if !ok { + return report, ErrUnknownProcess } - for _, pmapping := range pprogress.Mapping.Mapping { - mapping := app.GraphMapping{ - Input: pmapping.Input, - Output: pmapping.Output, - Index: pmapping.Index, - Name: pmapping.Name, - Copy: pmapping.Copy, - } - - progress.Mapping.Mapping = append(progress.Mapping.Mapping, mapping) - } + return task.Report() } -func (r *restream) GetProcessLog(id app.ProcessID) (*app.Report, error) { - log := &app.Report{} - - r.lock.RLock() - defer r.lock.RUnlock() - - task, ok := r.tasks[id] +func (r *restream) 
SetProcessReport(id app.ProcessID, report *app.Report) error { + task, ok := r.tasks.Load(id) if !ok { - return log, ErrUnknownProcess - } - - if !task.valid { - return log, nil - } - - current := task.parser.Report() - - log.CreatedAt = current.CreatedAt - log.Prelude = current.Prelude - log.Log = make([]app.LogLine, len(current.Log)) - for i, line := range current.Log { - log.Log[i] = app.LogLine{ - Timestamp: line.Timestamp, - Data: line.Data, - } - } - log.Matches = current.Matches - - history := task.parser.ReportHistory() - - for _, h := range history { - e := app.ReportHistoryEntry{ - ReportEntry: app.ReportEntry{ - CreatedAt: h.CreatedAt, - Prelude: h.Prelude, - Matches: h.Matches, - }, - ExitedAt: h.ExitedAt, - ExitState: h.ExitState, - Usage: app.ProcessUsage{ - CPU: app.ProcessUsageCPU{ - NCPU: h.Usage.CPU.NCPU, - Average: h.Usage.CPU.Average, - Max: h.Usage.CPU.Max, - Limit: h.Usage.CPU.Limit, - }, - Memory: app.ProcessUsageMemory{ - Average: h.Usage.Memory.Average, - Max: h.Usage.Memory.Max, - Limit: h.Usage.Memory.Limit, - }, - }, - } - - convertProgressFromParser(&e.Progress, h.Progress) - - for i, p := range e.Progress.Input { - if int(p.Index) >= len(task.process.Config.Input) { - continue - } - - e.Progress.Input[i].ID = task.process.Config.Input[p.Index].ID - } - - for i, p := range e.Progress.Output { - if int(p.Index) >= len(task.process.Config.Output) { - continue - } - - e.Progress.Output[i].ID = task.process.Config.Output[p.Index].ID - } - - e.ReportEntry.Log = make([]app.LogLine, len(h.Log)) - for i, line := range h.Log { - e.ReportEntry.Log[i] = app.LogLine{ - Timestamp: line.Timestamp, - Data: line.Data, - } - } - - log.History = append(log.History, e) + return ErrUnknownProcess } - return log, nil + return task.SetReport(report) } func (r *restream) SearchProcessLogHistory(idpattern, refpattern, state string, from, to *time.Time) []app.ReportHistorySearchResult { @@ -1868,33 +1421,22 @@ func (r *restream) 
SearchProcessLogHistory(idpattern, refpattern, state string, ids := r.GetProcessIDs(idpattern, refpattern, "", "") - r.lock.RLock() - defer r.lock.RUnlock() - for _, id := range ids { - task, ok := r.tasks[id] + task, ok := r.tasks.Load(id) if !ok { continue } - presult := task.parser.SearchReportHistory(state, from, to) + presult := task.SearchReportHistory(state, from, to) - for _, f := range presult { - result = append(result, app.ReportHistorySearchResult{ - ProcessID: task.id, - Reference: task.reference, - ExitState: f.ExitState, - CreatedAt: f.CreatedAt, - ExitedAt: f.ExitedAt, - }) - } + result = append(result, presult...) } return result } func (r *restream) Probe(config *app.Config, timeout time.Duration) app.Probe { - appprobe := app.Probe{} + probe := app.Probe{} config = config.Clone() @@ -1902,16 +1444,16 @@ func (r *restream) Probe(config *app.Config, timeout time.Duration) app.Probe { err := r.resolveAddresses(r.tasks, config) if err != nil { - appprobe.Log = append(appprobe.Log, err.Error()) - return appprobe + probe.Log = append(probe.Log, err.Error()) + return probe } resolveDynamicPlaceholder(config, r.replace) _, err = validateConfig(config, r.fs.list, r.ffmpeg) if err != nil { - appprobe.Log = append(appprobe.Log, err.Error()) - return appprobe + probe.Log = append(probe.Log, err.Error()) + return probe } var command []string @@ -1950,51 +1492,22 @@ func (r *restream) Probe(config *app.Config, timeout time.Duration) app.Probe { formatter := log.NewConsoleFormatter(false) for _, e := range logbuffer.Events() { - appprobe.Log = append(appprobe.Log, strings.TrimSpace(formatter.String(e))) + probe.Log = append(probe.Log, strings.TrimSpace(formatter.String(e))) } - appprobe.Log = append(appprobe.Log, err.Error()) + probe.Log = append(probe.Log, err.Error()) - return appprobe + return probe } ffmpeg.Start() wg.Wait() - convertProbeFromProber(&appprobe, prober.Probe()) + p := prober.Probe() + probe.UnmarshalProber(&p) - return appprobe -} - -// 
convertProbeFromProber converts a ffmpeg/probe.Probe type into an restream/app.Probe type. -func convertProbeFromProber(appprobe *app.Probe, pprobe probe.Probe) { - appprobe.Log = make([]string, len(pprobe.Log)) - copy(appprobe.Log, pprobe.Log) - - for _, s := range pprobe.Streams { - stream := app.ProbeIO{ - Address: s.Address, - Index: s.Index, - Stream: s.Stream, - Language: s.Language, - Format: s.Format, - Type: s.Type, - Codec: s.Codec, - Coder: s.Coder, - Bitrate: s.Bitrate, - Duration: s.Duration, - Pixfmt: s.Pixfmt, - Width: s.Width, - Height: s.Height, - FPS: s.FPS, - Sampling: s.Sampling, - Layout: s.Layout, - Channels: s.Channels, - } - - appprobe.Streams = append(appprobe.Streams, stream) - } + return probe } func (r *restream) Skills() skills.Skills { @@ -2006,10 +1519,7 @@ func (r *restream) ReloadSkills() error { } func (r *restream) GetPlayout(id app.ProcessID, inputid string) (string, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - task, ok := r.tasks[id] + task, ok := r.tasks.Load(id) if !ok { return "", ErrUnknownProcess } @@ -2026,33 +1536,15 @@ func (r *restream) GetPlayout(id app.ProcessID, inputid string) (string, error) return "127.0.0.1:" + strconv.Itoa(port), nil } -var ErrMetadataKeyNotFound = errors.New("unknown key") - func (r *restream) SetProcessMetadata(id app.ProcessID, key string, data interface{}) error { - if len(key) == 0 { - return fmt.Errorf("a key for storing the data has to be provided") - } - - r.lock.Lock() - defer r.lock.Unlock() - - task, ok := r.tasks[id] + task, ok := r.tasks.Load(id) if !ok { return ErrUnknownProcess } - if task.metadata == nil { - task.metadata = make(map[string]interface{}) - } - - if data == nil { - delete(task.metadata, key) - } else { - task.metadata[key] = data - } - - if len(task.metadata) == 0 { - task.metadata = nil + err := task.SetMetadata(key, data) + if err != nil { + return err } r.save() @@ -2061,24 +1553,12 @@ func (r *restream) SetProcessMetadata(id app.ProcessID, key string, 
data interfa } func (r *restream) GetProcessMetadata(id app.ProcessID, key string) (interface{}, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - task, ok := r.tasks[id] + task, ok := r.tasks.Load(id) if !ok { return nil, ErrUnknownProcess } - if len(key) == 0 { - return task.metadata, nil - } - - data, ok := task.metadata[key] - if !ok { - return nil, ErrMetadataKeyNotFound - } - - return data, nil + return task.GetMetadata(key) } func (r *restream) SetMetadata(key string, data interface{}) error { diff --git a/restream/restream_test.go b/restream/core_test.go similarity index 97% rename from restream/restream_test.go rename to restream/core_test.go index 3932b6a7..aad54bec 100644 --- a/restream/restream_test.go +++ b/restream/core_test.go @@ -10,8 +10,8 @@ import ( "github.com/datarhei/core/v16/ffmpeg" "github.com/datarhei/core/v16/iam" - iamaccess "github.com/datarhei/core/v16/iam/access" iamidentity "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/internal/testhelper" "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/net" @@ -47,7 +47,7 @@ func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmp return nil, err } - policyAdapter, err := iamaccess.NewJSONAdapter(memfs, "./policy.json", nil) + policyAdapter, err := policy.NewJSONAdapter(memfs, "./policy.json", nil) if err != nil { return nil, err } @@ -354,7 +354,7 @@ func TestUpdateProcessLogHistoryTransfer(t *testing.T) { return state.State == "running" }, 10*time.Second, time.Second) - log, err := rs.GetProcessLog(tid1) + log, err := rs.GetProcessReport(tid1) require.NoError(t, err) require.Equal(t, 0, len(log.History)) @@ -379,7 +379,7 @@ func TestUpdateProcessLogHistoryTransfer(t *testing.T) { return state.State == "running" }, 10*time.Second, time.Second) - log, err = rs.GetProcessLog(tid2) + log, err = rs.GetProcessReport(tid2) require.NoError(t, err) require.Equal(t, 1, len(log.History)) @@ 
-617,7 +617,7 @@ func TestParseProcessPattern(t *testing.T) { rs.StopProcess(tid) - log, err := rs.GetProcessLog(tid) + log, err := rs.GetProcessReport(tid) require.NoError(t, err) require.Equal(t, 1, len(log.History)) @@ -684,10 +684,10 @@ func TestLog(t *testing.T) { rs.AddProcess(process) - _, err = rs.GetProcessLog(app.ProcessID{ID: "foobar"}) + _, err = rs.GetProcessReport(app.ProcessID{ID: "foobar"}) require.Error(t, err) - log, err := rs.GetProcessLog(tid) + log, err := rs.GetProcessReport(tid) require.NoError(t, err) require.Equal(t, 0, len(log.Prelude)) require.Equal(t, 0, len(log.Log)) @@ -698,7 +698,7 @@ func TestLog(t *testing.T) { time.Sleep(3 * time.Second) - log, _ = rs.GetProcessLog(tid) + log, _ = rs.GetProcessReport(tid) require.NotEqual(t, 0, len(log.Prelude)) require.NotEqual(t, 0, len(log.Log)) @@ -707,7 +707,7 @@ func TestLog(t *testing.T) { rs.StopProcess(tid) - log, _ = rs.GetProcessLog(tid) + log, _ = rs.GetProcessReport(tid) require.Equal(t, 0, len(log.Prelude)) require.Equal(t, 0, len(log.Log)) @@ -728,14 +728,14 @@ func TestLogTransfer(t *testing.T) { time.Sleep(3 * time.Second) rs.StopProcess(tid) - log, _ := rs.GetProcessLog(tid) + log, _ := rs.GetProcessReport(tid) require.Equal(t, 1, len(log.History)) err = rs.UpdateProcess(tid, process) require.NoError(t, err) - log, _ = rs.GetProcessLog(tid) + log, _ = rs.GetProcessReport(tid) require.Equal(t, 1, len(log.History)) } @@ -884,9 +884,17 @@ func TestTeeAddressReference(t *testing.T) { r := rs.(*restream) - require.Equal(t, "http://example.com/live.m3u8", r.tasks[app.ProcessID{ID: "process2"}].config.Input[0].Address) - require.Equal(t, "http://example.com/live.m3u8", r.tasks[app.ProcessID{ID: "process3"}].config.Input[0].Address) - require.Equal(t, "rtmp://example.com/live.stream?token=123", r.tasks[app.ProcessID{ID: "process4"}].config.Input[0].Address) + task, ok := r.tasks.Load(app.ProcessID{ID: "process2"}) + require.True(t, ok) + require.Equal(t, "http://example.com/live.m3u8", 
task.config.Input[0].Address) + + task, ok = r.tasks.Load(app.ProcessID{ID: "process3"}) + require.True(t, ok) + require.Equal(t, "http://example.com/live.m3u8", task.config.Input[0].Address) + + task, ok = r.tasks.Load(app.ProcessID{ID: "process4"}) + require.True(t, ok) + require.Equal(t, "rtmp://example.com/live.stream?token=123", task.config.Input[0].Address) } func TestConfigValidation(t *testing.T) { @@ -1466,7 +1474,7 @@ func TestProcessReplacer(t *testing.T) { LogPatterns: []string{}, } - task, ok := rs.tasks[app.ProcessID{ID: "314159265359"}] + task, ok := rs.tasks.Load(app.ProcessID{ID: "314159265359"}) require.True(t, ok) require.Equal(t, process, task.config) @@ -1493,7 +1501,7 @@ func TestProcessLogPattern(t *testing.T) { time.Sleep(5 * time.Second) - log, err := rs.GetProcessLog(tid) + log, err := rs.GetProcessReport(tid) require.NoError(t, err) require.Equal(t, 1, len(log.Matches)) @@ -1517,7 +1525,7 @@ func TestProcessLimit(t *testing.T) { rs := rsi.(*restream) - task, ok := rs.tasks[app.ProcessID{ID: process.ID}] + task, ok := rs.tasks.Load(app.ProcessID{ID: process.ID}) require.True(t, ok) status := task.ffmpeg.Status() diff --git a/restream/fs/fs.go b/restream/fs/fs.go index 43ea5b62..348051d8 100644 --- a/restream/fs/fs.go +++ b/restream/fs/fs.go @@ -149,6 +149,14 @@ func (rfs *filesystem) UnsetCleanup(id string) { } func (rfs *filesystem) cleanup() { + rfs.cleanupLock.RLock() + nPatterns := len(rfs.cleanupPatterns) + rfs.cleanupLock.RUnlock() + + if nPatterns == 0 { + return + } + filesAndDirs := rfs.Filesystem.List("/", fs.ListOptions{}) sort.SliceStable(filesAndDirs, func(i, j int) bool { return filesAndDirs[i].ModTime().Before(filesAndDirs[j].ModTime()) }) diff --git a/restream/fs/fs_test.go b/restream/fs/fs_test.go index a4b7923a..46d3a964 100644 --- a/restream/fs/fs_test.go +++ b/restream/fs/fs_test.go @@ -32,15 +32,15 @@ func TestMaxFiles(t *testing.T) { }, }) - cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) - 
cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"), -1) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"), -1) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"), -1) require.Eventually(t, func() bool { return cleanfs.Files() == 3 }, 3*time.Second, time.Second) - cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"), -1) require.Eventually(t, func() bool { if cleanfs.Files() != 3 { @@ -81,15 +81,15 @@ func TestMaxAge(t *testing.T) { }, }) - cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) - cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"), -1) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"), -1) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"), -1) require.Eventually(t, func() bool { return cleanfs.Files() == 0 }, 10*time.Second, time.Second) - cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"), -1) require.Eventually(t, func() bool { if cleanfs.Files() != 1 { @@ -130,15 +130,15 @@ func TestUnsetCleanup(t *testing.T) { }, }) - cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) - cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"), -1) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"), -1) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"), -1) require.Eventually(t, 
func() bool { return cleanfs.Files() == 3 }, 3*time.Second, time.Second) - cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"), -1) require.Eventually(t, func() bool { if cleanfs.Files() != 3 { @@ -158,7 +158,7 @@ func TestUnsetCleanup(t *testing.T) { cleanfs.UnsetCleanup("foobar") - cleanfs.WriteFileReader("/chunk_4.ts", strings.NewReader("chunk_4")) + cleanfs.WriteFileReader("/chunk_4.ts", strings.NewReader("chunk_4"), -1) require.Eventually(t, func() bool { if cleanfs.Files() != 4 { diff --git a/restream/manager.go b/restream/manager.go new file mode 100644 index 00000000..c2ce403d --- /dev/null +++ b/restream/manager.go @@ -0,0 +1,64 @@ +package restream + +import ( + "github.com/datarhei/core/v16/restream/app" + "github.com/puzpuzpuz/xsync/v3" +) + +type Storage struct { + tasks *xsync.MapOf[app.ProcessID, *task] +} + +func NewStorage() *Storage { + m := &Storage{ + tasks: xsync.NewMapOf[app.ProcessID, *task](), + } + + return m +} + +func (m *Storage) Range(f func(key app.ProcessID, value *task) bool) { + m.tasks.Range(f) +} + +func (m *Storage) Store(id app.ProcessID, t *task) { + m.tasks.Store(id, t) +} + +func (m *Storage) LoadOrStore(id app.ProcessID, t *task) (*task, bool) { + return m.tasks.LoadOrStore(id, t) +} + +func (m *Storage) Has(id app.ProcessID) bool { + _, hasTask := m.Load(id) + + return hasTask +} + +func (m *Storage) Load(id app.ProcessID) (*task, bool) { + return m.tasks.Load(id) +} + +func (m *Storage) Delete(id app.ProcessID) bool { + if t, ok := m.Load(id); ok { + m.tasks.Delete(id) + t.Destroy() + return true + } + + return false +} + +func (m *Storage) Size() int { + return m.tasks.Size() +} + +func (m *Storage) Clear() { + m.tasks.Range(func(_ app.ProcessID, t *task) bool { + t.Destroy() + + return true + }) + + m.tasks.Clear() +} diff --git a/restream/rewrite/rewrite_test.go b/restream/rewrite/rewrite_test.go index 10a3cac8..b8cd6337 100644 --- 
a/restream/rewrite/rewrite_test.go +++ b/restream/rewrite/rewrite_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/datarhei/core/v16/iam" - iamaccess "github.com/datarhei/core/v16/iam/access" iamidentity "github.com/datarhei/core/v16/iam/identity" + "github.com/datarhei/core/v16/iam/policy" "github.com/datarhei/core/v16/io/fs" "github.com/stretchr/testify/require" @@ -18,7 +18,7 @@ func getIAM(enableBasic bool) (iam.IAM, error) { return nil, err } - policyAdapter, err := iamaccess.NewJSONAdapter(memfs, "./policy.json", nil) + policyAdapter, err := policy.NewJSONAdapter(memfs, "./policy.json", nil) if err != nil { return nil, err } diff --git a/restream/store/json/data.go b/restream/store/json/data.go index b8e7a932..c9e3d0fd 100644 --- a/restream/store/json/data.go +++ b/restream/store/json/data.go @@ -186,7 +186,7 @@ func MarshalProcess(a *app.Process) Process { Config: ProcessConfig{}, CreatedAt: a.CreatedAt, UpdatedAt: a.UpdatedAt, - Order: a.Order, + Order: a.Order.String(), } p.Config.Marshal(a.Config) @@ -203,7 +203,7 @@ func UnmarshalProcess(p Process) *app.Process { Config: &app.Config{}, CreatedAt: p.CreatedAt, UpdatedAt: p.UpdatedAt, - Order: p.Order, + Order: app.NewOrder(p.Order), } a.Config = p.Config.Unmarshal() diff --git a/restream/store/json/json_test.go b/restream/store/json/json_test.go index 2771c8fd..94680b9d 100644 --- a/restream/store/json/json_test.go +++ b/restream/store/json/json_test.go @@ -73,7 +73,7 @@ func TestStoreLoad(t *testing.T) { }, CreatedAt: 0, UpdatedAt: 0, - Order: "stop", + Order: app.NewOrder("stop"), }, Metadata: map[string]interface{}{ "some": "data", @@ -112,7 +112,7 @@ func TestStoreLoad(t *testing.T) { }, CreatedAt: 0, UpdatedAt: 0, - Order: "stop", + Order: app.NewOrder("stop"), }, Metadata: map[string]interface{}{ "some-more": "data", diff --git a/restream/task.go b/restream/task.go new file mode 100644 index 00000000..611702ef --- /dev/null +++ b/restream/task.go @@ -0,0 +1,553 @@ +package restream + +import ( 
+ "errors" + "maps" + "time" + + "github.com/datarhei/core/v16/ffmpeg/parse" + "github.com/datarhei/core/v16/glob" + "github.com/datarhei/core/v16/log" + "github.com/datarhei/core/v16/process" + "github.com/datarhei/core/v16/restream/app" + + "github.com/puzpuzpuz/xsync/v3" +) + +var ErrInvalidProcessConfig = errors.New("invalid process config") +var ErrMetadataKeyNotFound = errors.New("unknown metadata key") +var ErrMetadataKeyRequired = errors.New("a key for storing metadata is required") + +type task struct { + valid bool + id string // ID of the task/process + owner string + domain string + reference string + process *app.Process + config *app.Config // Process config with replaced static placeholders + command []string // The actual command parameter for ffmpeg + ffmpeg process.Process + parser parse.Parser + playout map[string]int + logger log.Logger + usesDisk bool // Whether this task uses the disk + metadata map[string]interface{} + + lock *xsync.RBMutex +} + +func NewTask(process *app.Process, logger log.Logger) *task { + t := &task{ + id: process.ID, + owner: process.Owner, + domain: process.Domain, + reference: process.Reference, + process: process, + config: process.Config.Clone(), + playout: map[string]int{}, + logger: logger, + metadata: nil, + lock: xsync.NewRBMutex(), + } + + return t +} + +func (t *task) IsValid() bool { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + return t.valid +} + +func (t *task) Valid(valid bool) { + t.lock.Lock() + defer t.lock.Unlock() + + t.valid = valid +} + +func (t *task) UsesDisk() bool { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + return t.usesDisk +} + +func (t *task) ID() app.ProcessID { + return app.ProcessID{ + ID: t.id, + Domain: t.domain, + } +} + +func (t *task) String() string { + return t.ID().String() +} + +// Restore restores the task's order +func (t *task) Restore() error { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if !t.valid { + return 
ErrInvalidProcessConfig + } + + if t.ffmpeg == nil { + return ErrInvalidProcessConfig + } + + if t.process == nil { + return ErrInvalidProcessConfig + } + + if t.process.Order.String() == "start" { + err := t.ffmpeg.Start() + if err != nil { + return err + } + } + + return nil +} + +func (t *task) Start() error { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if !t.valid { + return ErrInvalidProcessConfig + } + + if t.ffmpeg == nil { + return nil + } + + if t.process == nil { + return nil + } + + status := t.ffmpeg.Status() + + if t.process.Order.String() == "start" && status.Order == "start" { + return nil + } + + t.process.Order.Set("start") + + t.ffmpeg.Start() + + return nil +} + +func (t *task) Stop() error { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.ffmpeg == nil { + return nil + } + + if t.process == nil { + return nil + } + + status := t.ffmpeg.Status() + + if t.process.Order.String() == "stop" && status.Order == "stop" { + return nil + } + + t.process.Order.Set("stop") + + t.ffmpeg.Stop(true) + + return nil +} + +// Kill stops a process without changing the tasks order +func (t *task) Kill() { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.ffmpeg == nil { + return + } + + t.ffmpeg.Stop(true) +} + +func (t *task) Restart() error { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if !t.valid { + return ErrInvalidProcessConfig + } + + if t.process == nil { + return nil + } + + if t.process.Order.String() == "stop" { + return nil + } + + if t.ffmpeg != nil { + t.ffmpeg.Stop(true) + t.ffmpeg.Start() + } + + return nil +} + +func (t *task) State() (*app.State, error) { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + state := &app.State{} + + if !t.valid { + return state, nil + } + + if t.ffmpeg == nil { + return state, nil + } + + if t.parser == nil { + return state, nil + } + + if t.process == nil { + return state, nil + } + + status := t.ffmpeg.Status() + + state.Order = 
t.process.Order.String() + state.State = status.State + state.States.Marshal(status.States) + state.Time = status.Time.Unix() + state.Memory = status.Memory.Current + state.CPU = status.CPU.Current / status.CPU.NCPU + state.LimitMode = status.LimitMode + state.Resources.CPU = status.CPU + state.Resources.Memory = status.Memory + state.Duration = status.Duration.Round(10 * time.Millisecond).Seconds() + state.Reconnect = -1 + state.Command = status.CommandArgs + state.LastLog = t.parser.LastLogline() + + if status.Reconnect >= time.Duration(0) { + state.Reconnect = status.Reconnect.Round(10 * time.Millisecond).Seconds() + } + + progress := t.parser.Progress() + state.Progress.UnmarshalParser(&progress) + + for i, p := range state.Progress.Input { + if int(p.Index) >= len(t.process.Config.Input) { + continue + } + + state.Progress.Input[i].ID = t.process.Config.Input[p.Index].ID + } + + for i, p := range state.Progress.Output { + if int(p.Index) >= len(t.process.Config.Output) { + continue + } + + state.Progress.Output[i].ID = t.process.Config.Output[p.Index].ID + } + + return state, nil +} + +func (t *task) Report() (*app.Report, error) { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + report := &app.Report{} + + if !t.valid { + return report, nil + } + + if t.parser == nil { + return report, nil + } + + current := t.parser.Report() + + report.UnmarshalParser(¤t) + + history := t.parser.ReportHistory() + + report.History = make([]app.ReportHistoryEntry, len(history)) + + for i, h := range history { + report.History[i].UnmarshalParser(&h) + e := &report.History[i] + + for i, p := range e.Progress.Input { + if int(p.Index) >= len(t.process.Config.Input) { + continue + } + + e.Progress.Input[i].ID = t.process.Config.Input[p.Index].ID + } + + for i, p := range e.Progress.Output { + if int(p.Index) >= len(t.process.Config.Output) { + continue + } + + e.Progress.Output[i].ID = t.process.Config.Output[p.Index].ID + } + } + + return report, nil +} + +func (t 
*task) SetReport(report *app.Report) error { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if !t.valid { + return nil + } + + if t.parser == nil { + return nil + } + + _, history := report.MarshalParser() + + t.parser.ImportReportHistory(history) + + return nil +} + +func (t *task) SearchReportHistory(state string, from, to *time.Time) []app.ReportHistorySearchResult { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.parser == nil { + return []app.ReportHistorySearchResult{} + } + + result := []app.ReportHistorySearchResult{} + + presult := t.parser.SearchReportHistory(state, from, to) + + for _, f := range presult { + result = append(result, app.ReportHistorySearchResult{ + ProcessID: t.id, + Reference: t.reference, + ExitState: f.ExitState, + CreatedAt: f.CreatedAt, + ExitedAt: f.ExitedAt, + }) + } + + return result +} + +func (t *task) SetMetadata(key string, data interface{}) error { + t.lock.Lock() + defer t.lock.Unlock() + + if len(key) == 0 { + return ErrMetadataKeyRequired + } + + if t.metadata == nil { + t.metadata = make(map[string]interface{}) + } + + if data == nil { + delete(t.metadata, key) + } else { + t.metadata[key] = data + } + + if len(t.metadata) == 0 { + t.metadata = nil + } + + return nil +} + +func (t *task) ImportMetadata(m map[string]interface{}) { + t.lock.Lock() + defer t.lock.Unlock() + + t.metadata = m +} + +func (t *task) GetMetadata(key string) (interface{}, error) { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if len(key) == 0 { + if t.metadata == nil { + return nil, nil + } + + return maps.Clone(t.metadata), nil + } + + if t.metadata == nil { + return nil, ErrMetadataKeyNotFound + } + + data, ok := t.metadata[key] + if !ok { + return nil, ErrMetadataKeyNotFound + } + + return data, nil +} + +func (t *task) ExportMetadata() map[string]interface{} { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + return t.metadata +} + +func (t *task) Limit(cpu, memory bool) bool { + token := 
t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.ffmpeg == nil { + return false + } + + t.ffmpeg.Limit(cpu, memory) + + return true +} + +func (t *task) Equal(config *app.Config) bool { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.process == nil { + return false + } + + return t.process.Config.Equal(config) +} + +func (t *task) Config() *app.Config { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.config == nil { + return nil + } + + return t.config.Clone() +} + +func (t *task) Destroy() { + t.Stop() + + t.lock.Lock() + defer t.lock.Unlock() + + t.valid = false + t.process = nil + t.config = nil + t.command = nil + t.ffmpeg = nil + t.parser = nil + t.metadata = map[string]interface{}{} +} + +func (t *task) Match(id, reference, owner, domain glob.Glob) bool { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + count := 0 + matches := 0 + + if id != nil { + count++ + if match := id.Match(t.id); match { + matches++ + } + } + + if reference != nil { + count++ + if match := reference.Match(t.reference); match { + matches++ + } + } + + if owner != nil { + count++ + if match := owner.Match(t.owner); match { + matches++ + } + } + + if domain != nil { + count++ + if match := domain.Match(t.domain); match { + matches++ + } + } + + return count == matches +} + +func (t *task) Process() *app.Process { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.process == nil { + return nil + } + + return t.process.Clone() +} + +func (t *task) Order() string { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.process == nil { + return "" + } + + return t.process.Order.String() +} + +func (t *task) ExportParserReportHistory() []parse.ReportHistoryEntry { + token := t.lock.RLock() + defer t.lock.RUnlock(token) + + if t.parser == nil { + return nil + } + + return t.parser.ReportHistory() +} + +func (t *task) ImportParserReportHistory(report []parse.ReportHistoryEntry) { + token := t.lock.RLock() + defer 
t.lock.RUnlock(token) + + if t.parser == nil { + return + } + + t.parser.ImportReportHistory(report) +} diff --git a/vendor/github.com/99designs/gqlgen/.golangci.yml b/vendor/github.com/99designs/gqlgen/.golangci.yml index 97a514b9..098727cb 100644 --- a/vendor/github.com/99designs/gqlgen/.golangci.yml +++ b/vendor/github.com/99designs/gqlgen/.golangci.yml @@ -1,25 +1,74 @@ run: tests: true + timeout: 5m linters-settings: + gocritic: + enabled-checks: + - emptyStringTest + - equalFold + - httpNoBody + - nilValReturn + - paramTypeCombine + - preferFprint + - yodaStyleExpr errcheck: exclude-functions: - (io.Writer).Write - io.Copy - io.WriteString + perfsprint: + int-conversion: false + err-error: false + errorf: true + sprintf1: false + strconcat: false revive: enable-all-rules: false rules: - name: empty-lines + - name: use-any + - name: struct-tag + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: error-return + - name: error-naming + - name: exported + disabled: true + - name: if-return + - name: increment-decrement + - name: var-declaration + - name: package-comments + disabled: true + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: superfluous-else + - name: unused-parameter + disabled: true + - name: unreachable-code + - name: redefines-builtin-id testifylint: disable-all: true enable: + - blank-import - bool-compare - compares + - empty - error-is-as - error-nil - expected-actual + - float-compare + - go-require + - len + - negative-positive - nil-compare + - require-error + - useless-assert linters: disable-all: true @@ -35,6 +84,7 @@ linters: - ineffassign - misspell - nakedret + - perfsprint - prealloc - revive - staticcheck @@ -52,3 +102,14 @@ issues: linters: - dupl - errcheck + # It's autogenerated code. 
+ - path: codegen/testserver/.*/resolver\.go + linters: + - gocritic + # Disable revive.use-any for backwards compatibility + - path: graphql/map.go + text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'" + - path: codegen/testserver/followschema/resolver.go + text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'" + - path: codegen/testserver/singlefile/resolver.go + text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'" diff --git a/vendor/github.com/99designs/gqlgen/api/generate.go b/vendor/github.com/99designs/gqlgen/api/generate.go index 8102b744..9e7b4188 100644 --- a/vendor/github.com/99designs/gqlgen/api/generate.go +++ b/vendor/github.com/99designs/gqlgen/api/generate.go @@ -90,11 +90,11 @@ func Generate(cfg *config.Config, option ...Option) error { } } // Merge again now that the generated models have been injected into the typemap - data_plugins := make([]interface{}, len(plugins)) + dataPlugins := make([]any, len(plugins)) for index := range plugins { - data_plugins[index] = plugins[index] + dataPlugins[index] = plugins[index] } - data, err := codegen.BuildData(cfg, data_plugins...) + data, err := codegen.BuildData(cfg, dataPlugins...) 
if err != nil { return fmt.Errorf("merging type systems failed: %w", err) } diff --git a/vendor/github.com/99designs/gqlgen/codegen/args.go b/vendor/github.com/99designs/gqlgen/codegen/args.go index 2f174332..983a3a02 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/args.go +++ b/vendor/github.com/99designs/gqlgen/codegen/args.go @@ -19,11 +19,11 @@ type ArgSet struct { type FieldArgument struct { *ast.ArgumentDefinition TypeReference *config.TypeReference - VarName string // The name of the var in go - Object *Object // A link back to the parent object - Default interface{} // The default value + VarName string // The name of the var in go + Object *Object // A link back to the parent object + Default any // The default value Directives []*Directive - Value interface{} // value set in Data + Value any // value set in Data } // ImplDirectives get not Builtin and location ARGUMENT_DEFINITION directive diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/binder.go b/vendor/github.com/99designs/gqlgen/codegen/config/binder.go index 6a488032..91b6e500 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/binder.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/binder.go @@ -61,7 +61,7 @@ func (b *Binder) FindTypeFromName(name string) (types.Type, error) { return b.FindType(pkgName, typeName) } -func (b *Binder) FindType(pkgName string, typeName string) (types.Type, error) { +func (b *Binder) FindType(pkgName, typeName string) (types.Type, error) { if pkgName == "" { if typeName == "map[string]interface{}" { return MapType, nil @@ -99,7 +99,7 @@ var ( func (b *Binder) DefaultUserObject(name string) (types.Type, error) { models := b.cfg.Models[name].Model if len(models) == 0 { - return nil, fmt.Errorf(name + " not found in typemap") + return nil, fmt.Errorf("%s not found in typemap", name) } if models[0] == "map[string]interface{}" { @@ -123,9 +123,9 @@ func (b *Binder) DefaultUserObject(name string) (types.Type, error) { return 
obj.Type(), nil } -func (b *Binder) FindObject(pkgName string, typeName string) (types.Object, error) { +func (b *Binder) FindObject(pkgName, typeName string) (types.Object, error) { if pkgName == "" { - return nil, fmt.Errorf("package cannot be nil") + return nil, errors.New("package cannot be nil") } pkg := b.pkgs.LoadWithTypes(pkgName) @@ -349,7 +349,7 @@ func isIntf(t types.Type) bool { func unwrapOmittable(t types.Type) (types.Type, bool) { if t == nil { - return t, false + return nil, false } named, ok := t.(*types.Named) if !ok { diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/config.go b/vendor/github.com/99designs/gqlgen/codegen/config/config.go index 39120e56..3228756c 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/config.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/config.go @@ -2,6 +2,7 @@ package config import ( "bytes" + "errors" "fmt" "go/types" "io" @@ -40,6 +41,7 @@ type Config struct { OmitGQLGenVersionInFileNotice bool `yaml:"omit_gqlgen_version_in_file_notice,omitempty"` OmitRootModels bool `yaml:"omit_root_models,omitempty"` OmitResolverFields bool `yaml:"omit_resolver_fields,omitempty"` + OmitPanicHandler bool `yaml:"omit_panic_handler,omitempty"` StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"` ReturnPointersInUmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"` ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"` @@ -305,7 +307,7 @@ func (c *Config) injectTypesFromSchema() error { if ma := bd.Arguments.ForName("models"); ma != nil { if mvs, err := ma.Value.Value(nil); err == nil { - for _, mv := range mvs.([]interface{}) { + for _, mv := range mvs.([]any) { c.Models.Add(schemaType.Name, mv.(string)) } } @@ -353,25 +355,11 @@ func (c *Config) injectTypesFromSchema() error { if efds := schemaType.Directives.ForNames("goExtraField"); len(efds) != 0 { for _, efd := range efds { - if fn := 
efd.Arguments.ForName("name"); fn != nil { - extraFieldName := "" - if fnv, err := fn.Value.Value(nil); err == nil { - extraFieldName = fnv.(string) - } - - if extraFieldName == "" { - return fmt.Errorf( - "argument 'name' for directive @goExtraField (src: %s, line: %d) cannot by empty", - efd.Position.Src.Name, - efd.Position.Line, - ) - } - + if t := efd.Arguments.ForName("type"); t != nil { extraField := ModelExtraField{} - if t := efd.Arguments.ForName("type"); t != nil { - if tv, err := t.Value.Value(nil); err == nil { - extraField.Type = tv.(string) - } + + if tv, err := t.Value.Value(nil); err == nil { + extraField.Type = tv.(string) } if extraField.Type == "" { @@ -394,13 +382,28 @@ func (c *Config) injectTypesFromSchema() error { } } - typeMapEntry := c.Models[schemaType.Name] - if typeMapEntry.ExtraFields == nil { - typeMapEntry.ExtraFields = make(map[string]ModelExtraField) + extraFieldName := "" + if fn := efd.Arguments.ForName("name"); fn != nil { + if fnv, err := fn.Value.Value(nil); err == nil { + extraFieldName = fnv.(string) + } } - c.Models[schemaType.Name] = typeMapEntry - c.Models[schemaType.Name].ExtraFields[extraFieldName] = extraField + if extraFieldName == "" { + // Embeddable fields + typeMapEntry := c.Models[schemaType.Name] + typeMapEntry.EmbedExtraFields = append(typeMapEntry.EmbedExtraFields, extraField) + c.Models[schemaType.Name] = typeMapEntry + } else { + // Regular fields + typeMapEntry := c.Models[schemaType.Name] + if typeMapEntry.ExtraFields == nil { + typeMapEntry.ExtraFields = make(map[string]ModelExtraField) + } + + c.Models[schemaType.Name] = typeMapEntry + c.Models[schemaType.Name].ExtraFields[extraFieldName] = extraField + } } } } @@ -439,7 +442,8 @@ type TypeMapEntry struct { EnumValues map[string]EnumValue `yaml:"enum_values,omitempty"` // Key is the Go name of the field. 
- ExtraFields map[string]ModelExtraField `yaml:"extraFields,omitempty"` + ExtraFields map[string]ModelExtraField `yaml:"extraFields,omitempty"` + EmbedExtraFields []ModelExtraField `yaml:"embedExtraFields,omitempty"` } type TypeMapField struct { @@ -480,7 +484,7 @@ type ModelExtraField struct { type StringList []string -func (a *StringList) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (a *StringList) UnmarshalYAML(unmarshal func(any) error) error { var single string err := unmarshal(&single) if err == nil { @@ -562,11 +566,11 @@ func (c *Config) check() error { Declaree: "federation", }) if c.Federation.ImportPath() != c.Exec.ImportPath() { - return fmt.Errorf("federation and exec must be in the same package") + return errors.New("federation and exec must be in the same package") } } if c.Federated { - return fmt.Errorf("federated has been removed, instead use\nfederation:\n filename: path/to/federated.go") + return errors.New("federated has been removed, instead use\nfederation:\n filename: path/to/federated.go") } for importPath, pkg := range fileList { @@ -641,7 +645,7 @@ func (tm TypeMap) ReferencedPackages() []string { return pkgs } -func (tm TypeMap) Add(name string, goType string) { +func (tm TypeMap) Add(name, goType string) { modelCfg := tm[name] modelCfg.Model = append(modelCfg.Model, goType) tm[name] = modelCfg diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/exec.go b/vendor/github.com/99designs/gqlgen/codegen/config/exec.go index fe1dccd2..838e17b2 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/exec.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/exec.go @@ -1,6 +1,7 @@ package config import ( + "errors" "fmt" "go/types" "path/filepath" @@ -38,15 +39,15 @@ func (r *ExecConfig) Check() error { switch r.Layout { case ExecLayoutSingleFile: if r.Filename == "" { - return fmt.Errorf("filename must be specified when using single-file layout") + return errors.New("filename must be specified when using 
single-file layout") } if !strings.HasSuffix(r.Filename, ".go") { - return fmt.Errorf("filename should be path to a go source file when using single-file layout") + return errors.New("filename should be path to a go source file when using single-file layout") } r.Filename = abs(r.Filename) case ExecLayoutFollowSchema: if r.DirName == "" { - return fmt.Errorf("dir must be specified when using follow-schema layout") + return errors.New("dir must be specified when using follow-schema layout") } r.DirName = abs(r.DirName) default: @@ -54,7 +55,7 @@ func (r *ExecConfig) Check() error { } if strings.ContainsAny(r.Package, "./\\") { - return fmt.Errorf("package should be the output package name only, do not include the output filename") + return errors.New("package should be the output package name only, do not include the output filename") } if r.Package == "" && r.Dir() != "" { diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/package.go b/vendor/github.com/99designs/gqlgen/codegen/config/package.go index 05e178b4..a399b2cc 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/package.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/package.go @@ -1,7 +1,7 @@ package config import ( - "fmt" + "errors" "go/types" "path/filepath" "strings" @@ -44,13 +44,13 @@ func (c *PackageConfig) IsDefined() bool { func (c *PackageConfig) Check() error { if strings.ContainsAny(c.Package, "./\\") { - return fmt.Errorf("package should be the output package name only, do not include the output filename") + return errors.New("package should be the output package name only, do not include the output filename") } if c.Filename == "" { - return fmt.Errorf("filename must be specified") + return errors.New("filename must be specified") } if !strings.HasSuffix(c.Filename, ".go") { - return fmt.Errorf("filename should be path to a go source file") + return errors.New("filename should be path to a go source file") } c.Filename = abs(c.Filename) diff --git 
a/vendor/github.com/99designs/gqlgen/codegen/config/resolver.go b/vendor/github.com/99designs/gqlgen/codegen/config/resolver.go index cb5fb72b..1901fd2d 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/resolver.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/resolver.go @@ -1,6 +1,7 @@ package config import ( + "errors" "fmt" "go/types" "path/filepath" @@ -59,7 +60,7 @@ func (r *ResolverConfig) Check() error { } if strings.ContainsAny(r.Package, "./\\") { - return fmt.Errorf("package should be the output package name only, do not include the output filename") + return errors.New("package should be the output package name only, do not include the output filename") } if r.Package == "" && r.Dir() != "" { diff --git a/vendor/github.com/99designs/gqlgen/codegen/data.go b/vendor/github.com/99designs/gqlgen/codegen/data.go index 6cd72213..7110de2f 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/data.go +++ b/vendor/github.com/99designs/gqlgen/codegen/data.go @@ -1,6 +1,7 @@ package codegen import ( + "errors" "fmt" "os" "path/filepath" @@ -34,7 +35,7 @@ type Data struct { MutationRoot *Object SubscriptionRoot *Object AugmentedSources []AugmentedSource - Plugins []interface{} + Plugins []any } func (d *Data) HasEmbeddableSources() bool { @@ -77,7 +78,7 @@ func (d *Data) Directives() DirectiveList { return res } -func BuildData(cfg *config.Config, plugins ...interface{}) (*Data, error) { +func BuildData(cfg *config.Config, plugins ...any) (*Data, error) { // We reload all packages to allow packages to be compared correctly. 
cfg.ReloadAllPackages() @@ -137,7 +138,7 @@ func BuildData(cfg *config.Config, plugins ...interface{}) (*Data, error) { if s.Schema.Query != nil { s.QueryRoot = s.Objects.ByName(s.Schema.Query.Name) } else { - return nil, fmt.Errorf("query entry point missing") + return nil, errors.New("query entry point missing") } if s.Schema.Mutation != nil { @@ -170,7 +171,7 @@ func BuildData(cfg *config.Config, plugins ...interface{}) (*Data, error) { } // otherwise show a generic error message - return nil, fmt.Errorf("invalid types were encountered while traversing the go source code, this probably means the invalid code generated isnt correct. add try adding -v to debug") + return nil, errors.New("invalid types were encountered while traversing the go source code, this probably means the invalid code generated isnt correct. add try adding -v to debug") } aSources := []AugmentedSource{} for _, s := range cfg.Sources { @@ -204,7 +205,7 @@ func BuildData(cfg *config.Config, plugins ...interface{}) (*Data, error) { func (b *builder) injectIntrospectionRoots(s *Data) error { obj := s.Objects.ByName(b.Schema.Query.Name) if obj == nil { - return fmt.Errorf("root query type must be defined") + return errors.New("root query type must be defined") } __type, err := b.buildField(obj, &ast.FieldDefinition{ diff --git a/vendor/github.com/99designs/gqlgen/codegen/directive.go b/vendor/github.com/99designs/gqlgen/codegen/directive.go index 2034abfc..30a79c35 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/directive.go +++ b/vendor/github.com/99designs/gqlgen/codegen/directive.go @@ -92,7 +92,7 @@ func (b *builder) buildDirectives() (map[string]*Directive, error) { func (b *builder) getDirectives(list ast.DirectiveList) ([]*Directive, error) { dirs := make([]*Directive, len(list)) for i, d := range list { - argValues := make(map[string]interface{}, len(d.Arguments)) + argValues := make(map[string]any, len(d.Arguments)) for _, da := range d.Arguments { val, err := da.Value.Value(nil) 
if err != nil { diff --git a/vendor/github.com/99designs/gqlgen/codegen/field.go b/vendor/github.com/99designs/gqlgen/codegen/field.go index f58d7087..509f48cd 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/field.go +++ b/vendor/github.com/99designs/gqlgen/codegen/field.go @@ -31,7 +31,7 @@ type Field struct { NoErr bool // If this is bound to a go method, does that method have an error as the second argument VOkFunc bool // If this is bound to a go method, is it of shape (interface{}, bool) Object *Object // A link back to the parent object - Default interface{} // The default value + Default any // The default value Stream bool // does this field return a channel? Directives []*Directive } @@ -174,7 +174,7 @@ func (b *builder) bindField(obj *Object, f *Field) (errret error) { } else if s := sig.Results(); s.Len() == 2 && s.At(1).Type().String() == "bool" { f.VOkFunc = true } else if sig.Results().Len() != 2 { - return fmt.Errorf("method has wrong number of args") + return errors.New("method has wrong number of args") } params := sig.Params() // If the first argument is the context, remove it from the comparison and set diff --git a/vendor/github.com/99designs/gqlgen/codegen/field.gotpl b/vendor/github.com/99designs/gqlgen/codegen/field.gotpl index 124a6ff3..4bf5c13d 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/field.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/field.gotpl @@ -10,12 +10,14 @@ func (ec *executionContext) _{{$object.Name}}_{{$field.Name}}(ctx context.Contex return {{ $null }} } ctx = graphql.WithFieldContext(ctx, fc) + {{- if not $.Config.OmitPanicHandler }} defer func () { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) ret = {{ $null }} } }() + {{- end }} {{- if $field.TypeReference.IsRoot }} {{- if $field.TypeReference.IsPtr }} res := &{{ $field.TypeReference.Elem.GO | ref }}{} @@ -95,12 +97,14 @@ func (ec *executionContext) {{ $field.FieldContextFunc }}({{ if not $field.Args }, } {{- if $field.Args 
}} + {{- if not $.Config.OmitPanicHandler }} defer func () { if r := recover(); r != nil { err = ec.Recover(ctx, r) ec.Error(ctx, err) } }() + {{- end }} ctx = graphql.WithFieldContext(ctx, fc) if fc.Args, err = ec.{{ $field.ArgsFunc }}(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) diff --git a/vendor/github.com/99designs/gqlgen/codegen/generate.go b/vendor/github.com/99designs/gqlgen/codegen/generate.go index d63758ab..bbd4d947 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/generate.go +++ b/vendor/github.com/99designs/gqlgen/codegen/generate.go @@ -20,7 +20,7 @@ var codegenTemplates embed.FS func GenerateCode(data *Data) error { if !data.Config.Exec.IsDefined() { - return fmt.Errorf("missing exec config") + return errors.New("missing exec config") } switch data.Config.Exec.Layout { diff --git a/vendor/github.com/99designs/gqlgen/codegen/object.gotpl b/vendor/github.com/99designs/gqlgen/codegen/object.gotpl index 604e58ff..09689a27 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/object.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/object.gotpl @@ -49,11 +49,13 @@ func (ec *executionContext) _{{$object.Name}}(ctx context.Context, sel ast.Selec field := field innerFunc := func(ctx context.Context, {{ if $field.TypeReference.GQL.NonNull }}fs{{ else }}_{{ end }} *graphql.FieldSet) (res graphql.Marshaler) { + {{- if not $.Config.OmitPanicHandler }} defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) } }() + {{- end }} res = ec._{{$object.Name}}_{{$field.Name}}(ctx, field{{if not $object.Root}}, obj{{end}}) {{- if $field.TypeReference.GQL.NonNull }} if res == graphql.Null { diff --git a/vendor/github.com/99designs/gqlgen/codegen/templates/import.go b/vendor/github.com/99designs/gqlgen/codegen/templates/import.go index 50115283..c26bdeab 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/templates/import.go +++ b/vendor/github.com/99designs/gqlgen/codegen/templates/import.go @@ -1,6 +1,7 
@@ package templates import ( + "errors" "fmt" "go/types" "strconv" @@ -62,11 +63,11 @@ func (s *Imports) Reserve(path string, aliases ...string) (string, error) { if existing.Alias == alias { return "", nil } - return "", fmt.Errorf("ambient import already exists") + return "", errors.New("ambient import already exists") } if alias := s.findByAlias(alias); alias != nil { - return "", fmt.Errorf("ambient import collides on an alias") + return "", errors.New("ambient import collides on an alias") } s.imports = append(s.imports, &Import{ diff --git a/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go b/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go index 669ab58d..4de30761 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go +++ b/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go @@ -2,6 +2,7 @@ package templates import ( "bytes" + "errors" "fmt" "go/types" "io/fs" @@ -52,7 +53,7 @@ type Options struct { // FileNotice is notice written below the package line FileNotice string // Data will be passed to the template execution. - Data interface{} + Data any Funcs template.FuncMap // Packages cache, you can find me on config.Config @@ -71,7 +72,7 @@ var ( // files inside the directory where you wrote the plugin. 
func Render(cfg Options) error { if CurrentImports != nil { - panic(fmt.Errorf("recursive or concurrent call to RenderToFile detected")) + panic(errors.New("recursive or concurrent call to RenderToFile detected")) } CurrentImports = &Imports{packages: cfg.Packages, destDir: filepath.Dir(cfg.Filename)} @@ -184,7 +185,7 @@ func parseTemplates(cfg Options, t *template.Template) (*template.Template, erro return t, nil } -func center(width int, pad string, s string) string { +func center(width int, pad, s string) string { if len(s)+2 > width { return s } @@ -206,6 +207,7 @@ func Funcs() template.FuncMap { "call": Call, "prefixLines": prefixLines, "notNil": notNil, + "strSplit": StrSplit, "reserveImport": CurrentImports.Reserve, "lookupImport": CurrentImports.Lookup, "go": ToGo, @@ -215,7 +217,7 @@ func Funcs() template.FuncMap { "add": func(a, b int) int { return a + b }, - "render": func(filename string, tpldata interface{}) (*bytes.Buffer, error) { + "render": func(filename string, tpldata any) (*bytes.Buffer, error) { return render(resolveName(filename, 0), tpldata) }, } @@ -567,7 +569,7 @@ func rawQuote(s string) string { return "`" + strings.ReplaceAll(s, "`", "`+\"`\"+`") + "`" } -func notNil(field string, data interface{}) bool { +func notNil(field string, data any) bool { v := reflect.ValueOf(data) if v.Kind() == reflect.Ptr { @@ -581,12 +583,16 @@ func notNil(field string, data interface{}) bool { return val.IsValid() && !val.IsNil() } -func Dump(val interface{}) string { +func StrSplit(s, sep string) []string { + return strings.Split(s, sep) +} + +func Dump(val any) string { switch val := val.(type) { case int: return strconv.Itoa(val) case int64: - return fmt.Sprintf("%d", val) + return strconv.FormatInt(val, 10) case float64: return fmt.Sprintf("%f", val) case string: @@ -595,13 +601,13 @@ func Dump(val interface{}) string { return strconv.FormatBool(val) case nil: return "nil" - case []interface{}: + case []any: var parts []string for _, part := range val { 
parts = append(parts, Dump(part)) } return "[]interface{}{" + strings.Join(parts, ",") + "}" - case map[string]interface{}: + case map[string]any: buf := bytes.Buffer{} buf.WriteString("map[string]interface{}{") var keys []string @@ -641,7 +647,7 @@ func resolveName(name string, skip int) string { return filepath.Join(filepath.Dir(callerFile), name) } -func render(filename string, tpldata interface{}) (*bytes.Buffer, error) { +func render(filename string, tpldata any) (*bytes.Buffer, error) { t := template.New("").Funcs(Funcs()) b, err := os.ReadFile(filename) diff --git a/vendor/github.com/99designs/gqlgen/codegen/type.gotpl b/vendor/github.com/99designs/gqlgen/codegen/type.gotpl index 116f0a90..ebebdf14 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/type.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/type.gotpl @@ -115,12 +115,14 @@ } ctx := graphql.WithFieldContext(ctx, fc) f := func(i int) { + {{- if not $.Config.OmitPanicHandler }} defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) ret = nil } }() + {{- end }} if !isLen1 { defer wg.Done() } diff --git a/vendor/github.com/99designs/gqlgen/complexity/complexity.go b/vendor/github.com/99designs/gqlgen/complexity/complexity.go index aa0f8643..288bb539 100644 --- a/vendor/github.com/99designs/gqlgen/complexity/complexity.go +++ b/vendor/github.com/99designs/gqlgen/complexity/complexity.go @@ -6,7 +6,7 @@ import ( "github.com/99designs/gqlgen/graphql" ) -func Calculate(es graphql.ExecutableSchema, op *ast.OperationDefinition, vars map[string]interface{}) int { +func Calculate(es graphql.ExecutableSchema, op *ast.OperationDefinition, vars map[string]any) int { walker := complexityWalker{ es: es, schema: es.Schema(), @@ -18,7 +18,7 @@ func Calculate(es graphql.ExecutableSchema, op *ast.OperationDefinition, vars ma type complexityWalker struct { es graphql.ExecutableSchema schema *ast.Schema - vars map[string]interface{} + vars map[string]any } func (cw complexityWalker) 
selectionSetComplexity(selectionSet ast.SelectionSet) int { @@ -57,7 +57,7 @@ func (cw complexityWalker) selectionSetComplexity(selectionSet ast.SelectionSet) return complexity } -func (cw complexityWalker) interfaceFieldComplexity(def *ast.Definition, field string, childComplexity int, args map[string]interface{}) int { +func (cw complexityWalker) interfaceFieldComplexity(def *ast.Definition, field string, childComplexity int, args map[string]any) int { // Interfaces don't have their own separate field costs, so they have to assume the worst case. // We iterate over all implementors and choose the most expensive one. maxComplexity := 0 @@ -71,7 +71,7 @@ func (cw complexityWalker) interfaceFieldComplexity(def *ast.Definition, field s return maxComplexity } -func (cw complexityWalker) fieldComplexity(object, field string, childComplexity int, args map[string]interface{}) int { +func (cw complexityWalker) fieldComplexity(object, field string, childComplexity int, args map[string]any) int { if customComplexity, ok := cw.es.Complexity(object, field, childComplexity, args); ok && customComplexity >= childComplexity { return customComplexity } diff --git a/vendor/github.com/99designs/gqlgen/graphql/any.go b/vendor/github.com/99designs/gqlgen/graphql/any.go index 6ea8bf2e..be600b2f 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/any.go +++ b/vendor/github.com/99designs/gqlgen/graphql/any.go @@ -5,7 +5,7 @@ import ( "io" ) -func MarshalAny(v interface{}) Marshaler { +func MarshalAny(v any) Marshaler { return WriterFunc(func(w io.Writer) { err := json.NewEncoder(w).Encode(v) if err != nil { @@ -14,6 +14,6 @@ func MarshalAny(v interface{}) Marshaler { }) } -func UnmarshalAny(v interface{}) (interface{}, error) { +func UnmarshalAny(v any) (any, error) { return v, nil } diff --git a/vendor/github.com/99designs/gqlgen/graphql/bool.go b/vendor/github.com/99designs/gqlgen/graphql/bool.go index f435e0c0..b01f6eb1 100644 --- 
a/vendor/github.com/99designs/gqlgen/graphql/bool.go +++ b/vendor/github.com/99designs/gqlgen/graphql/bool.go @@ -3,20 +3,19 @@ package graphql import ( "fmt" "io" + "strconv" "strings" ) func MarshalBoolean(b bool) Marshaler { - if b { - return WriterFunc(func(w io.Writer) { w.Write(trueLit) }) - } - return WriterFunc(func(w io.Writer) { w.Write(falseLit) }) + str := strconv.FormatBool(b) + return WriterFunc(func(w io.Writer) { w.Write([]byte(str)) }) } -func UnmarshalBoolean(v interface{}) (bool, error) { +func UnmarshalBoolean(v any) (bool, error) { switch v := v.(type) { case string: - return strings.ToLower(v) == "true", nil + return strings.EqualFold(v, "true"), nil case int: return v != 0, nil case bool: diff --git a/vendor/github.com/99designs/gqlgen/graphql/cache.go b/vendor/github.com/99designs/gqlgen/graphql/cache.go index e552ce67..ef2dd5a5 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/cache.go +++ b/vendor/github.com/99designs/gqlgen/graphql/cache.go @@ -5,25 +5,25 @@ import "context" // Cache is a shared store for APQ and query AST caching type Cache interface { // Get looks up a key's value from the cache. - Get(ctx context.Context, key string) (value interface{}, ok bool) + Get(ctx context.Context, key string) (value any, ok bool) // Add adds a value to the cache. - Add(ctx context.Context, key string, value interface{}) + Add(ctx context.Context, key string, value any) } // MapCache is the simplest implementation of a cache, because it can not evict it should only be used in tests -type MapCache map[string]interface{} +type MapCache map[string]any // Get looks up a key's value from the cache. -func (m MapCache) Get(_ context.Context, key string) (value interface{}, ok bool) { +func (m MapCache) Get(_ context.Context, key string) (value any, ok bool) { v, ok := m[key] return v, ok } // Add adds a value to the cache. 
-func (m MapCache) Add(_ context.Context, key string, value interface{}) { m[key] = value } +func (m MapCache) Add(_ context.Context, key string, value any) { m[key] = value } type NoCache struct{} -func (n NoCache) Get(_ context.Context, _ string) (value interface{}, ok bool) { return nil, false } -func (n NoCache) Add(_ context.Context, _ string, _ interface{}) {} +func (n NoCache) Get(_ context.Context, _ string) (value any, ok bool) { return nil, false } +func (n NoCache) Add(_ context.Context, _ string, _ any) {} diff --git a/vendor/github.com/99designs/gqlgen/graphql/coercion.go b/vendor/github.com/99designs/gqlgen/graphql/coercion.go index d3d3c18b..533ab821 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/coercion.go +++ b/vendor/github.com/99designs/gqlgen/graphql/coercion.go @@ -5,51 +5,51 @@ import ( ) // CoerceList applies coercion from a single value to a list. -func CoerceList(v interface{}) []interface{} { - var vSlice []interface{} +func CoerceList(v any) []any { + var vSlice []any if v != nil { switch v := v.(type) { - case []interface{}: + case []any: // already a slice no coercion required vSlice = v case []string: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } case []json.Number: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } case []bool: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } - case []map[string]interface{}: + case []map[string]any: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } case []float64: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } case []float32: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } case []int: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } case []int32: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } case []int64: if len(v) > 0 { - vSlice = []interface{}{v[0]} + vSlice = []any{v[0]} } default: - vSlice = 
[]interface{}{v} + vSlice = []any{v} } } return vSlice diff --git a/vendor/github.com/99designs/gqlgen/graphql/context_field.go b/vendor/github.com/99designs/gqlgen/graphql/context_field.go index 1f9a6e88..b3fab910 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/context_field.go +++ b/vendor/github.com/99designs/gqlgen/graphql/context_field.go @@ -19,13 +19,13 @@ type FieldContext struct { // The name of the type this field belongs to Object string // These are the args after processing, they can be mutated in middleware to change what the resolver will get. - Args map[string]interface{} + Args map[string]any // The raw field Field CollectedField // The index of array in path. Index *int // The result object of resolver - Result interface{} + Result any // IsMethod indicates if the resolver is a method IsMethod bool // IsResolver indicates if the field has a user-specified resolver @@ -98,7 +98,7 @@ func WithFieldContext(ctx context.Context, rc *FieldContext) context.Context { return context.WithValue(ctx, resolverCtx, rc) } -func equalPath(a ast.Path, b ast.Path) bool { +func equalPath(a, b ast.Path) bool { if len(a) != len(b) { return false } diff --git a/vendor/github.com/99designs/gqlgen/graphql/context_operation.go b/vendor/github.com/99designs/gqlgen/graphql/context_operation.go index 3e6a221b..d515acce 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/context_operation.go +++ b/vendor/github.com/99designs/gqlgen/graphql/context_operation.go @@ -14,7 +14,7 @@ type RequestContext = OperationContext type OperationContext struct { RawQuery string - Variables map[string]interface{} + Variables map[string]any OperationName string Doc *ast.QueryDocument Headers http.Header @@ -36,7 +36,7 @@ func (c *OperationContext) Validate(ctx context.Context) error { return errors.New("field 'RawQuery' is required") } if c.Variables == nil { - c.Variables = make(map[string]interface{}) + c.Variables = make(map[string]any) } if c.ResolverMiddleware == nil { return 
errors.New("field 'ResolverMiddleware' is required") @@ -103,7 +103,7 @@ Next: // Errorf sends an error string to the client, passing it through the formatter. // Deprecated: use graphql.AddErrorf(ctx, err) instead -func (c *OperationContext) Errorf(ctx context.Context, format string, args ...interface{}) { +func (c *OperationContext) Errorf(ctx context.Context, format string, args ...any) { AddErrorf(ctx, format, args...) } @@ -120,6 +120,6 @@ func (c *OperationContext) Error(ctx context.Context, err error) { AddError(ctx, err) } -func (c *OperationContext) Recover(ctx context.Context, err interface{}) error { +func (c *OperationContext) Recover(ctx context.Context, err any) error { return ErrorOnPath(ctx, c.RecoverFunc(ctx, err)) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/context_response.go b/vendor/github.com/99designs/gqlgen/graphql/context_response.go index 6d223c8a..e0f3285f 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/context_response.go +++ b/vendor/github.com/99designs/gqlgen/graphql/context_response.go @@ -15,7 +15,7 @@ type responseContext struct { errors gqlerror.List errorsMu sync.Mutex - extensions map[string]interface{} + extensions map[string]any extensionsMu sync.Mutex } @@ -45,7 +45,7 @@ func WithFreshResponseContext(ctx context.Context) context.Context { } // AddErrorf writes a formatted error to the client, first passing it through the error presenter. 
-func AddErrorf(ctx context.Context, format string, args ...interface{}) { +func AddErrorf(ctx context.Context, format string, args ...any) { AddError(ctx, fmt.Errorf(format, args...)) } @@ -60,7 +60,7 @@ func AddError(ctx context.Context, err error) { c.errors = append(c.errors, presentedError) } -func Recover(ctx context.Context, err interface{}) (userMessage error) { +func Recover(ctx context.Context, err any) (userMessage error) { c := getResponseContext(ctx) return ErrorOnPath(ctx, c.recover(ctx, err)) } @@ -125,13 +125,13 @@ func GetErrors(ctx context.Context) gqlerror.List { } // RegisterExtension allows you to add a new extension into the graphql response -func RegisterExtension(ctx context.Context, key string, value interface{}) { +func RegisterExtension(ctx context.Context, key string, value any) { c := getResponseContext(ctx) c.extensionsMu.Lock() defer c.extensionsMu.Unlock() if c.extensions == nil { - c.extensions = make(map[string]interface{}) + c.extensions = make(map[string]any) } if _, ok := c.extensions[key]; ok { @@ -142,16 +142,16 @@ func RegisterExtension(ctx context.Context, key string, value interface{}) { } // GetExtensions returns any extensions registered in the current result context -func GetExtensions(ctx context.Context) map[string]interface{} { +func GetExtensions(ctx context.Context) map[string]any { ext := getResponseContext(ctx).extensions if ext == nil { - return map[string]interface{}{} + return map[string]any{} } return ext } -func GetExtension(ctx context.Context, name string) interface{} { +func GetExtension(ctx context.Context, name string) any { ext := getResponseContext(ctx).extensions if ext == nil { return nil diff --git a/vendor/github.com/99designs/gqlgen/graphql/duration.go b/vendor/github.com/99designs/gqlgen/graphql/duration.go index 3eb392db..bf2b5647 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/duration.go +++ b/vendor/github.com/99designs/gqlgen/graphql/duration.go @@ -1,17 +1,17 @@ package graphql 
import ( - "fmt" + "errors" "time" dur "github.com/sosodev/duration" ) // UnmarshalDuration returns the duration from a string in ISO8601 format -func UnmarshalDuration(v interface{}) (time.Duration, error) { +func UnmarshalDuration(v any) (time.Duration, error) { input, ok := v.(string) if !ok { - return 0, fmt.Errorf("input must be a string") + return 0, errors.New("input must be a string") } d2, err := dur.Parse(input) diff --git a/vendor/github.com/99designs/gqlgen/graphql/errcode/codes.go b/vendor/github.com/99designs/gqlgen/graphql/errcode/codes.go index 854b206f..58ca7cbe 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/errcode/codes.go +++ b/vendor/github.com/99designs/gqlgen/graphql/errcode/codes.go @@ -40,7 +40,7 @@ func Set(err error, value string) { } if gqlErr.Extensions == nil { - gqlErr.Extensions = map[string]interface{}{} + gqlErr.Extensions = map[string]any{} } gqlErr.Extensions["code"] = value diff --git a/vendor/github.com/99designs/gqlgen/graphql/executable_schema.go b/vendor/github.com/99designs/gqlgen/graphql/executable_schema.go index 58f942a1..aa9d7c44 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/executable_schema.go +++ b/vendor/github.com/99designs/gqlgen/graphql/executable_schema.go @@ -12,7 +12,7 @@ import ( type ExecutableSchema interface { Schema() *ast.Schema - Complexity(typeName, fieldName string, childComplexity int, args map[string]interface{}) (int, bool) + Complexity(typeName, fieldName string, childComplexity int, args map[string]any) (int, bool) Exec(ctx context.Context) ResponseHandler } @@ -116,7 +116,7 @@ func instanceOf(val string, satisfies []string) bool { return false } -func getOrCreateAndAppendField(c *[]CollectedField, name string, alias string, objectDefinition *ast.Definition, creator func() CollectedField) *CollectedField { +func getOrCreateAndAppendField(c *[]CollectedField, name, alias string, objectDefinition *ast.Definition, creator func() CollectedField) *CollectedField { for i, cf := range 
*c { if cf.Name == name && cf.Alias == alias { if cf.ObjectDefinition == objectDefinition { @@ -150,7 +150,7 @@ func getOrCreateAndAppendField(c *[]CollectedField, name string, alias string, o return &(*c)[len(*c)-1] } -func shouldIncludeNode(directives ast.DirectiveList, variables map[string]interface{}) bool { +func shouldIncludeNode(directives ast.DirectiveList, variables map[string]any) bool { if len(directives) == 0 { return true } @@ -168,7 +168,7 @@ func shouldIncludeNode(directives ast.DirectiveList, variables map[string]interf return !skip && include } -func deferrable(directives ast.DirectiveList, variables map[string]interface{}) (shouldDefer bool, label string) { +func deferrable(directives ast.DirectiveList, variables map[string]any) (shouldDefer bool, label string) { d := directives.ForName("defer") if d == nil { return false, "" @@ -194,7 +194,7 @@ func deferrable(directives ast.DirectiveList, variables map[string]interface{}) return shouldDefer, label } -func resolveIfArgument(d *ast.Directive, variables map[string]interface{}) bool { +func resolveIfArgument(d *ast.Directive, variables map[string]any) bool { arg := d.Arguments.ForName("if") if arg == nil { panic(fmt.Sprintf("%s: argument 'if' not defined", d.Name)) diff --git a/vendor/github.com/99designs/gqlgen/graphql/executable_schema_mock.go b/vendor/github.com/99designs/gqlgen/graphql/executable_schema_mock.go index 5e71cb83..c4c41189 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/executable_schema_mock.go +++ b/vendor/github.com/99designs/gqlgen/graphql/executable_schema_mock.go @@ -19,7 +19,7 @@ var _ ExecutableSchema = &ExecutableSchemaMock{} // // // make and configure a mocked ExecutableSchema // mockedExecutableSchema := &ExecutableSchemaMock{ -// ComplexityFunc: func(typeName string, fieldName string, childComplexity int, args map[string]interface{}) (int, bool) { +// ComplexityFunc: func(typeName string, fieldName string, childComplexity int, args map[string]any) (int, bool) { 
// panic("mock out the Complexity method") // }, // ExecFunc: func(ctx context.Context) ResponseHandler { @@ -36,7 +36,7 @@ var _ ExecutableSchema = &ExecutableSchemaMock{} // } type ExecutableSchemaMock struct { // ComplexityFunc mocks the Complexity method. - ComplexityFunc func(typeName string, fieldName string, childComplexity int, args map[string]interface{}) (int, bool) + ComplexityFunc func(typeName string, fieldName string, childComplexity int, args map[string]any) (int, bool) // ExecFunc mocks the Exec method. ExecFunc func(ctx context.Context) ResponseHandler @@ -55,7 +55,7 @@ type ExecutableSchemaMock struct { // ChildComplexity is the childComplexity argument value. ChildComplexity int // Args is the args argument value. - Args map[string]interface{} + Args map[string]any } // Exec holds details about calls to the Exec method. Exec []struct { @@ -72,7 +72,7 @@ type ExecutableSchemaMock struct { } // Complexity calls ComplexityFunc. -func (mock *ExecutableSchemaMock) Complexity(typeName string, fieldName string, childComplexity int, args map[string]interface{}) (int, bool) { +func (mock *ExecutableSchemaMock) Complexity(typeName string, fieldName string, childComplexity int, args map[string]any) (int, bool) { if mock.ComplexityFunc == nil { panic("ExecutableSchemaMock.ComplexityFunc: method is nil but ExecutableSchema.Complexity was just called") } @@ -80,7 +80,7 @@ func (mock *ExecutableSchemaMock) Complexity(typeName string, fieldName string, TypeName string FieldName string ChildComplexity int - Args map[string]interface{} + Args map[string]any }{ TypeName: typeName, FieldName: fieldName, @@ -101,13 +101,13 @@ func (mock *ExecutableSchemaMock) ComplexityCalls() []struct { TypeName string FieldName string ChildComplexity int - Args map[string]interface{} + Args map[string]any } { var calls []struct { TypeName string FieldName string ChildComplexity int - Args map[string]interface{} + Args map[string]any } mock.lockComplexity.RLock() calls = 
mock.calls.Complexity diff --git a/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go b/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go index ef0603ea..426ad09b 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go +++ b/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go @@ -12,6 +12,8 @@ import ( "github.com/99designs/gqlgen/graphql/errcode" ) +const parserTokenNoLimit = 0 + // Executor executes graphql queries against a schema. type Executor struct { es graphql.ExecutableSchema @@ -21,6 +23,8 @@ type Executor struct { errorPresenter graphql.ErrorPresenterFunc recoverFunc graphql.RecoverFunc queryCache graphql.Cache + + parserTokenLimit int } var _ graphql.GraphExecutor = &Executor{} @@ -29,11 +33,12 @@ var _ graphql.GraphExecutor = &Executor{} // recovery callbacks, and no query cache or extensions. func New(es graphql.ExecutableSchema) *Executor { e := &Executor{ - es: es, - errorPresenter: graphql.DefaultErrorPresenter, - recoverFunc: graphql.DefaultRecover, - queryCache: graphql.NoCache{}, - ext: processExtensions(nil), + es: es, + errorPresenter: graphql.DefaultErrorPresenter, + recoverFunc: graphql.DefaultRecover, + queryCache: graphql.NoCache{}, + ext: processExtensions(nil), + parserTokenLimit: parserTokenNoLimit, } return e } @@ -153,7 +158,7 @@ func (e *Executor) DispatchError(ctx context.Context, list gqlerror.List) *graph return resp } -func (e *Executor) PresentRecoveredError(ctx context.Context, err interface{}) error { +func (e *Executor) PresentRecoveredError(ctx context.Context, err any) error { return e.errorPresenter(ctx, e.recoverFunc(ctx, err)) } @@ -169,6 +174,10 @@ func (e *Executor) SetRecoverFunc(f graphql.RecoverFunc) { e.recoverFunc = f } +func (e *Executor) SetParserTokenLimit(limit int) { + e.parserTokenLimit = limit +} + // parseQuery decodes the incoming query and validates it, pulling from cache if present. 
// // NOTE: This should NOT look at variables, they will change per request. It should only parse and @@ -189,7 +198,7 @@ func (e *Executor) parseQuery( return doc.(*ast.QueryDocument), nil } - doc, err := parser.ParseQuery(&ast.Source{Input: query}) + doc, err := parser.ParseQueryWithTokenLimit(&ast.Source{Input: query}, e.parserTokenLimit) if err != nil { gqlErr, ok := err.(*gqlerror.Error) if ok { diff --git a/vendor/github.com/99designs/gqlgen/graphql/executor/extensions.go b/vendor/github.com/99designs/gqlgen/graphql/executor/extensions.go index a8eebf11..758d8e4e 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/executor/extensions.go +++ b/vendor/github.com/99designs/gqlgen/graphql/executor/extensions.go @@ -2,6 +2,7 @@ package executor import ( "context" + "errors" "fmt" "github.com/99designs/gqlgen/graphql" @@ -68,7 +69,7 @@ func processExtensions(exts []graphql.HandlerExtension) extensions { rootFieldMiddleware: func(ctx context.Context, next graphql.RootResolver) graphql.Marshaler { return next(ctx) }, - fieldMiddleware: func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) { + fieldMiddleware: func(ctx context.Context, next graphql.Resolver) (res any, err error) { return next(ctx) }, } @@ -105,8 +106,8 @@ func processExtensions(exts []graphql.HandlerExtension) extensions { if p, ok := p.(graphql.FieldInterceptor); ok { previous := e.fieldMiddleware - e.fieldMiddleware = func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) { - return p.InterceptField(ctx, func(ctx context.Context) (res interface{}, err error) { + e.fieldMiddleware = func(ctx context.Context, next graphql.Resolver) (res any, err error) { + return p.InterceptField(ctx, func(ctx context.Context) (res any, err error) { return previous(ctx, next) }) } @@ -134,7 +135,7 @@ func (r aroundOpFunc) ExtensionName() string { func (r aroundOpFunc) Validate(schema graphql.ExecutableSchema) error { if r == nil { - return fmt.Errorf("OperationFunc 
can not be nil") + return errors.New("OperationFunc can not be nil") } return nil } @@ -151,7 +152,7 @@ func (r aroundRespFunc) ExtensionName() string { func (r aroundRespFunc) Validate(schema graphql.ExecutableSchema) error { if r == nil { - return fmt.Errorf("ResponseFunc can not be nil") + return errors.New("ResponseFunc can not be nil") } return nil } @@ -160,7 +161,7 @@ func (r aroundRespFunc) InterceptResponse(ctx context.Context, next graphql.Resp return r(ctx, next) } -type aroundFieldFunc func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) +type aroundFieldFunc func(ctx context.Context, next graphql.Resolver) (res any, err error) func (f aroundFieldFunc) ExtensionName() string { return "InlineFieldFunc" @@ -168,12 +169,12 @@ func (f aroundFieldFunc) ExtensionName() string { func (f aroundFieldFunc) Validate(schema graphql.ExecutableSchema) error { if f == nil { - return fmt.Errorf("FieldFunc can not be nil") + return errors.New("FieldFunc can not be nil") } return nil } -func (f aroundFieldFunc) InterceptField(ctx context.Context, next graphql.Resolver) (res interface{}, err error) { +func (f aroundFieldFunc) InterceptField(ctx context.Context, next graphql.Resolver) (res any, err error) { return f(ctx, next) } @@ -185,7 +186,7 @@ func (f aroundRootFieldFunc) ExtensionName() string { func (f aroundRootFieldFunc) Validate(schema graphql.ExecutableSchema) error { if f == nil { - return fmt.Errorf("RootFieldFunc can not be nil") + return errors.New("RootFieldFunc can not be nil") } return nil } diff --git a/vendor/github.com/99designs/gqlgen/graphql/float.go b/vendor/github.com/99designs/gqlgen/graphql/float.go index ccb825dd..465f46af 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/float.go +++ b/vendor/github.com/99designs/gqlgen/graphql/float.go @@ -3,6 +3,7 @@ package graphql import ( "context" "encoding/json" + "errors" "fmt" "io" "math" @@ -11,11 +12,11 @@ import ( func MarshalFloat(f float64) Marshaler { return 
WriterFunc(func(w io.Writer) { - io.WriteString(w, fmt.Sprintf("%g", f)) + fmt.Fprintf(w, "%g", f) }) } -func UnmarshalFloat(v interface{}) (float64, error) { +func UnmarshalFloat(v any) (float64, error) { switch v := v.(type) { case string: return strconv.ParseFloat(v, 64) @@ -35,13 +36,13 @@ func UnmarshalFloat(v interface{}) (float64, error) { func MarshalFloatContext(f float64) ContextMarshaler { return ContextWriterFunc(func(ctx context.Context, w io.Writer) error { if math.IsInf(f, 0) || math.IsNaN(f) { - return fmt.Errorf("cannot marshal infinite no NaN float values") + return errors.New("cannot marshal infinite no NaN float values") } - io.WriteString(w, fmt.Sprintf("%g", f)) + fmt.Fprintf(w, "%g", f) return nil }) } -func UnmarshalFloatContext(ctx context.Context, v interface{}) (float64, error) { +func UnmarshalFloatContext(ctx context.Context, v any) (float64, error) { return UnmarshalFloat(v) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler.go b/vendor/github.com/99designs/gqlgen/graphql/handler.go index cd358740..4df36117 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler.go @@ -16,18 +16,18 @@ type ( ResponseHandler func(ctx context.Context) *Response ResponseMiddleware func(ctx context.Context, next ResponseHandler) *Response - Resolver func(ctx context.Context) (res interface{}, err error) - FieldMiddleware func(ctx context.Context, next Resolver) (res interface{}, err error) + Resolver func(ctx context.Context) (res any, err error) + FieldMiddleware func(ctx context.Context, next Resolver) (res any, err error) RootResolver func(ctx context.Context) Marshaler RootFieldMiddleware func(ctx context.Context, next RootResolver) Marshaler RawParams struct { - Query string `json:"query"` - OperationName string `json:"operationName"` - Variables map[string]interface{} `json:"variables"` - Extensions map[string]interface{} `json:"extensions"` - Headers http.Header 
`json:"headers"` + Query string `json:"query"` + OperationName string `json:"operationName"` + Variables map[string]any `json:"variables"` + Extensions map[string]any `json:"extensions"` + Headers http.Header `json:"headers"` ReadTime TraceTiming `json:"-"` } @@ -86,7 +86,7 @@ type ( // FieldInterceptor called around each field FieldInterceptor interface { - InterceptField(ctx context.Context, next Resolver) (res interface{}, err error) + InterceptField(ctx context.Context, next Resolver) (res any, err error) } // Transport provides support for different wire level encodings of graphql requests, eg Form, Get, Post, Websocket @@ -103,7 +103,7 @@ func (p *RawParams) AddUpload(upload Upload, key, path string) *gqlerror.Error { return gqlerror.Errorf("invalid operations paths for key %s", key) } - var ptr interface{} = p.Variables + var ptr any = p.Variables parts := strings.Split(path, ".") // skip the first part (variables) because we started there @@ -114,15 +114,15 @@ func (p *RawParams) AddUpload(upload Upload, key, path string) *gqlerror.Error { } if index, parseNbrErr := strconv.Atoi(p); parseNbrErr == nil { if last { - ptr.([]interface{})[index] = upload + ptr.([]any)[index] = upload } else { - ptr = ptr.([]interface{})[index] + ptr = ptr.([]any)[index] } } else { if last { - ptr.(map[string]interface{})[p] = upload + ptr.(map[string]any)[p] = upload } else { - ptr = ptr.(map[string]interface{})[p] + ptr = ptr.(map[string]any)[p] } } } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go index 465c2ada..115aaa8a 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go @@ -4,7 +4,7 @@ import ( "context" "crypto/sha256" "encoding/hex" - "fmt" + "errors" "github.com/mitchellh/mapstructure" "github.com/vektah/gqlparser/v2/gqlerror" @@ -47,7 +47,7 @@ func (a 
AutomaticPersistedQuery) ExtensionName() string { func (a AutomaticPersistedQuery) Validate(schema graphql.ExecutableSchema) error { if a.Cache == nil { - return fmt.Errorf("AutomaticPersistedQuery.Cache can not be nil") + return errors.New("AutomaticPersistedQuery.Cache can not be nil") } return nil } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/complexity.go b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/complexity.go index a5b6a604..af1e002f 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/complexity.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/complexity.go @@ -2,7 +2,7 @@ package extension import ( "context" - "fmt" + "errors" "github.com/vektah/gqlparser/v2/gqlerror" @@ -52,7 +52,7 @@ func (c ComplexityLimit) ExtensionName() string { func (c *ComplexityLimit) Validate(schema graphql.ExecutableSchema) error { if c.Func == nil { - return fmt.Errorf("ComplexityLimit func can not be nil") + return errors.New("ComplexityLimit func can not be nil") } c.es = schema return nil diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go b/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go index 6ae8a38e..9dc480e9 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go @@ -24,10 +24,10 @@ func New(size int) *LRU { return &LRU{cache} } -func (l LRU) Get(ctx context.Context, key string) (value interface{}, ok bool) { +func (l LRU) Get(ctx context.Context, key string) (value any, ok bool) { return l.lru.Get(key) } -func (l LRU) Add(ctx context.Context, key string, value interface{}) { +func (l LRU) Add(ctx context.Context, key string, value any) { l.lru.Add(key, value) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/server.go b/vendor/github.com/99designs/gqlgen/graphql/handler/server.go index 893f0944..54376b13 100644 --- 
a/vendor/github.com/99designs/gqlgen/graphql/handler/server.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/server.go @@ -3,6 +3,7 @@ package handler import ( "context" "encoding/json" + "errors" "fmt" "net/http" "time" @@ -66,6 +67,10 @@ func (s *Server) SetQueryCache(cache graphql.Cache) { s.exec.SetQueryCache(cache) } +func (s *Server) SetParserTokenLimit(limit int) { + s.exec.SetParserTokenLimit(limit) +} + func (s *Server) Use(extension graphql.HandlerExtension) { s.exec.Use(extension) } @@ -131,7 +136,7 @@ func sendError(w http.ResponseWriter, code int, errors ...*gqlerror.Error) { _, _ = w.Write(b) } -func sendErrorf(w http.ResponseWriter, code int, format string, args ...interface{}) { +func sendErrorf(w http.ResponseWriter, code int, format string, args ...any) { sendError(w, code, &gqlerror.Error{Message: fmt.Sprintf(format, args...)}) } @@ -143,7 +148,7 @@ func (r OperationFunc) ExtensionName() string { func (r OperationFunc) Validate(schema graphql.ExecutableSchema) error { if r == nil { - return fmt.Errorf("OperationFunc can not be nil") + return errors.New("OperationFunc can not be nil") } return nil } @@ -160,7 +165,7 @@ func (r ResponseFunc) ExtensionName() string { func (r ResponseFunc) Validate(schema graphql.ExecutableSchema) error { if r == nil { - return fmt.Errorf("ResponseFunc can not be nil") + return errors.New("ResponseFunc can not be nil") } return nil } @@ -169,7 +174,7 @@ func (r ResponseFunc) InterceptResponse(ctx context.Context, next graphql.Respon return r(ctx, next) } -type FieldFunc func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) +type FieldFunc func(ctx context.Context, next graphql.Resolver) (res any, err error) func (f FieldFunc) ExtensionName() string { return "InlineFieldFunc" @@ -177,11 +182,11 @@ func (f FieldFunc) ExtensionName() string { func (f FieldFunc) Validate(schema graphql.ExecutableSchema) error { if f == nil { - return fmt.Errorf("FieldFunc can not be nil") + return 
errors.New("FieldFunc can not be nil") } return nil } -func (f FieldFunc) InterceptField(ctx context.Context, next graphql.Resolver) (res interface{}, err error) { +func (f FieldFunc) InterceptField(ctx context.Context, next graphql.Resolver) (res any, err error) { return f(ctx, next) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/error.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/error.go index 7c4e2d8e..1fefb573 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/error.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/error.go @@ -22,6 +22,6 @@ func SendError(w http.ResponseWriter, code int, errors ...*gqlerror.Error) { } // SendErrorf wraps SendError to add formatted messages -func SendErrorf(w http.ResponseWriter, code int, format string, args ...interface{}) { +func SendErrorf(w http.ResponseWriter, code int, format string, args ...any) { SendError(w, code, &gqlerror.Error{Message: fmt.Sprintf(format, args...)}) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_form_urlencoded.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_form_urlencoded.go index 73317e4b..f877c2dd 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_form_urlencoded.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_form_urlencoded.go @@ -63,10 +63,10 @@ func (h UrlEncodedForm) Do(w http.ResponseWriter, r *http.Request, exec graphql. 
return } - rc, OpErr := exec.CreateOperationContext(ctx, params) - if OpErr != nil { - w.WriteHeader(statusFor(OpErr)) - resp := exec.DispatchError(graphql.WithOperationContext(ctx, rc), OpErr) + rc, opErr := exec.CreateOperationContext(ctx, params) + if opErr != nil { + w.WriteHeader(statusFor(opErr)) + resp := exec.DispatchError(graphql.WithOperationContext(ctx, rc), opErr) writeJson(w, resp) return } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_get.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_get.go index 9a47bfbe..470a0fbe 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_get.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_get.go @@ -84,7 +84,7 @@ func (h GET) Do(w http.ResponseWriter, r *http.Request, exec graphql.GraphExecut writeJson(w, responses(ctx)) } -func jsonDecode(r io.Reader, val interface{}) error { +func jsonDecode(r io.Reader, val any) error { dec := json.NewDecoder(r) dec.UseNumber() return dec.Decode(val) diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go index b54a27d0..bd511525 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go @@ -64,10 +64,10 @@ func (h GRAPHQL) Do(w http.ResponseWriter, r *http.Request, exec graphql.GraphEx return } - rc, OpErr := exec.CreateOperationContext(ctx, params) - if OpErr != nil { - w.WriteHeader(statusFor(OpErr)) - resp := exec.DispatchError(graphql.WithOperationContext(ctx, rc), OpErr) + rc, opErr := exec.CreateOperationContext(ctx, params) + if opErr != nil { + w.WriteHeader(statusFor(opErr)) + resp := exec.DispatchError(graphql.WithOperationContext(ctx, rc), opErr) writeJson(w, resp) return } diff --git 
a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_post.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_post.go index a37010ab..985f8db2 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_post.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_post.go @@ -78,10 +78,10 @@ func (h POST) Do(w http.ResponseWriter, r *http.Request, exec graphql.GraphExecu return } - rc, OpErr := exec.CreateOperationContext(ctx, params) - if OpErr != nil { - w.WriteHeader(statusFor(OpErr)) - resp := exec.DispatchError(graphql.WithOperationContext(ctx, rc), OpErr) + rc, opErr := exec.CreateOperationContext(ctx, params) + if opErr != nil { + w.WriteHeader(statusFor(opErr)) + resp := exec.DispatchError(graphql.WithOperationContext(ctx, rc), opErr) writeJson(w, resp) return } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/util.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/util.go index 19b7521c..aca7207e 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/util.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/util.go @@ -22,7 +22,7 @@ func writeJsonError(w io.Writer, msg string) { writeJson(w, &graphql.Response{Errors: gqlerror.List{{Message: msg}}}) } -func writeJsonErrorf(w io.Writer, format string, args ...interface{}) { +func writeJsonErrorf(w io.Writer, format string, args ...any) { writeJson(w, &graphql.Response{Errors: gqlerror.List{{Message: fmt.Sprintf(format, args...)}}}) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go index 236a30c2..651ccee4 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go @@ -193,7 +193,7 @@ func (c *wsConnection) init() bool { } } - var initAckPayload 
*InitPayload = nil + var initAckPayload *InitPayload if c.InitFunc != nil { var ctx context.Context ctx, initAckPayload, err = c.InitFunc(c.ctx, c.initPayload) @@ -480,7 +480,7 @@ func (c *wsConnection) sendError(id string, errors ...*gqlerror.Error) { c.write(&message{t: errorMessageType, id: id, payload: b}) } -func (c *wsConnection) sendConnectionError(format string, args ...interface{}) { +func (c *wsConnection) sendConnectionError(format string, args ...any) { b, err := json.Marshal(&gqlerror.Error{Message: fmt.Sprintf(format, args...)}) if err != nil { panic(err) diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket_init.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket_init.go index a5f84ba2..35105535 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket_init.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket_init.go @@ -10,7 +10,7 @@ const ( // InitPayload is a structure that is parsed from the websocket init message payload. TO use // request headers for non-websocket, instead wrap the graphql handler in a middleware. -type InitPayload map[string]interface{} +type InitPayload map[string]any // GetString safely gets a string value from the payload. It returns an empty string if the // payload is nil or the value isn't set. 
diff --git a/vendor/github.com/99designs/gqlgen/graphql/id.go b/vendor/github.com/99designs/gqlgen/graphql/id.go index 0583995f..2a946dfa 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/id.go +++ b/vendor/github.com/99designs/gqlgen/graphql/id.go @@ -11,7 +11,7 @@ func MarshalID(s string) Marshaler { return MarshalString(s) } -func UnmarshalID(v interface{}) (string, error) { +func UnmarshalID(v any) (string, error) { switch v := v.(type) { case string: return v, nil @@ -22,13 +22,9 @@ func UnmarshalID(v interface{}) (string, error) { case int64: return strconv.FormatInt(v, 10), nil case float64: - return fmt.Sprintf("%f", v), nil + return strconv.FormatFloat(v, 'f', 6, 64), nil case bool: - if v { - return "true", nil - } else { - return "false", nil - } + return strconv.FormatBool(v), nil case nil: return "null", nil default: @@ -42,7 +38,7 @@ func MarshalIntID(i int) Marshaler { }) } -func UnmarshalIntID(v interface{}) (int, error) { +func UnmarshalIntID(v any) (int, error) { switch v := v.(type) { case string: return strconv.Atoi(v) @@ -63,7 +59,7 @@ func MarshalUintID(i uint) Marshaler { }) } -func UnmarshalUintID(v interface{}) (uint, error) { +func UnmarshalUintID(v any) (uint, error) { switch v := v.(type) { case string: result, err := strconv.ParseUint(v, 10, 64) diff --git a/vendor/github.com/99designs/gqlgen/graphql/input.go b/vendor/github.com/99designs/gqlgen/graphql/input.go index 88c3efaa..681fe080 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/input.go +++ b/vendor/github.com/99designs/gqlgen/graphql/input.go @@ -10,7 +10,7 @@ const unmarshalInputCtx key = "unmarshal_input_context" // BuildUnmarshalerMap returns a map of unmarshal functions of the ExecutableContext // to use with the WithUnmarshalerMap function. 
-func BuildUnmarshalerMap(unmarshaler ...interface{}) map[reflect.Type]reflect.Value { +func BuildUnmarshalerMap(unmarshaler ...any) map[reflect.Type]reflect.Value { maps := make(map[reflect.Type]reflect.Value) for _, v := range unmarshaler { ft := reflect.TypeOf(v) @@ -28,7 +28,7 @@ func WithUnmarshalerMap(ctx context.Context, maps map[reflect.Type]reflect.Value } // UnmarshalInputFromContext allows unmarshaling input object from a context. -func UnmarshalInputFromContext(ctx context.Context, raw, v interface{}) error { +func UnmarshalInputFromContext(ctx context.Context, raw, v any) error { m, ok := ctx.Value(unmarshalInputCtx).(map[reflect.Type]reflect.Value) if m == nil || !ok { return errors.New("graphql: the input context is empty") diff --git a/vendor/github.com/99designs/gqlgen/graphql/int.go b/vendor/github.com/99designs/gqlgen/graphql/int.go index 57d0d589..2a5604e9 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/int.go +++ b/vendor/github.com/99designs/gqlgen/graphql/int.go @@ -13,7 +13,7 @@ func MarshalInt(i int) Marshaler { }) } -func UnmarshalInt(v interface{}) (int, error) { +func UnmarshalInt(v any) (int, error) { switch v := v.(type) { case string: return strconv.Atoi(v) @@ -34,7 +34,7 @@ func MarshalInt64(i int64) Marshaler { }) } -func UnmarshalInt64(v interface{}) (int64, error) { +func UnmarshalInt64(v any) (int64, error) { switch v := v.(type) { case string: return strconv.ParseInt(v, 10, 64) @@ -55,7 +55,7 @@ func MarshalInt32(i int32) Marshaler { }) } -func UnmarshalInt32(v interface{}) (int32, error) { +func UnmarshalInt32(v any) (int32, error) { switch v := v.(type) { case string: iv, err := strconv.ParseInt(v, 10, 32) diff --git a/vendor/github.com/99designs/gqlgen/graphql/jsonw.go b/vendor/github.com/99designs/gqlgen/graphql/jsonw.go index 54e293f1..16bb63b7 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/jsonw.go +++ b/vendor/github.com/99designs/gqlgen/graphql/jsonw.go @@ -28,7 +28,7 @@ type Marshaler interface { } type 
Unmarshaler interface { - UnmarshalGQL(v interface{}) error + UnmarshalGQL(v any) error } type ContextMarshaler interface { @@ -36,7 +36,7 @@ type ContextMarshaler interface { } type ContextUnmarshaler interface { - UnmarshalGQLContext(ctx context.Context, v interface{}) error + UnmarshalGQLContext(ctx context.Context, v any) error } type contextMarshalerAdapter struct { diff --git a/vendor/github.com/99designs/gqlgen/graphql/playground/altair_playground.go b/vendor/github.com/99designs/gqlgen/graphql/playground/altair_playground.go index 6928828c..f7c55cbd 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/playground/altair_playground.go +++ b/vendor/github.com/99designs/gqlgen/graphql/playground/altair_playground.go @@ -66,7 +66,7 @@ var altairPage = template.Must(template.New("altair").Parse(` // AltairHandler responsible for setting up the altair playground func AltairHandler(title, endpoint string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - err := altairPage.Execute(w, map[string]interface{}{ + err := altairPage.Execute(w, map[string]any{ "title": title, "endpoint": endpoint, "endpointIsAbsolute": endpointHasScheme(endpoint), diff --git a/vendor/github.com/99designs/gqlgen/graphql/playground/apollo_sandbox_playground.go b/vendor/github.com/99designs/gqlgen/graphql/playground/apollo_sandbox_playground.go index 750420b4..f998b4d8 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/playground/apollo_sandbox_playground.go +++ b/vendor/github.com/99designs/gqlgen/graphql/playground/apollo_sandbox_playground.go @@ -64,7 +64,7 @@ func ApolloSandboxHandler(title, endpoint string, opts ...ApolloSandboxOption) h } return func(w http.ResponseWriter, r *http.Request) { - err := apolloSandboxPage.Execute(w, map[string]interface{}{ + err := apolloSandboxPage.Execute(w, map[string]any{ "title": title, "endpoint": endpoint, "endpointIsAbsolute": endpointHasScheme(endpoint), diff --git 
a/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go b/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go index 05ad0233..816fcca3 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go +++ b/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go @@ -85,17 +85,17 @@ var page = template.Must(template.New("graphiql").Parse(` `)) // Handler responsible for setting up the playground -func Handler(title string, endpoint string) http.HandlerFunc { +func Handler(title, endpoint string) http.HandlerFunc { return HandlerWithHeaders(title, endpoint, nil, nil) } // HandlerWithHeaders sets up the playground. // fetcherHeaders are used by the playground's fetcher instance and will not be visible in the UI. // uiHeaders are default headers that will show up in the UI headers editor. -func HandlerWithHeaders(title string, endpoint string, fetcherHeaders map[string]string, uiHeaders map[string]string) http.HandlerFunc { +func HandlerWithHeaders(title, endpoint string, fetcherHeaders, uiHeaders map[string]string) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "text/html; charset=UTF-8") - err := page.Execute(w, map[string]interface{}{ + err := page.Execute(w, map[string]any{ "title": title, "endpoint": endpoint, "fetcherHeaders": fetcherHeaders, diff --git a/vendor/github.com/99designs/gqlgen/graphql/recovery.go b/vendor/github.com/99designs/gqlgen/graphql/recovery.go index 9bc0e47e..4aae6919 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/recovery.go +++ b/vendor/github.com/99designs/gqlgen/graphql/recovery.go @@ -9,9 +9,9 @@ import ( "github.com/vektah/gqlparser/v2/gqlerror" ) -type RecoverFunc func(ctx context.Context, err interface{}) (userMessage error) +type RecoverFunc func(ctx context.Context, err any) (userMessage error) -func DefaultRecover(ctx context.Context, err interface{}) error { +func DefaultRecover(ctx context.Context, err any) 
error { fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr) debug.PrintStack() diff --git a/vendor/github.com/99designs/gqlgen/graphql/response.go b/vendor/github.com/99designs/gqlgen/graphql/response.go index a82f27e2..e37b5cfc 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/response.go +++ b/vendor/github.com/99designs/gqlgen/graphql/response.go @@ -13,15 +13,15 @@ import ( // https://github.com/facebook/graphql/commit/7b40390d48680b15cb93e02d46ac5eb249689876#diff-757cea6edf0288677a9eea4cfc801d87R107 // and https://github.com/facebook/graphql/pull/384 type Response struct { - Errors gqlerror.List `json:"errors,omitempty"` - Data json.RawMessage `json:"data"` - Label string `json:"label,omitempty"` - Path ast.Path `json:"path,omitempty"` - HasNext *bool `json:"hasNext,omitempty"` - Extensions map[string]interface{} `json:"extensions,omitempty"` + Errors gqlerror.List `json:"errors,omitempty"` + Data json.RawMessage `json:"data"` + Label string `json:"label,omitempty"` + Path ast.Path `json:"path,omitempty"` + HasNext *bool `json:"hasNext,omitempty"` + Extensions map[string]any `json:"extensions,omitempty"` } -func ErrorResponse(ctx context.Context, messagef string, args ...interface{}) *Response { +func ErrorResponse(ctx context.Context, messagef string, args ...any) *Response { return &Response{ Errors: gqlerror.List{{Message: fmt.Sprintf(messagef, args...)}}, } diff --git a/vendor/github.com/99designs/gqlgen/graphql/stats.go b/vendor/github.com/99designs/gqlgen/graphql/stats.go index 31b9e605..1bf2ad9e 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/stats.go +++ b/vendor/github.com/99designs/gqlgen/graphql/stats.go @@ -14,7 +14,7 @@ type Stats struct { // Stats collected by handler extensions. Don't use directly, the extension should provide a type safe way to // access this. 
- extension map[string]interface{} + extension map[string]any } type TraceTiming struct { @@ -42,14 +42,14 @@ func GetStartTime(ctx context.Context) time.Time { return t } -func (c *Stats) SetExtension(name string, data interface{}) { +func (c *Stats) SetExtension(name string, data any) { if c.extension == nil { - c.extension = map[string]interface{}{} + c.extension = map[string]any{} } c.extension[name] = data } -func (c *Stats) GetExtension(name string) interface{} { +func (c *Stats) GetExtension(name string) any { if c.extension == nil { return nil } diff --git a/vendor/github.com/99designs/gqlgen/graphql/string.go b/vendor/github.com/99designs/gqlgen/graphql/string.go index 4da47064..61da5810 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/string.go +++ b/vendor/github.com/99designs/gqlgen/graphql/string.go @@ -47,7 +47,7 @@ func writeQuotedString(w io.Writer, s string) { io.WriteString(w, `"`) } -func UnmarshalString(v interface{}) (string, error) { +func UnmarshalString(v any) (string, error) { switch v := v.(type) { case string: return v, nil @@ -60,11 +60,7 @@ func UnmarshalString(v interface{}) (string, error) { case json.Number: return string(v), nil case bool: - if v { - return "true", nil - } else { - return "false", nil - } + return strconv.FormatBool(v), nil case nil: return "null", nil default: diff --git a/vendor/github.com/99designs/gqlgen/graphql/time.go b/vendor/github.com/99designs/gqlgen/graphql/time.go index ef3d17da..a5fe9030 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/time.go +++ b/vendor/github.com/99designs/gqlgen/graphql/time.go @@ -17,7 +17,7 @@ func MarshalTime(t time.Time) Marshaler { }) } -func UnmarshalTime(v interface{}) (time.Time, error) { +func UnmarshalTime(v any) (time.Time, error) { if tmpStr, ok := v.(string); ok { return time.Parse(time.RFC3339Nano, tmpStr) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/uint.go b/vendor/github.com/99designs/gqlgen/graphql/uint.go index 8730d900..ffccaf64 100644 
--- a/vendor/github.com/99designs/gqlgen/graphql/uint.go +++ b/vendor/github.com/99designs/gqlgen/graphql/uint.go @@ -14,7 +14,7 @@ func MarshalUint(i uint) Marshaler { }) } -func UnmarshalUint(v interface{}) (uint, error) { +func UnmarshalUint(v any) (uint, error) { switch v := v.(type) { case string: u64, err := strconv.ParseUint(v, 10, 64) @@ -45,7 +45,7 @@ func MarshalUint64(i uint64) Marshaler { }) } -func UnmarshalUint64(v interface{}) (uint64, error) { +func UnmarshalUint64(v any) (uint64, error) { switch v := v.(type) { case string: return strconv.ParseUint(v, 10, 64) @@ -74,7 +74,7 @@ func MarshalUint32(i uint32) Marshaler { }) } -func UnmarshalUint32(v interface{}) (uint32, error) { +func UnmarshalUint32(v any) (uint32, error) { switch v := v.(type) { case string: iv, err := strconv.ParseUint(v, 10, 32) diff --git a/vendor/github.com/99designs/gqlgen/graphql/upload.go b/vendor/github.com/99designs/gqlgen/graphql/upload.go index dafbde65..b603ab04 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/upload.go +++ b/vendor/github.com/99designs/gqlgen/graphql/upload.go @@ -18,7 +18,7 @@ func MarshalUpload(f Upload) Marshaler { }) } -func UnmarshalUpload(v interface{}) (Upload, error) { +func UnmarshalUpload(v any) (Upload, error) { upload, ok := v.(Upload) if !ok { return Upload{}, fmt.Errorf("%T is not an Upload", v) diff --git a/vendor/github.com/99designs/gqlgen/graphql/version.go b/vendor/github.com/99designs/gqlgen/graphql/version.go index 630b49f3..82266736 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/version.go +++ b/vendor/github.com/99designs/gqlgen/graphql/version.go @@ -1,3 +1,3 @@ package graphql -const Version = "v0.17.47" +const Version = "v0.17.49" diff --git a/vendor/github.com/99designs/gqlgen/internal/code/compare.go b/vendor/github.com/99designs/gqlgen/internal/code/compare.go index e521d31e..a3f15f18 100644 --- a/vendor/github.com/99designs/gqlgen/internal/code/compare.go +++ 
b/vendor/github.com/99designs/gqlgen/internal/code/compare.go @@ -1,6 +1,7 @@ package code import ( + "errors" "fmt" "go/types" ) @@ -32,7 +33,7 @@ func CompatibleTypes(expected, actual types.Type) error { case *types.Array: if actual, ok := actual.(*types.Array); ok { if expected.Len() != actual.Len() { - return fmt.Errorf("array length differs") + return errors.New("array length differs") } return CompatibleTypes(expected.Elem(), actual.Elem()) @@ -50,7 +51,7 @@ func CompatibleTypes(expected, actual types.Type) error { case *types.Struct: if actual, ok := actual.(*types.Struct); ok { if expected.NumFields() != actual.NumFields() { - return fmt.Errorf("number of struct fields differ") + return errors.New("number of struct fields differ") } for i := 0; i < expected.NumFields(); i++ { diff --git a/vendor/github.com/99designs/gqlgen/internal/code/imports.go b/vendor/github.com/99designs/gqlgen/internal/code/imports.go index 89ebab96..e950f1d5 100644 --- a/vendor/github.com/99designs/gqlgen/internal/code/imports.go +++ b/vendor/github.com/99designs/gqlgen/internal/code/imports.go @@ -102,21 +102,21 @@ func goModuleRoot(dir string) (string, bool) { // go.mod is not found in the tree, so the same sentinel value fits all the directories in a tree goModuleRootCache[d] = result } else { - if relPath, err := filepath.Rel(result.goModPath, d); err != nil { + relPath, err := filepath.Rel(result.goModPath, d) + if err != nil { panic(err) - } else { - path := result.moduleName - relPath := filepath.ToSlash(relPath) - if !strings.HasSuffix(relPath, "/") { - path += "/" - } - path += relPath - - goModuleRootCache[d] = goModuleSearchResult{ - path: path, - goModPath: result.goModPath, - moduleName: result.moduleName, - } + } + path := result.moduleName + relPath = filepath.ToSlash(relPath) + if !strings.HasSuffix(relPath, "/") { + path += "/" + } + path += relPath + + goModuleRootCache[d] = goModuleSearchResult{ + path: path, + goModPath: result.goModPath, + moduleName: 
result.moduleName, } } } diff --git a/vendor/github.com/99designs/gqlgen/main.go b/vendor/github.com/99designs/gqlgen/main.go index c920ec27..02f56343 100644 --- a/vendor/github.com/99designs/gqlgen/main.go +++ b/vendor/github.com/99designs/gqlgen/main.go @@ -100,17 +100,17 @@ var initCmd = &cli.Command{ cwd, err := os.Getwd() if err != nil { log.Println(err) - return fmt.Errorf("unable to determine current directory:%w", err) + return fmt.Errorf("unable to determine current directory: %w", err) } pkgName := code.ImportPathForDir(cwd) if pkgName == "" { - return fmt.Errorf( + return errors.New( "unable to determine import path for current directory, you probably need to run 'go mod init' first", ) } modRoot := findModuleRoot(cwd) if modRoot == "" { - return fmt.Errorf("go.mod is missing. Please, do 'go mod init' first\n") + return errors.New("go.mod is missing. Please, do 'go mod init' first\n") } // check schema and config don't already exist @@ -121,7 +121,7 @@ var initCmd = &cli.Command{ } _, err = config.LoadConfigFromDefaultLocations() if err == nil { - return fmt.Errorf("gqlgen.yml already exists in a parent directory\n") + return errors.New("gqlgen.yml already exists in a parent directory\n") } // create config @@ -187,10 +187,7 @@ var generateCmd = &cli.Command{ } } - if err = api.Generate(cfg); err != nil { - return err - } - return nil + return api.Generate(cfg) }, } diff --git a/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go b/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go index 9b60923f..5f6ce94e 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go +++ b/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go @@ -292,18 +292,17 @@ func (m *Plugin) MutateConfig(cfg *config.Config) error { getter += "\treturn interfaceSlice\n" getter += "}" return getter - } else { - getter := fmt.Sprintf("func (this %s) Get%s() %s { return ", templates.ToGo(model.Name), field.GoName, goType) - - if 
interfaceFieldTypeIsPointer && !structFieldTypeIsPointer { - getter += "&" - } else if !interfaceFieldTypeIsPointer && structFieldTypeIsPointer { - getter += "*" - } + } + getter := fmt.Sprintf("func (this %s) Get%s() %s { return ", templates.ToGo(model.Name), field.GoName, goType) - getter += fmt.Sprintf("this.%s }", field.GoName) - return getter + if interfaceFieldTypeIsPointer && !structFieldTypeIsPointer { + getter += "&" + } else if !interfaceFieldTypeIsPointer && structFieldTypeIsPointer { + getter += "*" } + + getter += fmt.Sprintf("this.%s }", field.GoName) + return getter } funcMap := template.FuncMap{ "getInterfaceByName": getInterfaceByName, @@ -446,36 +445,67 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition) fields = append(fields, f) } - // appending extra fields at the end of the fields list. - modelcfg := cfg.Models[schemaType.Name] + fields = append(fields, getExtraFields(cfg, schemaType.Name)...) + + return fields, nil +} + +func getExtraFields(cfg *config.Config, modelName string) []*Field { + modelcfg := cfg.Models[modelName] + + extraFieldsCount := len(modelcfg.ExtraFields) + len(modelcfg.EmbedExtraFields) + if extraFieldsCount == 0 { + return nil + } + + extraFields := make([]*Field, 0, extraFieldsCount) + + makeExtraField := func(fname string, fspec config.ModelExtraField) *Field { + ftype := buildType(fspec.Type) + + tag := `json:"-"` + if fspec.OverrideTags != "" { + tag = fspec.OverrideTags + } + + return &Field{ + Name: fname, + GoName: fname, + Type: ftype, + Description: fspec.Description, + Tag: tag, + } + } + if len(modelcfg.ExtraFields) > 0 { - ff := make([]*Field, 0, len(modelcfg.ExtraFields)) for fname, fspec := range modelcfg.ExtraFields { - ftype := buildType(fspec.Type) + extraFields = append(extraFields, makeExtraField(fname, fspec)) + } + } - tag := `json:"-"` - if fspec.OverrideTags != "" { - tag = fspec.OverrideTags - } + if len(modelcfg.EmbedExtraFields) > 0 { + for _, fspec := range 
modelcfg.EmbedExtraFields { + extraFields = append(extraFields, makeExtraField("", fspec)) + } + } - ff = append(ff, - &Field{ - Name: fname, - GoName: fname, - Type: ftype, - Description: fspec.Description, - Tag: tag, - }) + sort.Slice(extraFields, func(i, j int) bool { + if extraFields[i].Name == "" && extraFields[j].Name == "" { + return extraFields[i].Type.String() < extraFields[j].Type.String() } - sort.Slice(ff, func(i, j int) bool { - return ff[i].Name < ff[j].Name - }) + if extraFields[i].Name == "" { + return false + } - fields = append(fields, ff...) - } + if extraFields[j].Name == "" { + return true + } - return fields, nil + return extraFields[i].Name < extraFields[j].Name + }) + + return extraFields } func getStructTagFromField(cfg *config.Config, field *ast.FieldDefinition) string { @@ -591,7 +621,7 @@ func removeDuplicateTags(t string) string { key := kv[0] value := strings.Join(kv[1:], ":") processed[key] = true - if len(returnTags) > 0 { + if returnTags != "" { returnTags = " " + returnTags } diff --git a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go index 97a20477..38138d52 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go +++ b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go @@ -138,7 +138,7 @@ func (m *Plugin) generatePerSchema(data *codegen.Data) error { continue } if implExists { - return fmt.Errorf("multiple plugins implement ResolverImplementer") + return errors.New("multiple plugins implement ResolverImplementer") } implExists = true resolver.ImplementationRender = rImpl.Implement @@ -269,7 +269,7 @@ func (r *Resolver) Implementation() string { return r.ImplementationStr } -func gqlToResolverName(base string, gqlname, filenameTmpl string) string { +func gqlToResolverName(base, gqlname, filenameTmpl string) string { gqlname = filepath.Base(gqlname) ext := filepath.Ext(gqlname) if filenameTmpl == "" { diff 
--git a/vendor/github.com/casbin/casbin/v2/.gitignore b/vendor/github.com/casbin/casbin/v2/.gitignore deleted file mode 100644 index da27805f..00000000 --- a/vendor/github.com/casbin/casbin/v2/.gitignore +++ /dev/null @@ -1,30 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.idea/ -*.iml - -# vendor files -vendor diff --git a/vendor/github.com/casbin/casbin/v2/.golangci.yml b/vendor/github.com/casbin/casbin/v2/.golangci.yml deleted file mode 100644 index b8d36201..00000000 --- a/vendor/github.com/casbin/casbin/v2/.golangci.yml +++ /dev/null @@ -1,354 +0,0 @@ -# Based on https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322 -# This code is licensed under the terms of the MIT license https://opensource.org/license/mit -# Copyright (c) 2021 Marat Reymers - -## Golden config for golangci-lint v1.56.2 -# -# This is the best config for golangci-lint based on my experience and opinion. -# It is very strict, but not extremely strict. -# Feel free to adapt and change it for your needs. - -run: - # Timeout for analysis, e.g. 30s, 5m. - # Default: 1m - timeout: 3m - - -# This file contains only configs which differ from defaults. -# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml -linters-settings: - cyclop: - # The maximal code complexity to report. - # Default: 10 - max-complexity: 30 - # The maximal average package complexity. - # If it's higher than 0.0 (float) the check is enabled - # Default: 0.0 - package-average: 10.0 - - errcheck: - # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. - # Such cases aren't reported by default. 
- # Default: false - check-type-assertions: true - - exhaustive: - # Program elements to check for exhaustiveness. - # Default: [ switch ] - check: - - switch - - map - - exhaustruct: - # List of regular expressions to exclude struct packages and their names from checks. - # Regular expressions must match complete canonical struct package/name/structname. - # Default: [] - exclude: - # std libs - - "^net/http.Client$" - - "^net/http.Cookie$" - - "^net/http.Request$" - - "^net/http.Response$" - - "^net/http.Server$" - - "^net/http.Transport$" - - "^net/url.URL$" - - "^os/exec.Cmd$" - - "^reflect.StructField$" - # public libs - - "^github.com/Shopify/sarama.Config$" - - "^github.com/Shopify/sarama.ProducerMessage$" - - "^github.com/mitchellh/mapstructure.DecoderConfig$" - - "^github.com/prometheus/client_golang/.+Opts$" - - "^github.com/spf13/cobra.Command$" - - "^github.com/spf13/cobra.CompletionOptions$" - - "^github.com/stretchr/testify/mock.Mock$" - - "^github.com/testcontainers/testcontainers-go.+Request$" - - "^github.com/testcontainers/testcontainers-go.FromDockerfile$" - - "^golang.org/x/tools/go/analysis.Analyzer$" - - "^google.golang.org/protobuf/.+Options$" - - "^gopkg.in/yaml.v3.Node$" - - funlen: - # Checks the number of lines in a function. - # If lower than 0, disable the check. - # Default: 60 - lines: 100 - # Checks the number of statements in a function. - # If lower than 0, disable the check. - # Default: 40 - statements: 50 - # Ignore comments when counting lines. - # Default false - ignore-comments: true - - gocognit: - # Minimal code complexity to report. - # Default: 30 (but we recommend 10-20) - min-complexity: 20 - - gocritic: - # Settings passed to gocritic. - # The settings key is the name of a supported gocritic checker. - # The list of supported checkers can be find in https://go-critic.github.io/overview. - settings: - captLocal: - # Whether to restrict checker to params only. 
- # Default: true - paramsOnly: false - underef: - # Whether to skip (*x).method() calls where x is a pointer receiver. - # Default: true - skipRecvDeref: false - - gomnd: - # List of function patterns to exclude from analysis. - # Values always ignored: `time.Date`, - # `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`, - # `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`. - # Default: [] - ignored-functions: - - flag.Arg - - flag.Duration.* - - flag.Float.* - - flag.Int.* - - flag.Uint.* - - os.Chmod - - os.Mkdir.* - - os.OpenFile - - os.WriteFile - - prometheus.ExponentialBuckets.* - - prometheus.LinearBuckets - - gomodguard: - blocked: - # List of blocked modules. - # Default: [] - modules: - - github.com/golang/protobuf: - recommendations: - - google.golang.org/protobuf - reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules" - - github.com/satori/go.uuid: - recommendations: - - github.com/google/uuid - reason: "satori's package is not maintained" - - github.com/gofrs/uuid: - recommendations: - - github.com/gofrs/uuid/v5 - reason: "gofrs' package was not go module before v5" - - govet: - # Enable all analyzers. - # Default: false - enable-all: true - # Disable analyzers by name. - # Run `go tool vet help` to see all analyzers. - # Default: [] - disable: - - fieldalignment # too strict - # Settings per analyzer. - settings: - shadow: - # Whether to be strict about shadowing; can be noisy. - # Default: false - #strict: true - - inamedparam: - # Skips check for interface methods with only a single parameter. - # Default: false - skip-single-param: true - - nakedret: - # Make an issue if func has more lines of code than this setting, and it has naked returns. - # Default: 30 - max-func-lines: 0 - - nolintlint: - # Exclude following linters from requiring an explanation. 
- # Default: [] - allow-no-explanation: [ funlen, gocognit, lll ] - # Enable to require an explanation of nonzero length after each nolint directive. - # Default: false - require-explanation: true - # Enable to require nolint directives to mention the specific linter being suppressed. - # Default: false - require-specific: true - - perfsprint: - # Optimizes into strings concatenation. - # Default: true - strconcat: false - - rowserrcheck: - # database/sql is always checked - # Default: [] - packages: - - github.com/jmoiron/sqlx - - tenv: - # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. - # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. - # Default: false - all: true - - stylecheck: - # STxxxx checks in https://staticcheck.io/docs/configuration/options/#checks - # Default: ["*"] - checks: ["all", "-ST1003"] - - revive: - rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter - - name: unused-parameter - disabled: true - -linters: - disable-all: true - enable: - ## enabled by default - #- errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases - - gosimple # specializes in simplifying a code - - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - - ineffassign # detects when assignments to existing variables are not used - - staticcheck # is a go vet on steroids, applying a ton of static analysis checks - - typecheck # like the front-end of a Go compiler, parses and type-checks Go code - - unused # checks for unused constants, variables, functions and types - ## disabled by default - - asasalint # checks for pass []any as any in variadic func(...any) - - asciicheck # checks that your code does not contain non-ASCII identifiers - - bidichk # checks for dangerous unicode character sequences - - bodyclose # 
checks whether HTTP response body is closed successfully - - cyclop # checks function and package cyclomatic complexity - - dupl # tool for code clone detection - - durationcheck # checks for two durations multiplied together - - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error - #- errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13 - - execinquery # checks query string in Query function which reads your Go src files and warning it finds - - exhaustive # checks exhaustiveness of enum switch statements - - exportloopref # checks for pointers to enclosing loop variables - #- forbidigo # forbids identifiers - - funlen # tool for detection of long functions - - gocheckcompilerdirectives # validates go compiler directive comments (//go:) - #- gochecknoglobals # checks that no global variables exist - - gochecknoinits # checks that no init functions are present in Go code - - gochecksumtype # checks exhaustiveness on Go "sum types" - #- gocognit # computes and checks the cognitive complexity of functions - #- goconst # finds repeated strings that could be replaced by a constant - #- gocritic # provides diagnostics that check for bugs, performance and style issues - - gocyclo # computes and checks the cyclomatic complexity of functions - - godot # checks if comments end in a period - - goimports # in addition to fixing imports, goimports also formats your code in the same style as gofmt - #- gomnd # detects magic numbers - - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod - - gomodguard # allow and block lists linter for direct Go module dependencies. 
This is different from depguard where there are different block types for example version constraints and module recommendations - - goprintffuncname # checks that printf-like functions are named with f at the end - - gosec # inspects source code for security problems - #- lll # reports long lines - - loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap) - - makezero # finds slice declarations with non-zero initial length - - mirror # reports wrong mirror patterns of bytes/strings usage - - musttag # enforces field tags in (un)marshaled structs - - nakedret # finds naked returns in functions greater than a specified function length - - nestif # reports deeply nested if statements - - nilerr # finds the code that returns nil even if it checks that the error is not nil - #- nilnil # checks that there is no simultaneous return of nil error and an invalid value - - noctx # finds sending http request without context.Context - - nolintlint # reports ill-formed or insufficient nolint directives - #- nonamedreturns # reports all named returns - - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL - #- perfsprint # checks that fmt.Sprintf can be replaced with a faster alternative - - predeclared # finds code that shadows one of Go's predeclared identifiers - - promlinter # checks Prometheus metrics naming via promlint - - protogetter # reports direct reads from proto message fields when getters should be used - - reassign # checks that package variables are not reassigned - - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint - - rowserrcheck # checks whether Err of rows is checked successfully - - sloglint # ensure consistent code style when using log/slog - - spancheck # checks for mistakes with OpenTelemetry/Census spans - - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed - - stylecheck # is a replacement for golint - - tenv # 
detects using os.Setenv instead of t.Setenv since Go1.17 - - testableexamples # checks if examples are testable (have an expected output) - - testifylint # checks usage of github.com/stretchr/testify - #- testpackage # makes you use a separate _test package - - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes - - unconvert # removes unnecessary type conversions - #- unparam # reports unused function parameters - - usestdlibvars # detects the possibility to use variables/constants from the Go standard library - - wastedassign # finds wasted assignment statements - - whitespace # detects leading and trailing whitespace - - ## you may want to enable - #- decorder # checks declaration order and count of types, constants, variables and functions - #- exhaustruct # [highly recommend to enable] checks if all structure fields are initialized - #- gci # controls golang package import order and makes it always deterministic - #- ginkgolinter # [if you use ginkgo/gomega] enforces standards of using ginkgo and gomega - #- godox # detects FIXME, TODO and other comment keywords - #- goheader # checks is file header matches to pattern - #- inamedparam # [great idea, but too strict, need to ignore a lot of cases by default] reports interfaces with unnamed method parameters - #- interfacebloat # checks the number of methods inside an interface - #- ireturn # accept interfaces, return concrete types - #- prealloc # [premature optimization, but can be used in some cases] finds slice declarations that could potentially be preallocated - #- tagalign # checks that struct tags are well aligned - #- varnamelen # [great idea, but too many false positives] checks that the length of a variable's name matches its scope - #- wrapcheck # checks that errors returned from external packages are wrapped - #- zerologlint # detects the wrong usage of zerolog that a user forgets to dispatch zerolog.Event - - ## disabled - #- containedctx # detects struct contained 
context.Context field - #- contextcheck # [too many false positives] checks the function whether use a non-inherited context - #- depguard # [replaced by gomodguard] checks if package imports are in a list of acceptable packages - #- dogsled # checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) - #- dupword # [useless without config] checks for duplicate words in the source code - #- errchkjson # [don't see profit + I'm against of omitting errors like in the first example https://github.com/breml/errchkjson] checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted - #- forcetypeassert # [replaced by errcheck] finds forced type assertions - #- goerr113 # [too strict] checks the errors handling expressions - #- gofmt # [replaced by goimports] checks whether code was gofmt-ed - #- gofumpt # [replaced by goimports, gofumports is not available yet] checks whether code was gofumpt-ed - #- gosmopolitan # reports certain i18n/l10n anti-patterns in your Go codebase - #- grouper # analyzes expression groups - #- importas # enforces consistent import aliases - #- maintidx # measures the maintainability index of each function - #- misspell # [useless] finds commonly misspelled English words in comments - #- nlreturn # [too strict and mostly code is not more readable] checks for a new line before return and branch statements to increase code clarity - #- paralleltest # [too many false positives] detects missing usage of t.Parallel() method in your Go test - #- tagliatelle # checks the struct tags - #- thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers - #- wsl # [too strict and mostly code is not more readable] whitespace linter forces you to use empty lines - - ## deprecated - #- deadcode # [deprecated, replaced by unused] finds unused code - #- exhaustivestruct # [deprecated, replaced by 
exhaustruct] checks if all struct's fields are initialized - #- golint # [deprecated, replaced by revive] golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes - #- ifshort # [deprecated] checks that your code uses short syntax for if-statements whenever possible - #- interfacer # [deprecated] suggests narrower interface types - #- maligned # [deprecated, replaced by govet fieldalignment] detects Go structs that would take less memory if their fields were sorted - #- nosnakecase # [deprecated, replaced by revive var-naming] detects snake case of variable naming and function name - #- scopelint # [deprecated, replaced by exportloopref] checks for unpinned variables in go programs - #- structcheck # [deprecated, replaced by unused] finds unused struct fields - #- varcheck # [deprecated, replaced by unused] finds unused global variables and constants - - -issues: - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 - max-same-issues: 50 - - exclude-rules: - - source: "(noinspection|TODO)" - linters: [ godot ] - - source: "//noinspection" - linters: [ gocritic ] - - path: "_test\\.go" - linters: - - bodyclose - - dupl - - funlen - - goconst - - gosec - - noctx - - wrapcheck - # TODO: remove after PR is released https://github.com/golangci/golangci-lint/pull/4386 - - text: "fmt.Sprintf can be replaced with string addition" - linters: [ perfsprint ] \ No newline at end of file diff --git a/vendor/github.com/casbin/casbin/v2/.releaserc.json b/vendor/github.com/casbin/casbin/v2/.releaserc.json deleted file mode 100644 index 58cb0bb4..00000000 --- a/vendor/github.com/casbin/casbin/v2/.releaserc.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "debug": true, - "branches": [ - "+([0-9])?(.{+([0-9]),x}).x", - "master", - { - "name": "beta", - "prerelease": true - } - ], - "plugins": [ - "@semantic-release/commit-analyzer", - "@semantic-release/release-notes-generator", - "@semantic-release/github" - ] -} 
diff --git a/vendor/github.com/casbin/casbin/v2/.travis.yml b/vendor/github.com/casbin/casbin/v2/.travis.yml deleted file mode 100644 index a35e0622..00000000 --- a/vendor/github.com/casbin/casbin/v2/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -sudo: false - -env: - - GO111MODULE=on - -go: - - "1.11.13" - - "1.12" - - "1.13" - - "1.14" - -script: - - make test diff --git a/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md b/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md deleted file mode 100644 index 4bab59c9..00000000 --- a/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md +++ /dev/null @@ -1,35 +0,0 @@ -# How to contribute - -The following is a set of guidelines for contributing to casbin and its libraries, which are hosted at [casbin organization at Github](https://github.com/casbin). - -This project adheres to the [Contributor Covenant 1.2.](https://www.contributor-covenant.org/version/1/2/0/code-of-conduct.html) By participating, you are expected to uphold this code. Please report unacceptable behavior to info@casbin.com. - -## Questions - -- We do our best to have an [up-to-date documentation](https://casbin.org/docs/overview) -- [Stack Overflow](https://stackoverflow.com) is the best place to start if you have a question. Please use the [casbin tag](https://stackoverflow.com/tags/casbin/info) we are actively monitoring. We encourage you to use Stack Overflow specially for Modeling Access Control Problems, in order to build a shared knowledge base. -- You can also join our [Discord](https://discord.gg/S5UjpzGZjN). - -## Reporting issues - -Reporting issues are a great way to contribute to the project. We are perpetually grateful about a well-written, through bug report. - -Before raising a new issue, check our [issue list](https://github.com/casbin/casbin/issues) to determine if it already contains the problem that you are facing. - -A good bug report shouldn't leave others needing to chase you for more information. 
Please be as detailed as possible. The following questions might serve as a template for writing a detailed report: - -What were you trying to achieve? -What are the expected results? -What are the received results? -What are the steps to reproduce the issue? -In what environment did you encounter the issue? - -Feature requests can also be submitted as issues. - -## Pull requests - -Good pull requests (e.g. patches, improvements, new features) are a fantastic help. They should remain focused in scope and avoid unrelated commits. - -Please ask first before embarking on any significant pull request (e.g. implementing new features, refactoring code etc.), otherwise you risk spending a lot of time working on something that the maintainers might not want to merge into the project. - -First add an issue to the project to discuss the improvement. Please adhere to the coding conventions used throughout the project. If in doubt, consult the [Effective Go style guide](https://golang.org/doc/effective_go.html). diff --git a/vendor/github.com/casbin/casbin/v2/LICENSE b/vendor/github.com/casbin/casbin/v2/LICENSE deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/github.com/casbin/casbin/v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/casbin/casbin/v2/Makefile b/vendor/github.com/casbin/casbin/v2/Makefile deleted file mode 100644 index 6db2b920..00000000 --- a/vendor/github.com/casbin/casbin/v2/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -SHELL = /bin/bash -export PATH := $(shell yarn global bin):$(PATH) - -default: lint test - -test: - go test -race -v ./... - -benchmark: - go test -bench=. - -lint: - golangci-lint run --verbose - -release: - yarn global add semantic-release@17.2.4 - semantic-release - diff --git a/vendor/github.com/casbin/casbin/v2/README.md b/vendor/github.com/casbin/casbin/v2/README.md deleted file mode 100644 index 36549f55..00000000 --- a/vendor/github.com/casbin/casbin/v2/README.md +++ /dev/null @@ -1,296 +0,0 @@ -Casbin -==== - -[![Go Report Card](https://goreportcard.com/badge/github.com/casbin/casbin)](https://goreportcard.com/report/github.com/casbin/casbin) -[![Build](https://github.com/casbin/casbin/actions/workflows/default.yml/badge.svg)](https://github.com/casbin/casbin/actions/workflows/default.yml) -[![Coverage Status](https://coveralls.io/repos/github/casbin/casbin/badge.svg?branch=master)](https://coveralls.io/github/casbin/casbin?branch=master) -[![Godoc](https://godoc.org/github.com/casbin/casbin?status.svg)](https://pkg.go.dev/github.com/casbin/casbin/v2) 
-[![Release](https://img.shields.io/github/release/casbin/casbin.svg)](https://github.com/casbin/casbin/releases/latest) -[![Discord](https://img.shields.io/discord/1022748306096537660?logo=discord&label=discord&color=5865F2)](https://discord.gg/S5UjpzGZjN) -[![Sourcegraph](https://sourcegraph.com/github.com/casbin/casbin/-/badge.svg)](https://sourcegraph.com/github.com/casbin/casbin?badge) - -**News**: still worry about how to write the correct Casbin policy? ``Casbin online editor`` is coming to help! Try it at: https://casbin.org/editor/ - -![casbin Logo](casbin-logo.png) - -Casbin is a powerful and efficient open-source access control library for Golang projects. It provides support for enforcing authorization based on various [access control models](https://en.wikipedia.org/wiki/Computer_security_model). - -

- Sponsored by -
- - - - - - -
- Build auth with fraud prevention, faster.
Try Stytch for API-first authentication, user & org management, multi-tenant SSO, MFA, device fingerprinting, and more.
-
-

- -## All the languages supported by Casbin: - -| [![golang](https://casbin.org/img/langs/golang.png)](https://github.com/casbin/casbin) | [![java](https://casbin.org/img/langs/java.png)](https://github.com/casbin/jcasbin) | [![nodejs](https://casbin.org/img/langs/nodejs.png)](https://github.com/casbin/node-casbin) | [![php](https://casbin.org/img/langs/php.png)](https://github.com/php-casbin/php-casbin) | -|----------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------| -| [Casbin](https://github.com/casbin/casbin) | [jCasbin](https://github.com/casbin/jcasbin) | [node-Casbin](https://github.com/casbin/node-casbin) | [PHP-Casbin](https://github.com/php-casbin/php-casbin) | -| production-ready | production-ready | production-ready | production-ready | - -| [![python](https://casbin.org/img/langs/python.png)](https://github.com/casbin/pycasbin) | [![dotnet](https://casbin.org/img/langs/dotnet.png)](https://github.com/casbin-net/Casbin.NET) | [![c++](https://casbin.org/img/langs/cpp.png)](https://github.com/casbin/casbin-cpp) | [![rust](https://casbin.org/img/langs/rust.png)](https://github.com/casbin/casbin-rs) | -|------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------| -| [PyCasbin](https://github.com/casbin/pycasbin) | [Casbin.NET](https://github.com/casbin-net/Casbin.NET) | [Casbin-CPP](https://github.com/casbin/casbin-cpp) | 
[Casbin-RS](https://github.com/casbin/casbin-rs) | -| production-ready | production-ready | production-ready | production-ready | - -## Table of contents - -- [Supported models](#supported-models) -- [How it works?](#how-it-works) -- [Features](#features) -- [Installation](#installation) -- [Documentation](#documentation) -- [Online editor](#online-editor) -- [Tutorials](#tutorials) -- [Get started](#get-started) -- [Policy management](#policy-management) -- [Policy persistence](#policy-persistence) -- [Policy consistence between multiple nodes](#policy-consistence-between-multiple-nodes) -- [Role manager](#role-manager) -- [Benchmarks](#benchmarks) -- [Examples](#examples) -- [Middlewares](#middlewares) -- [Our adopters](#our-adopters) - -## Supported models - -1. [**ACL (Access Control List)**](https://en.wikipedia.org/wiki/Access_control_list) -2. **ACL with [superuser](https://en.wikipedia.org/wiki/Superuser)** -3. **ACL without users**: especially useful for systems that don't have authentication or user log-ins. -3. **ACL without resources**: some scenarios may target for a type of resources instead of an individual resource by using permissions like ``write-article``, ``read-log``. It doesn't control the access to a specific article or log. -4. **[RBAC (Role-Based Access Control)](https://en.wikipedia.org/wiki/Role-based_access_control)** -5. **RBAC with resource roles**: both users and resources can have roles (or groups) at the same time. -6. **RBAC with domains/tenants**: users can have different role sets for different domains/tenants. -7. **[ABAC (Attribute-Based Access Control)](https://en.wikipedia.org/wiki/Attribute-Based_Access_Control)**: syntax sugar like ``resource.Owner`` can be used to get the attribute for a resource. -8. **[RESTful](https://en.wikipedia.org/wiki/Representational_state_transfer)**: supports paths like ``/res/*``, ``/res/:id`` and HTTP methods like ``GET``, ``POST``, ``PUT``, ``DELETE``. -9. 
**Deny-override**: both allow and deny authorizations are supported, deny overrides the allow. -10. **Priority**: the policy rules can be prioritized like firewall rules. - -## How it works? - -In Casbin, an access control model is abstracted into a CONF file based on the **PERM metamodel (Policy, Effect, Request, Matchers)**. So switching or upgrading the authorization mechanism for a project is just as simple as modifying a configuration. You can customize your own access control model by combining the available models. For example, you can get RBAC roles and ABAC attributes together inside one model and share one set of policy rules. - -The most basic and simplest model in Casbin is ACL. ACL's model CONF is: - -```ini -# Request definition -[request_definition] -r = sub, obj, act - -# Policy definition -[policy_definition] -p = sub, obj, act - -# Policy effect -[policy_effect] -e = some(where (p.eft == allow)) - -# Matchers -[matchers] -m = r.sub == p.sub && r.obj == p.obj && r.act == p.act - -``` - -An example policy for ACL model is like: - -``` -p, alice, data1, read -p, bob, data2, write -``` - -It means: - -- alice can read data1 -- bob can write data2 - -We also support multi-line mode by appending '\\' in the end: - -```ini -# Matchers -[matchers] -m = r.sub == p.sub && r.obj == p.obj \ - && r.act == p.act -``` - -Further more, if you are using ABAC, you can try operator `in` like following in Casbin **golang** edition (jCasbin and Node-Casbin are not supported yet): - -```ini -# Matchers -[matchers] -m = r.obj == p.obj && r.act == p.act || r.obj in ('data2', 'data3') -``` - -But you **SHOULD** make sure that the length of the array is **MORE** than **1**, otherwise there will cause it to panic. - -For more operators, you may take a look at [govaluate](https://github.com/casbin/govaluate) - -## Features - -What Casbin does: - -1. 
enforce the policy in the classic ``{subject, object, action}`` form or a customized form as you defined, both allow and deny authorizations are supported. -2. handle the storage of the access control model and its policy. -3. manage the role-user mappings and role-role mappings (aka role hierarchy in RBAC). -4. support built-in superuser like ``root`` or ``administrator``. A superuser can do anything without explicit permissions. -5. multiple built-in operators to support the rule matching. For example, ``keyMatch`` can map a resource key ``/foo/bar`` to the pattern ``/foo*``. - -What Casbin does NOT do: - -1. authentication (aka verify ``username`` and ``password`` when a user logs in) -2. manage the list of users or roles. I believe it's more convenient for the project itself to manage these entities. Users usually have their passwords, and Casbin is not designed as a password container. However, Casbin stores the user-role mapping for the RBAC scenario. - -## Installation - -``` -go get github.com/casbin/casbin/v2 -``` - -## Documentation - -https://casbin.org/docs/overview - -## Online editor - -You can also use the online editor (https://casbin.org/editor/) to write your Casbin model and policy in your web browser. It provides functionality such as ``syntax highlighting`` and ``code completion``, just like an IDE for a programming language. - -## Tutorials - -https://casbin.org/docs/tutorials - -## Get started - -1. New a Casbin enforcer with a model file and a policy file: - - ```go - e, _ := casbin.NewEnforcer("path/to/model.conf", "path/to/policy.csv") - ``` - -Note: you can also initialize an enforcer with policy in DB instead of file, see [Policy-persistence](#policy-persistence) section for details. - -2. Add an enforcement hook into your code right before the access happens: - - ```go - sub := "alice" // the user that wants to access a resource. - obj := "data1" // the resource that is going to be accessed. 
- act := "read" // the operation that the user performs on the resource. - - if res, _ := e.Enforce(sub, obj, act); res { - // permit alice to read data1 - } else { - // deny the request, show an error - } - ``` - -3. Besides the static policy file, Casbin also provides API for permission management at run-time. For example, You can get all the roles assigned to a user as below: - - ```go - roles, _ := e.GetImplicitRolesForUser(sub) - ``` - -See [Policy management APIs](#policy-management) for more usage. - -## Policy management - -Casbin provides two sets of APIs to manage permissions: - -- [Management API](https://casbin.org/docs/management-api): the primitive API that provides full support for Casbin policy management. -- [RBAC API](https://casbin.org/docs/rbac-api): a more friendly API for RBAC. This API is a subset of Management API. The RBAC users could use this API to simplify the code. - -We also provide a [web-based UI](https://casbin.org/docs/admin-portal) for model management and policy management: - -![model editor](https://hsluoyz.github.io/casbin/ui_model_editor.png) - -![policy editor](https://hsluoyz.github.io/casbin/ui_policy_editor.png) - -## Policy persistence - -https://casbin.org/docs/adapters - -## Policy consistence between multiple nodes - -https://casbin.org/docs/watchers - -## Role manager - -https://casbin.org/docs/role-managers - -## Benchmarks - -https://casbin.org/docs/benchmark - -## Examples - -| Model | Model file | Policy file | -|---------------------------|----------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| ACL | [basic_model.conf](https://github.com/casbin/casbin/blob/master/examples/basic_model.conf) | [basic_policy.csv](https://github.com/casbin/casbin/blob/master/examples/basic_policy.csv) | -| ACL with superuser | 
[basic_model_with_root.conf](https://github.com/casbin/casbin/blob/master/examples/basic_with_root_model.conf) | [basic_policy.csv](https://github.com/casbin/casbin/blob/master/examples/basic_policy.csv) | -| ACL without users | [basic_model_without_users.conf](https://github.com/casbin/casbin/blob/master/examples/basic_without_users_model.conf) | [basic_policy_without_users.csv](https://github.com/casbin/casbin/blob/master/examples/basic_without_users_policy.csv) | -| ACL without resources | [basic_model_without_resources.conf](https://github.com/casbin/casbin/blob/master/examples/basic_without_resources_model.conf) | [basic_policy_without_resources.csv](https://github.com/casbin/casbin/blob/master/examples/basic_without_resources_policy.csv) | -| RBAC | [rbac_model.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_model.conf) | [rbac_policy.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_policy.csv) | -| RBAC with resource roles | [rbac_model_with_resource_roles.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_resource_roles_model.conf) | [rbac_policy_with_resource_roles.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_resource_roles_policy.csv) | -| RBAC with domains/tenants | [rbac_model_with_domains.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_domains_model.conf) | [rbac_policy_with_domains.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_domains_policy.csv) | -| ABAC | [abac_model.conf](https://github.com/casbin/casbin/blob/master/examples/abac_model.conf) | N/A | -| RESTful | [keymatch_model.conf](https://github.com/casbin/casbin/blob/master/examples/keymatch_model.conf) | [keymatch_policy.csv](https://github.com/casbin/casbin/blob/master/examples/keymatch_policy.csv) | -| Deny-override | [rbac_model_with_deny.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_deny_model.conf) | 
[rbac_policy_with_deny.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_deny_policy.csv) | -| Priority | [priority_model.conf](https://github.com/casbin/casbin/blob/master/examples/priority_model.conf) | [priority_policy.csv](https://github.com/casbin/casbin/blob/master/examples/priority_policy.csv) | - -## Middlewares - -Authz middlewares for web frameworks: https://casbin.org/docs/middlewares - -## Our adopters - -https://casbin.org/docs/adopters - -## How to Contribute - -Please read the [contributing guide](CONTRIBUTING.md). - -## Contributors - -This project exists thanks to all the people who contribute. - - -## Backers - -Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/casbin#backer)] - - - -## Sponsors - -Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/casbin#sponsor)] - - - - - - - - - - - - -## Star History - -[![Star History Chart](https://api.star-history.com/svg?repos=casbin/casbin&type=Date)](https://star-history.com/#casbin/casbin&Date) - -## License - -This project is licensed under the [Apache 2.0 license](LICENSE). - -## Contact - -If you have any issues or feature requests, please contact us. PR is welcomed. 
-- https://github.com/casbin/casbin/issues -- hsluoyz@gmail.com -- Tencent QQ group: [546057381](//shang.qq.com/wpa/qunwpa?idkey=8ac8b91fc97ace3d383d0035f7aa06f7d670fd8e8d4837347354a31c18fac885) diff --git a/vendor/github.com/casbin/casbin/v2/casbin-logo.png b/vendor/github.com/casbin/casbin/v2/casbin-logo.png deleted file mode 100644 index 7e5d1ecf..00000000 Binary files a/vendor/github.com/casbin/casbin/v2/casbin-logo.png and /dev/null differ diff --git a/vendor/github.com/casbin/casbin/v2/config/config.go b/vendor/github.com/casbin/casbin/v2/config/config.go deleted file mode 100644 index 57d40d84..00000000 --- a/vendor/github.com/casbin/casbin/v2/config/config.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -var ( - // DEFAULT_SECTION specifies the name of a section if no name provided. - DEFAULT_SECTION = "default" - // DEFAULT_COMMENT defines what character(s) indicate a comment `#`. - DEFAULT_COMMENT = []byte{'#'} - // DEFAULT_COMMENT_SEM defines what alternate character(s) indicate a comment `;`. - DEFAULT_COMMENT_SEM = []byte{';'} - // DEFAULT_MULTI_LINE_SEPARATOR defines what character indicates a multi-line content. - DEFAULT_MULTI_LINE_SEPARATOR = []byte{'\\'} -) - -// ConfigInterface defines the behavior of a Config implementation. 
-type ConfigInterface interface { - String(key string) string - Strings(key string) []string - Bool(key string) (bool, error) - Int(key string) (int, error) - Int64(key string) (int64, error) - Float64(key string) (float64, error) - Set(key string, value string) error -} - -// Config represents an implementation of the ConfigInterface. -type Config struct { - // Section:key=value - data map[string]map[string]string -} - -// NewConfig create an empty configuration representation from file. -func NewConfig(confName string) (ConfigInterface, error) { - c := &Config{ - data: make(map[string]map[string]string), - } - err := c.parse(confName) - return c, err -} - -// NewConfigFromText create an empty configuration representation from text. -func NewConfigFromText(text string) (ConfigInterface, error) { - c := &Config{ - data: make(map[string]map[string]string), - } - err := c.parseBuffer(bufio.NewReader(strings.NewReader(text))) - return c, err -} - -// AddConfig adds a new section->key:value to the configuration. 
-func (c *Config) AddConfig(section string, option string, value string) bool { - if section == "" { - section = DEFAULT_SECTION - } - - if _, ok := c.data[section]; !ok { - c.data[section] = make(map[string]string) - } - - _, ok := c.data[section][option] - c.data[section][option] = value - - return !ok -} - -func (c *Config) parse(fname string) (err error) { - f, err := os.Open(fname) - if err != nil { - return err - } - defer f.Close() - - buf := bufio.NewReader(f) - return c.parseBuffer(buf) -} - -func (c *Config) parseBuffer(buf *bufio.Reader) error { - var section string - var lineNum int - var buffer bytes.Buffer - var canWrite bool - for { - if canWrite { - if err := c.write(section, lineNum, &buffer); err != nil { - return err - } else { - canWrite = false - } - } - lineNum++ - line, _, err := buf.ReadLine() - if err == io.EOF { - // force write when buffer is not flushed yet - if buffer.Len() > 0 { - if err = c.write(section, lineNum, &buffer); err != nil { - return err - } - } - break - } else if err != nil { - return err - } - - line = bytes.TrimSpace(line) - switch { - case bytes.Equal(line, []byte{}), bytes.HasPrefix(line, DEFAULT_COMMENT_SEM), - bytes.HasPrefix(line, DEFAULT_COMMENT): - canWrite = true - continue - case bytes.HasPrefix(line, []byte{'['}) && bytes.HasSuffix(line, []byte{']'}): - // force write when buffer is not flushed yet - if buffer.Len() > 0 { - if err := c.write(section, lineNum, &buffer); err != nil { - return err - } - canWrite = false - } - section = string(line[1 : len(line)-1]) - default: - var p []byte - if bytes.HasSuffix(line, DEFAULT_MULTI_LINE_SEPARATOR) { - p = bytes.TrimSpace(line[:len(line)-1]) - p = append(p, " "...) 
- } else { - p = line - canWrite = true - } - - end := len(p) - for i, value := range p { - if value == DEFAULT_COMMENT[0] || value == DEFAULT_COMMENT_SEM[0] { - end = i - break - } - } - if _, err := buffer.Write(p[:end]); err != nil { - return err - } - } - } - - return nil -} - -func (c *Config) write(section string, lineNum int, b *bytes.Buffer) error { - if b.Len() <= 0 { - return nil - } - - optionVal := bytes.SplitN(b.Bytes(), []byte{'='}, 2) - if len(optionVal) != 2 { - return fmt.Errorf("parse the content error : line %d , %s = ? ", lineNum, optionVal[0]) - } - option := bytes.TrimSpace(optionVal[0]) - value := bytes.TrimSpace(optionVal[1]) - c.AddConfig(section, string(option), string(value)) - - // flush buffer after adding - b.Reset() - - return nil -} - -// Bool lookups up the value using the provided key and converts the value to a bool. -func (c *Config) Bool(key string) (bool, error) { - return strconv.ParseBool(c.get(key)) -} - -// Int lookups up the value using the provided key and converts the value to a int. -func (c *Config) Int(key string) (int, error) { - return strconv.Atoi(c.get(key)) -} - -// Int64 lookups up the value using the provided key and converts the value to a int64. -func (c *Config) Int64(key string) (int64, error) { - return strconv.ParseInt(c.get(key), 10, 64) -} - -// Float64 lookups up the value using the provided key and converts the value to a float64. -func (c *Config) Float64(key string) (float64, error) { - return strconv.ParseFloat(c.get(key), 64) -} - -// String lookups up the value using the provided key and converts the value to a string. -func (c *Config) String(key string) string { - return c.get(key) -} - -// Strings lookups up the value using the provided key and converts the value to an array of string -// by splitting the string by comma. 
-func (c *Config) Strings(key string) []string { - v := c.get(key) - if v == "" { - return nil - } - return strings.Split(v, ",") -} - -// Set sets the value for the specific key in the Config. -func (c *Config) Set(key string, value string) error { - if len(key) == 0 { - return errors.New("key is empty") - } - - var ( - section string - option string - ) - - keys := strings.Split(strings.ToLower(key), "::") - if len(keys) >= 2 { - section = keys[0] - option = keys[1] - } else { - option = keys[0] - } - - c.AddConfig(section, option, value) - return nil -} - -// section.key or key. -func (c *Config) get(key string) string { - var ( - section string - option string - ) - - keys := strings.Split(strings.ToLower(key), "::") - if len(keys) >= 2 { - section = keys[0] - option = keys[1] - } else { - section = DEFAULT_SECTION - option = keys[0] - } - - if value, ok := c.data[section][option]; ok { - return value - } - - return "" -} diff --git a/vendor/github.com/casbin/casbin/v2/constant/constants.go b/vendor/github.com/casbin/casbin/v2/constant/constants.go deleted file mode 100644 index 7a454aec..00000000 --- a/vendor/github.com/casbin/casbin/v2/constant/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2022 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package constant - -const ( - DomainIndex = "dom" - SubjectIndex = "sub" - ObjectIndex = "obj" - PriorityIndex = "priority" -) - -const ( - AllowOverrideEffect = "some(where (p_eft == allow))" - DenyOverrideEffect = "!some(where (p_eft == deny))" - AllowAndDenyEffect = "some(where (p_eft == allow)) && !some(where (p_eft == deny))" - PriorityEffect = "priority(p_eft) || deny" - SubjectPriorityEffect = "subjectPriority(p_eft) || deny" -) diff --git a/vendor/github.com/casbin/casbin/v2/effector/default_effector.go b/vendor/github.com/casbin/casbin/v2/effector/default_effector.go deleted file mode 100644 index feb083a6..00000000 --- a/vendor/github.com/casbin/casbin/v2/effector/default_effector.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2018 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package effector - -import ( - "errors" - - "github.com/casbin/casbin/v2/constant" -) - -// DefaultEffector is default effector for Casbin. -type DefaultEffector struct { -} - -// NewDefaultEffector is the constructor for DefaultEffector. -func NewDefaultEffector() *DefaultEffector { - e := DefaultEffector{} - return &e -} - -// MergeEffects merges all matching results collected by the enforcer into a single decision. 
-func (e *DefaultEffector) MergeEffects(expr string, effects []Effect, matches []float64, policyIndex int, policyLength int) (Effect, int, error) { - result := Indeterminate - explainIndex := -1 - - switch expr { - case constant.AllowOverrideEffect: - if matches[policyIndex] == 0 { - break - } - // only check the current policyIndex - if effects[policyIndex] == Allow { - result = Allow - explainIndex = policyIndex - break - } - case constant.DenyOverrideEffect: - // only check the current policyIndex - if matches[policyIndex] != 0 && effects[policyIndex] == Deny { - result = Deny - explainIndex = policyIndex - break - } - // if no deny rules are matched at last, then allow - if policyIndex == policyLength-1 { - result = Allow - } - case constant.AllowAndDenyEffect: - // short-circuit if matched deny rule - if matches[policyIndex] != 0 && effects[policyIndex] == Deny { - result = Deny - // set hit rule to the (first) matched deny rule - explainIndex = policyIndex - break - } - - // short-circuit some effects in the middle - if policyIndex < policyLength-1 { - // choose not to short-circuit - return result, explainIndex, nil - } - // merge all effects at last - for i, eft := range effects { - if matches[i] == 0 { - continue - } - - if eft == Allow { - result = Allow - // set hit rule to first matched allow rule - explainIndex = i - break - } - } - case constant.PriorityEffect, constant.SubjectPriorityEffect: - // reverse merge, short-circuit may be earlier - for i := len(effects) - 1; i >= 0; i-- { - if matches[i] == 0 { - continue - } - - if effects[i] != Indeterminate { - if effects[i] == Allow { - result = Allow - } else { - result = Deny - } - explainIndex = i - break - } - } - default: - return Deny, -1, errors.New("unsupported effect") - } - - return result, explainIndex, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/effector/effector.go b/vendor/github.com/casbin/casbin/v2/effector/effector.go deleted file mode 100644 index 49b84c3e..00000000 --- 
a/vendor/github.com/casbin/casbin/v2/effector/effector.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package effector //nolint:cyclop // TODO - -// Effect is the result for a policy rule. -type Effect int - -// Values for policy effect. -const ( - Allow Effect = iota - Indeterminate - Deny -) - -// Effector is the interface for Casbin effectors. -type Effector interface { - // MergeEffects merges all matching results collected by the enforcer into a single decision. - MergeEffects(expr string, effects []Effect, matches []float64, policyIndex int, policyLength int) (Effect, int, error) -} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer.go b/vendor/github.com/casbin/casbin/v2/enforcer.go deleted file mode 100644 index 2d90d31c..00000000 --- a/vendor/github.com/casbin/casbin/v2/enforcer.go +++ /dev/null @@ -1,986 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -import ( - "errors" - "fmt" - "runtime/debug" - "strings" - "sync" - - "github.com/casbin/casbin/v2/effector" - "github.com/casbin/casbin/v2/log" - "github.com/casbin/casbin/v2/model" - "github.com/casbin/casbin/v2/persist" - fileadapter "github.com/casbin/casbin/v2/persist/file-adapter" - "github.com/casbin/casbin/v2/rbac" - defaultrolemanager "github.com/casbin/casbin/v2/rbac/default-role-manager" - "github.com/casbin/casbin/v2/util" - - "github.com/casbin/govaluate" -) - -// Enforcer is the main interface for authorization enforcement and policy management. -type Enforcer struct { - modelPath string - model model.Model - fm model.FunctionMap - eft effector.Effector - - adapter persist.Adapter - watcher persist.Watcher - dispatcher persist.Dispatcher - rmMap map[string]rbac.RoleManager - condRmMap map[string]rbac.ConditionalRoleManager - matcherMap sync.Map - - enabled bool - autoSave bool - autoBuildRoleLinks bool - autoNotifyWatcher bool - autoNotifyDispatcher bool - acceptJsonRequest bool - - logger log.Logger -} - -// EnforceContext is used as the first element of the parameter "rvals" in method "enforce". -type EnforceContext struct { - RType string - PType string - EType string - MType string -} - -func (e EnforceContext) GetCacheKey() string { - return "EnforceContext{" + e.RType + "-" + e.PType + "-" + e.EType + "-" + e.MType + "}" -} - -// NewEnforcer creates an enforcer via file or DB. 
-// -// File: -// -// e := casbin.NewEnforcer("path/to/basic_model.conf", "path/to/basic_policy.csv") -// -// MySQL DB: -// -// a := mysqladapter.NewDBAdapter("mysql", "mysql_username:mysql_password@tcp(127.0.0.1:3306)/") -// e := casbin.NewEnforcer("path/to/basic_model.conf", a) -func NewEnforcer(params ...interface{}) (*Enforcer, error) { - e := &Enforcer{logger: &log.DefaultLogger{}} - - parsedParamLen := 0 - paramLen := len(params) - if paramLen >= 1 { - enableLog, ok := params[paramLen-1].(bool) - if ok { - e.EnableLog(enableLog) - parsedParamLen++ - } - } - - if paramLen-parsedParamLen >= 1 { - logger, ok := params[paramLen-parsedParamLen-1].(log.Logger) - if ok { - e.logger = logger - parsedParamLen++ - } - } - - switch paramLen - parsedParamLen { - case 2: - switch p0 := params[0].(type) { - case string: - switch p1 := params[1].(type) { - case string: - err := e.InitWithFile(p0, p1) - if err != nil { - return nil, err - } - default: - err := e.InitWithAdapter(p0, p1.(persist.Adapter)) - if err != nil { - return nil, err - } - } - default: - switch params[1].(type) { - case string: - return nil, errors.New("invalid parameters for enforcer") - default: - err := e.InitWithModelAndAdapter(p0.(model.Model), params[1].(persist.Adapter)) - if err != nil { - return nil, err - } - } - } - case 1: - switch p0 := params[0].(type) { - case string: - err := e.InitWithFile(p0, "") - if err != nil { - return nil, err - } - default: - err := e.InitWithModelAndAdapter(p0.(model.Model), nil) - if err != nil { - return nil, err - } - } - case 0: - return e, nil - default: - return nil, errors.New("invalid parameters for enforcer") - } - - return e, nil -} - -// InitWithFile initializes an enforcer with a model file and a policy file. -func (e *Enforcer) InitWithFile(modelPath string, policyPath string) error { - a := fileadapter.NewAdapter(policyPath) - return e.InitWithAdapter(modelPath, a) -} - -// InitWithAdapter initializes an enforcer with a database adapter. 
-func (e *Enforcer) InitWithAdapter(modelPath string, adapter persist.Adapter) error { - m, err := model.NewModelFromFile(modelPath) - if err != nil { - return err - } - - err = e.InitWithModelAndAdapter(m, adapter) - if err != nil { - return err - } - - e.modelPath = modelPath - return nil -} - -// InitWithModelAndAdapter initializes an enforcer with a model and a database adapter. -func (e *Enforcer) InitWithModelAndAdapter(m model.Model, adapter persist.Adapter) error { - e.adapter = adapter - - e.model = m - m.SetLogger(e.logger) - e.model.PrintModel() - e.fm = model.LoadFunctionMap() - - e.initialize() - - // Do not initialize the full policy when using a filtered adapter - fa, ok := e.adapter.(persist.FilteredAdapter) - if e.adapter != nil && (!ok || ok && !fa.IsFiltered()) { - err := e.LoadPolicy() - if err != nil { - return err - } - } - - return nil -} - -// SetLogger changes the current enforcer's logger. -func (e *Enforcer) SetLogger(logger log.Logger) { - e.logger = logger - e.model.SetLogger(e.logger) - for k := range e.rmMap { - e.rmMap[k].SetLogger(e.logger) - } - for k := range e.condRmMap { - e.condRmMap[k].SetLogger(e.logger) - } -} - -func (e *Enforcer) initialize() { - e.rmMap = map[string]rbac.RoleManager{} - e.condRmMap = map[string]rbac.ConditionalRoleManager{} - e.eft = effector.NewDefaultEffector() - e.watcher = nil - e.matcherMap = sync.Map{} - - e.enabled = true - e.autoSave = true - e.autoBuildRoleLinks = true - e.autoNotifyWatcher = true - e.autoNotifyDispatcher = true - e.initRmMap() -} - -// LoadModel reloads the model from the model CONF file. -// Because the policy is attached to a model, so the policy is invalidated and needs to be reloaded by calling LoadPolicy(). 
-func (e *Enforcer) LoadModel() error { - var err error - e.model, err = model.NewModelFromFile(e.modelPath) - if err != nil { - return err - } - e.model.SetLogger(e.logger) - - e.model.PrintModel() - e.fm = model.LoadFunctionMap() - - e.initialize() - - return nil -} - -// GetModel gets the current model. -func (e *Enforcer) GetModel() model.Model { - return e.model -} - -// SetModel sets the current model. -func (e *Enforcer) SetModel(m model.Model) { - e.model = m - e.fm = model.LoadFunctionMap() - - e.model.SetLogger(e.logger) - e.initialize() -} - -// GetAdapter gets the current adapter. -func (e *Enforcer) GetAdapter() persist.Adapter { - return e.adapter -} - -// SetAdapter sets the current adapter. -func (e *Enforcer) SetAdapter(adapter persist.Adapter) { - e.adapter = adapter -} - -// SetWatcher sets the current watcher. -func (e *Enforcer) SetWatcher(watcher persist.Watcher) error { - e.watcher = watcher - if _, ok := e.watcher.(persist.WatcherEx); ok { - // The callback of WatcherEx has no generic implementation. - return nil - } else { - // In case the Watcher wants to use a customized callback function, call `SetUpdateCallback` after `SetWatcher`. - return watcher.SetUpdateCallback(func(string) { _ = e.LoadPolicy() }) - } -} - -// GetRoleManager gets the current role manager. -func (e *Enforcer) GetRoleManager() rbac.RoleManager { - if e.rmMap != nil && e.rmMap["g"] != nil { - return e.rmMap["g"] - } else { - return nil - } -} - -// GetNamedRoleManager gets the role manager for the named policy. -func (e *Enforcer) GetNamedRoleManager(ptype string) rbac.RoleManager { - if e.rmMap != nil && e.rmMap[ptype] != nil { - return e.rmMap[ptype] - } else { - return nil - } -} - -// SetRoleManager sets the current role manager. -func (e *Enforcer) SetRoleManager(rm rbac.RoleManager) { - e.invalidateMatcherMap() - e.rmMap["g"] = rm -} - -// SetNamedRoleManager sets the role manager for the named policy. 
-func (e *Enforcer) SetNamedRoleManager(ptype string, rm rbac.RoleManager) { - e.invalidateMatcherMap() - e.rmMap[ptype] = rm -} - -// SetEffector sets the current effector. -func (e *Enforcer) SetEffector(eft effector.Effector) { - e.eft = eft -} - -// ClearPolicy clears all policy. -func (e *Enforcer) ClearPolicy() { - e.invalidateMatcherMap() - - if e.dispatcher != nil && e.autoNotifyDispatcher { - _ = e.dispatcher.ClearPolicy() - return - } - e.model.ClearPolicy() -} - -// LoadPolicy reloads the policy from file/database. -func (e *Enforcer) LoadPolicy() error { - needToRebuild := false - newModel := e.model.Copy() - newModel.ClearPolicy() - - var err error - defer func() { - if err != nil { - if e.autoBuildRoleLinks && needToRebuild { - _ = e.BuildRoleLinks() - } - } - }() - - if err = e.adapter.LoadPolicy(newModel); err != nil && err.Error() != "invalid file path, file path cannot be empty" { - return err - } - - if err = newModel.SortPoliciesBySubjectHierarchy(); err != nil { - return err - } - - if err = newModel.SortPoliciesByPriority(); err != nil { - return err - } - - if e.autoBuildRoleLinks { - needToRebuild = true - if err := e.rebuildRoleLinks(newModel); err != nil { - return err - } - - if err := e.rebuildConditionalRoleLinks(newModel); err != nil { - return err - } - } - e.model = newModel - e.invalidateMatcherMap() - return nil -} - -func (e *Enforcer) rebuildRoleLinks(newModel model.Model) error { - if len(e.rmMap) != 0 { - for _, rm := range e.rmMap { - err := rm.Clear() - if err != nil { - return err - } - } - - err := newModel.BuildRoleLinks(e.rmMap) - if err != nil { - return err - } - } - - return nil -} - -func (e *Enforcer) rebuildConditionalRoleLinks(newModel model.Model) error { - if len(e.condRmMap) != 0 { - for _, crm := range e.condRmMap { - err := crm.Clear() - if err != nil { - return err - } - } - - err := newModel.BuildConditionalRoleLinks(e.condRmMap) - if err != nil { - return err - } - } - return nil -} - -func (e *Enforcer) 
loadFilteredPolicy(filter interface{}) error { - e.invalidateMatcherMap() - - var filteredAdapter persist.FilteredAdapter - - // Attempt to cast the Adapter as a FilteredAdapter - switch adapter := e.adapter.(type) { - case persist.FilteredAdapter: - filteredAdapter = adapter - default: - return errors.New("filtered policies are not supported by this adapter") - } - if err := filteredAdapter.LoadFilteredPolicy(e.model, filter); err != nil && err.Error() != "invalid file path, file path cannot be empty" { - return err - } - - if err := e.model.SortPoliciesBySubjectHierarchy(); err != nil { - return err - } - - if err := e.model.SortPoliciesByPriority(); err != nil { - return err - } - - e.initRmMap() - e.model.PrintPolicy() - if e.autoBuildRoleLinks { - err := e.BuildRoleLinks() - if err != nil { - return err - } - } - return nil -} - -// LoadFilteredPolicy reloads a filtered policy from file/database. -func (e *Enforcer) LoadFilteredPolicy(filter interface{}) error { - e.model.ClearPolicy() - - return e.loadFilteredPolicy(filter) -} - -// LoadIncrementalFilteredPolicy append a filtered policy from file/database. -func (e *Enforcer) LoadIncrementalFilteredPolicy(filter interface{}) error { - return e.loadFilteredPolicy(filter) -} - -// IsFiltered returns true if the loaded policy has been filtered. -func (e *Enforcer) IsFiltered() bool { - filteredAdapter, ok := e.adapter.(persist.FilteredAdapter) - if !ok { - return false - } - return filteredAdapter.IsFiltered() -} - -// SavePolicy saves the current policy (usually after changed with Casbin API) back to file/database. 
-func (e *Enforcer) SavePolicy() error { - if e.IsFiltered() { - return errors.New("cannot save a filtered policy") - } - if err := e.adapter.SavePolicy(e.model); err != nil { - return err - } - if e.watcher != nil { - var err error - if watcher, ok := e.watcher.(persist.WatcherEx); ok { - err = watcher.UpdateForSavePolicy(e.model) - } else { - err = e.watcher.Update() - } - return err - } - return nil -} - -func (e *Enforcer) initRmMap() { - for ptype, assertion := range e.model["g"] { - if rm, ok := e.rmMap[ptype]; ok { - _ = rm.Clear() - continue - } - if len(assertion.Tokens) <= 2 && len(assertion.ParamsTokens) == 0 { - assertion.RM = defaultrolemanager.NewRoleManagerImpl(10) - e.rmMap[ptype] = assertion.RM - } - if len(assertion.Tokens) <= 2 && len(assertion.ParamsTokens) != 0 { - assertion.CondRM = defaultrolemanager.NewConditionalRoleManager(10) - e.condRmMap[ptype] = assertion.CondRM - } - if len(assertion.Tokens) > 2 { - if len(assertion.ParamsTokens) == 0 { - assertion.RM = defaultrolemanager.NewRoleManager(10) - e.rmMap[ptype] = assertion.RM - } else { - assertion.CondRM = defaultrolemanager.NewConditionalDomainManager(10) - e.condRmMap[ptype] = assertion.CondRM - } - matchFun := "keyMatch(r_dom, p_dom)" - if strings.Contains(e.model["m"]["m"].Value, matchFun) { - e.AddNamedDomainMatchingFunc(ptype, "g", util.KeyMatch) - } - } - } -} - -// EnableEnforce changes the enforcing state of Casbin, when Casbin is disabled, all access will be allowed by the Enforce() function. -func (e *Enforcer) EnableEnforce(enable bool) { - e.enabled = enable -} - -// EnableLog changes whether Casbin will log messages to the Logger. -func (e *Enforcer) EnableLog(enable bool) { - e.logger.EnableLog(enable) -} - -// IsLogEnabled returns the current logger's enabled status. -func (e *Enforcer) IsLogEnabled() bool { - return e.logger.IsEnabled() -} - -// EnableAutoNotifyWatcher controls whether to save a policy rule automatically notify the Watcher when it is added or removed. 
-func (e *Enforcer) EnableAutoNotifyWatcher(enable bool) { - e.autoNotifyWatcher = enable -} - -// EnableAutoNotifyDispatcher controls whether to save a policy rule automatically notify the Dispatcher when it is added or removed. -func (e *Enforcer) EnableAutoNotifyDispatcher(enable bool) { - e.autoNotifyDispatcher = enable -} - -// EnableAutoSave controls whether to save a policy rule automatically to the adapter when it is added or removed. -func (e *Enforcer) EnableAutoSave(autoSave bool) { - e.autoSave = autoSave -} - -// EnableAutoBuildRoleLinks controls whether to rebuild the role inheritance relations when a role is added or deleted. -func (e *Enforcer) EnableAutoBuildRoleLinks(autoBuildRoleLinks bool) { - e.autoBuildRoleLinks = autoBuildRoleLinks -} - -// EnableAcceptJsonRequest controls whether to accept json as a request parameter. -func (e *Enforcer) EnableAcceptJsonRequest(acceptJsonRequest bool) { - e.acceptJsonRequest = acceptJsonRequest -} - -// BuildRoleLinks manually rebuild the role inheritance relations. -func (e *Enforcer) BuildRoleLinks() error { - if e.rmMap == nil { - return errors.New("rmMap is nil") - } - for _, rm := range e.rmMap { - err := rm.Clear() - if err != nil { - return err - } - } - - return e.model.BuildRoleLinks(e.rmMap) -} - -// BuildIncrementalRoleLinks provides incremental build the role inheritance relations. -func (e *Enforcer) BuildIncrementalRoleLinks(op model.PolicyOp, ptype string, rules [][]string) error { - e.invalidateMatcherMap() - return e.model.BuildIncrementalRoleLinks(e.rmMap, op, "g", ptype, rules) -} - -// BuildIncrementalConditionalRoleLinks provides incremental build the role inheritance relations with conditions. 
-func (e *Enforcer) BuildIncrementalConditionalRoleLinks(op model.PolicyOp, ptype string, rules [][]string) error { - e.invalidateMatcherMap() - return e.model.BuildIncrementalConditionalRoleLinks(e.condRmMap, op, "g", ptype, rules) -} - -// NewEnforceContext Create a default structure based on the suffix. -func NewEnforceContext(suffix string) EnforceContext { - return EnforceContext{ - RType: "r" + suffix, - PType: "p" + suffix, - EType: "e" + suffix, - MType: "m" + suffix, - } -} - -func (e *Enforcer) invalidateMatcherMap() { - e.matcherMap = sync.Map{} -} - -// enforce use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". -func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interface{}) (ok bool, err error) { //nolint:funlen,cyclop,gocyclo // TODO: reduce function complexity - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("panic: %v\n%s", r, debug.Stack()) - } - }() - - if !e.enabled { - return true, nil - } - - functions := e.fm.GetFunctions() - if _, ok := e.model["g"]; ok { - for key, ast := range e.model["g"] { - // g must be a normal role definition (ast.RM != nil) - // or a conditional role definition (ast.CondRM != nil) - // ast.RM and ast.CondRM shouldn't be nil at the same time - if ast.RM != nil { - functions[key] = util.GenerateGFunction(ast.RM) - } - if ast.CondRM != nil { - functions[key] = util.GenerateConditionalGFunction(ast.CondRM) - } - } - } - - var ( - rType = "r" - pType = "p" - eType = "e" - mType = "m" - ) - if len(rvals) != 0 { - switch rvals[0].(type) { - case EnforceContext: - enforceContext := rvals[0].(EnforceContext) - rType = enforceContext.RType - pType = enforceContext.PType - eType = enforceContext.EType - mType = enforceContext.MType - rvals = rvals[1:] - default: - break - } - } - - var expString string - if matcher == "" { - expString 
= e.model["m"][mType].Value - } else { - expString = util.RemoveComments(util.EscapeAssertion(matcher)) - } - - rTokens := make(map[string]int, len(e.model["r"][rType].Tokens)) - for i, token := range e.model["r"][rType].Tokens { - rTokens[token] = i - } - pTokens := make(map[string]int, len(e.model["p"][pType].Tokens)) - for i, token := range e.model["p"][pType].Tokens { - pTokens[token] = i - } - - if e.acceptJsonRequest { - // try to parse all request values from json to map[string]interface{} - // skip if there is an error - for i, rval := range rvals { - switch rval := rval.(type) { - case string: - var mapValue map[string]interface{} - mapValue, err = util.JsonToMap(rval) - if err == nil { - rvals[i] = mapValue - } - } - } - } - - parameters := enforceParameters{ - rTokens: rTokens, - rVals: rvals, - - pTokens: pTokens, - } - - hasEval := util.HasEval(expString) - if hasEval { - functions["eval"] = generateEvalFunction(functions, ¶meters) - } - var expression *govaluate.EvaluableExpression - expression, err = e.getAndStoreMatcherExpression(hasEval, expString, functions) - if err != nil { - return false, err - } - - if len(e.model["r"][rType].Tokens) != len(rvals) { - return false, fmt.Errorf( - "invalid request size: expected %d, got %d, rvals: %v", - len(e.model["r"][rType].Tokens), - len(rvals), - rvals) - } - - var policyEffects []effector.Effect - var matcherResults []float64 - - var effect effector.Effect - var explainIndex int - - if policyLen := len(e.model["p"][pType].Policy); policyLen != 0 && strings.Contains(expString, pType+"_") { //nolint:nestif // TODO: reduce function complexity - policyEffects = make([]effector.Effect, policyLen) - matcherResults = make([]float64, policyLen) - - for policyIndex, pvals := range e.model["p"][pType].Policy { - // log.LogPrint("Policy Rule: ", pvals) - if len(e.model["p"][pType].Tokens) != len(pvals) { - return false, fmt.Errorf( - "invalid policy size: expected %d, got %d, pvals: %v", - 
len(e.model["p"][pType].Tokens), - len(pvals), - pvals) - } - - parameters.pVals = pvals - - result, err := expression.Eval(parameters) - // log.LogPrint("Result: ", result) - - if err != nil { - return false, err - } - - // set to no-match at first - matcherResults[policyIndex] = 0 - switch result := result.(type) { - case bool: - if result { - matcherResults[policyIndex] = 1 - } - case float64: - if result != 0 { - matcherResults[policyIndex] = 1 - } - default: - return false, errors.New("matcher result should be bool, int or float") - } - - if j, ok := parameters.pTokens[pType+"_eft"]; ok { - eft := parameters.pVals[j] - if eft == "allow" { - policyEffects[policyIndex] = effector.Allow - } else if eft == "deny" { - policyEffects[policyIndex] = effector.Deny - } else { - policyEffects[policyIndex] = effector.Indeterminate - } - } else { - policyEffects[policyIndex] = effector.Allow - } - - // if e.model["e"]["e"].Value == "priority(p_eft) || deny" { - // break - // } - - effect, explainIndex, err = e.eft.MergeEffects(e.model["e"][eType].Value, policyEffects, matcherResults, policyIndex, policyLen) - if err != nil { - return false, err - } - if effect != effector.Indeterminate { - break - } - } - } else { - if hasEval && len(e.model["p"][pType].Policy) == 0 { - return false, errors.New("please make sure rule exists in policy when using eval() in matcher") - } - - policyEffects = make([]effector.Effect, 1) - matcherResults = make([]float64, 1) - matcherResults[0] = 1 - - parameters.pVals = make([]string, len(parameters.pTokens)) - - result, err := expression.Eval(parameters) - - if err != nil { - return false, err - } - - if result.(bool) { - policyEffects[0] = effector.Allow - } else { - policyEffects[0] = effector.Indeterminate - } - - effect, explainIndex, err = e.eft.MergeEffects(e.model["e"][eType].Value, policyEffects, matcherResults, 0, 1) - if err != nil { - return false, err - } - } - - var logExplains [][]string - - if explains != nil { - if 
len(*explains) > 0 { - logExplains = append(logExplains, *explains) - } - - if explainIndex != -1 && len(e.model["p"][pType].Policy) > explainIndex { - *explains = e.model["p"][pType].Policy[explainIndex] - logExplains = append(logExplains, *explains) - } - } - - // effect -> result - result := false - if effect == effector.Allow { - result = true - } - e.logger.LogEnforce(expString, rvals, result, logExplains) - - return result, nil -} - -func (e *Enforcer) getAndStoreMatcherExpression(hasEval bool, expString string, functions map[string]govaluate.ExpressionFunction) (*govaluate.EvaluableExpression, error) { - var expression *govaluate.EvaluableExpression - var err error - var cachedExpression, isPresent = e.matcherMap.Load(expString) - - if !hasEval && isPresent { - expression = cachedExpression.(*govaluate.EvaluableExpression) - } else { - expression, err = govaluate.NewEvaluableExpressionWithFunctions(expString, functions) - if err != nil { - return nil, err - } - e.matcherMap.Store(expString, expression) - } - return expression, nil -} - -// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). -func (e *Enforcer) Enforce(rvals ...interface{}) (bool, error) { - return e.enforce("", nil, rvals...) -} - -// EnforceWithMatcher use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". -func (e *Enforcer) EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) { - return e.enforce(matcher, nil, rvals...) -} - -// EnforceEx explain enforcement by informing matched rules. -func (e *Enforcer) EnforceEx(rvals ...interface{}) (bool, []string, error) { - explain := []string{} - result, err := e.enforce("", &explain, rvals...) 
- return result, explain, err -} - -// EnforceExWithMatcher use a custom matcher and explain enforcement by informing matched rules. -func (e *Enforcer) EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) { - explain := []string{} - result, err := e.enforce(matcher, &explain, rvals...) - return result, explain, err -} - -// BatchEnforce enforce in batches. -func (e *Enforcer) BatchEnforce(requests [][]interface{}) ([]bool, error) { - var results []bool - for _, request := range requests { - result, err := e.enforce("", nil, request...) - if err != nil { - return results, err - } - results = append(results, result) - } - return results, nil -} - -// BatchEnforceWithMatcher enforce with matcher in batches. -func (e *Enforcer) BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) { - var results []bool - for _, request := range requests { - result, err := e.enforce(matcher, nil, request...) - if err != nil { - return results, err - } - results = append(results, result) - } - return results, nil -} - -// AddNamedMatchingFunc add MatchingFunc by ptype RoleManager. -func (e *Enforcer) AddNamedMatchingFunc(ptype, name string, fn rbac.MatchingFunc) bool { - if rm, ok := e.rmMap[ptype]; ok { - rm.AddMatchingFunc(name, fn) - return true - } - return false -} - -// AddNamedDomainMatchingFunc add MatchingFunc by ptype to RoleManager. -func (e *Enforcer) AddNamedDomainMatchingFunc(ptype, name string, fn rbac.MatchingFunc) bool { - if rm, ok := e.rmMap[ptype]; ok { - rm.AddDomainMatchingFunc(name, fn) - return true - } - return false -} - -// AddNamedLinkConditionFunc Add condition function fn for Link userName->roleName, -// when fn returns true, Link is valid, otherwise invalid. 
-func (e *Enforcer) AddNamedLinkConditionFunc(ptype, user, role string, fn rbac.LinkConditionFunc) bool { - if rm, ok := e.condRmMap[ptype]; ok { - rm.AddLinkConditionFunc(user, role, fn) - return true - } - return false -} - -// AddNamedDomainLinkConditionFunc Add condition function fn for Link userName-> {roleName, domain}, -// when fn returns true, Link is valid, otherwise invalid. -func (e *Enforcer) AddNamedDomainLinkConditionFunc(ptype, user, role string, domain string, fn rbac.LinkConditionFunc) bool { - if rm, ok := e.condRmMap[ptype]; ok { - rm.AddDomainLinkConditionFunc(user, role, domain, fn) - return true - } - return false -} - -// SetNamedLinkConditionFuncParams Sets the parameters of the condition function fn for Link userName->roleName. -func (e *Enforcer) SetNamedLinkConditionFuncParams(ptype, user, role string, params ...string) bool { - if rm, ok := e.condRmMap[ptype]; ok { - rm.SetLinkConditionFuncParams(user, role, params...) - return true - } - return false -} - -// SetNamedDomainLinkConditionFuncParams Sets the parameters of the condition function fn -// for Link userName->{roleName, domain}. -func (e *Enforcer) SetNamedDomainLinkConditionFuncParams(ptype, user, role, domain string, params ...string) bool { - if rm, ok := e.condRmMap[ptype]; ok { - rm.SetDomainLinkConditionFuncParams(user, role, domain, params...) - return true - } - return false -} - -// assumes bounds have already been checked. -type enforceParameters struct { - rTokens map[string]int - rVals []interface{} - - pTokens map[string]int - pVals []string -} - -// implements govaluate.Parameters. 
-func (p enforceParameters) Get(name string) (interface{}, error) { - if name == "" { - return nil, nil - } - - switch name[0] { - case 'p': - i, ok := p.pTokens[name] - if !ok { - return nil, errors.New("No parameter '" + name + "' found.") - } - return p.pVals[i], nil - case 'r': - i, ok := p.rTokens[name] - if !ok { - return nil, errors.New("No parameter '" + name + "' found.") - } - return p.rVals[i], nil - default: - return nil, errors.New("No parameter '" + name + "' found.") - } -} - -func generateEvalFunction(functions map[string]govaluate.ExpressionFunction, parameters *enforceParameters) govaluate.ExpressionFunction { - return func(args ...interface{}) (interface{}, error) { - if len(args) != 1 { - return nil, fmt.Errorf("function eval(subrule string) expected %d arguments, but got %d", 1, len(args)) - } - - expression, ok := args[0].(string) - if !ok { - return nil, errors.New("argument of eval(subrule string) must be a string") - } - expression = util.EscapeAssertion(expression) - expr, err := govaluate.NewEvaluableExpressionWithFunctions(expression, functions) - if err != nil { - return nil, fmt.Errorf("error while parsing eval parameter: %s, %s", expression, err.Error()) - } - return expr.Eval(parameters) - } -} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_cached.go b/vendor/github.com/casbin/casbin/v2/enforcer_cached.go deleted file mode 100644 index 1e72a976..00000000 --- a/vendor/github.com/casbin/casbin/v2/enforcer_cached.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2018 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -import ( - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/casbin/casbin/v2/persist/cache" -) - -// CachedEnforcer wraps Enforcer and provides decision cache. -type CachedEnforcer struct { - *Enforcer - expireTime time.Duration - cache cache.Cache - enableCache int32 - locker *sync.RWMutex -} - -type CacheableParam interface { - GetCacheKey() string -} - -// NewCachedEnforcer creates a cached enforcer via file or DB. -func NewCachedEnforcer(params ...interface{}) (*CachedEnforcer, error) { - e := &CachedEnforcer{} - var err error - e.Enforcer, err = NewEnforcer(params...) - if err != nil { - return nil, err - } - - e.enableCache = 1 - e.cache, _ = cache.NewDefaultCache() - e.locker = new(sync.RWMutex) - return e, nil -} - -// EnableCache determines whether to enable cache on Enforce(). When enableCache is enabled, cached result (true | false) will be returned for previous decisions. -func (e *CachedEnforcer) EnableCache(enableCache bool) { - var enabled int32 - if enableCache { - enabled = 1 - } - atomic.StoreInt32(&e.enableCache, enabled) -} - -// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). -// if rvals is not string , ignore the cache. -func (e *CachedEnforcer) Enforce(rvals ...interface{}) (bool, error) { - if atomic.LoadInt32(&e.enableCache) == 0 { - return e.Enforcer.Enforce(rvals...) - } - - key, ok := e.getKey(rvals...) - if !ok { - return e.Enforcer.Enforce(rvals...) 
- } - - if res, err := e.getCachedResult(key); err == nil { - return res, nil - } else if err != cache.ErrNoSuchKey { - return res, err - } - - res, err := e.Enforcer.Enforce(rvals...) - if err != nil { - return false, err - } - - err = e.setCachedResult(key, res, e.expireTime) - return res, err -} - -func (e *CachedEnforcer) LoadPolicy() error { - if atomic.LoadInt32(&e.enableCache) != 0 { - if err := e.cache.Clear(); err != nil { - return err - } - } - return e.Enforcer.LoadPolicy() -} - -func (e *CachedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { - if atomic.LoadInt32(&e.enableCache) != 0 { - key, ok := e.getKey(params...) - if ok { - if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { - return false, err - } - } - } - return e.Enforcer.RemovePolicy(params...) -} - -func (e *CachedEnforcer) RemovePolicies(rules [][]string) (bool, error) { - if len(rules) != 0 { - if atomic.LoadInt32(&e.enableCache) != 0 { - irule := make([]interface{}, len(rules[0])) - for _, rule := range rules { - for i, param := range rule { - irule[i] = param - } - key, _ := e.getKey(irule...) - if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { - return false, err - } - } - } - } - return e.Enforcer.RemovePolicies(rules) -} - -func (e *CachedEnforcer) getCachedResult(key string) (res bool, err error) { - e.locker.Lock() - defer e.locker.Unlock() - return e.cache.Get(key) -} - -func (e *CachedEnforcer) SetExpireTime(expireTime time.Duration) { - e.expireTime = expireTime -} - -func (e *CachedEnforcer) SetCache(c cache.Cache) { - e.cache = c -} - -func (e *CachedEnforcer) setCachedResult(key string, res bool, extra ...interface{}) error { - e.locker.Lock() - defer e.locker.Unlock() - return e.cache.Set(key, res, extra...) -} - -func (e *CachedEnforcer) getKey(params ...interface{}) (string, bool) { - return GetCacheKey(params...) -} - -// InvalidateCache deletes all the existing cached decisions. 
-func (e *CachedEnforcer) InvalidateCache() error { - e.locker.Lock() - defer e.locker.Unlock() - return e.cache.Clear() -} - -func GetCacheKey(params ...interface{}) (string, bool) { - key := strings.Builder{} - for _, param := range params { - switch typedParam := param.(type) { - case string: - key.WriteString(typedParam) - case CacheableParam: - key.WriteString(typedParam.GetCacheKey()) - default: - return "", false - } - key.WriteString("$$") - } - return key.String(), true -} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go b/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go deleted file mode 100644 index 0032460f..00000000 --- a/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2018 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/casbin/casbin/v2/persist/cache" -) - -// SyncedCachedEnforcer wraps Enforcer and provides decision sync cache. -type SyncedCachedEnforcer struct { - *SyncedEnforcer - expireTime time.Duration - cache cache.Cache - enableCache int32 - locker *sync.RWMutex -} - -// NewSyncedCachedEnforcer creates a sync cached enforcer via file or DB. 
-func NewSyncedCachedEnforcer(params ...interface{}) (*SyncedCachedEnforcer, error) { - e := &SyncedCachedEnforcer{} - var err error - e.SyncedEnforcer, err = NewSyncedEnforcer(params...) - if err != nil { - return nil, err - } - - e.enableCache = 1 - e.cache, _ = cache.NewSyncCache() - e.locker = new(sync.RWMutex) - return e, nil -} - -// EnableCache determines whether to enable cache on Enforce(). When enableCache is enabled, cached result (true | false) will be returned for previous decisions. -func (e *SyncedCachedEnforcer) EnableCache(enableCache bool) { - var enabled int32 - if enableCache { - enabled = 1 - } - atomic.StoreInt32(&e.enableCache, enabled) -} - -// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). -// if rvals is not string , ignore the cache. -func (e *SyncedCachedEnforcer) Enforce(rvals ...interface{}) (bool, error) { - if atomic.LoadInt32(&e.enableCache) == 0 { - return e.SyncedEnforcer.Enforce(rvals...) - } - - key, ok := e.getKey(rvals...) - if !ok { - return e.SyncedEnforcer.Enforce(rvals...) - } - - if res, err := e.getCachedResult(key); err == nil { - return res, nil - } else if err != cache.ErrNoSuchKey { - return res, err - } - - res, err := e.SyncedEnforcer.Enforce(rvals...) - if err != nil { - return false, err - } - - err = e.setCachedResult(key, res, e.expireTime) - return res, err -} - -func (e *SyncedCachedEnforcer) LoadPolicy() error { - if atomic.LoadInt32(&e.enableCache) != 0 { - if err := e.cache.Clear(); err != nil { - return err - } - } - return e.SyncedEnforcer.LoadPolicy() -} - -func (e *SyncedCachedEnforcer) AddPolicy(params ...interface{}) (bool, error) { - if ok, err := e.checkOneAndRemoveCache(params...); !ok { - return ok, err - } - return e.SyncedEnforcer.AddPolicy(params...) 
-} - -func (e *SyncedCachedEnforcer) AddPolicies(rules [][]string) (bool, error) { - if ok, err := e.checkManyAndRemoveCache(rules); !ok { - return ok, err - } - return e.SyncedEnforcer.AddPolicies(rules) -} - -func (e *SyncedCachedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { - if ok, err := e.checkOneAndRemoveCache(params...); !ok { - return ok, err - } - return e.SyncedEnforcer.RemovePolicy(params...) -} - -func (e *SyncedCachedEnforcer) RemovePolicies(rules [][]string) (bool, error) { - if ok, err := e.checkManyAndRemoveCache(rules); !ok { - return ok, err - } - return e.SyncedEnforcer.RemovePolicies(rules) -} - -func (e *SyncedCachedEnforcer) getCachedResult(key string) (res bool, err error) { - return e.cache.Get(key) -} - -func (e *SyncedCachedEnforcer) SetExpireTime(expireTime time.Duration) { - e.locker.Lock() - defer e.locker.Unlock() - e.expireTime = expireTime -} - -// SetCache need to be sync cache. -func (e *SyncedCachedEnforcer) SetCache(c cache.Cache) { - e.locker.Lock() - defer e.locker.Unlock() - e.cache = c -} - -func (e *SyncedCachedEnforcer) setCachedResult(key string, res bool, extra ...interface{}) error { - return e.cache.Set(key, res, extra...) -} - -func (e *SyncedCachedEnforcer) getKey(params ...interface{}) (string, bool) { - return GetCacheKey(params...) -} - -// InvalidateCache deletes all the existing cached decisions. -func (e *SyncedCachedEnforcer) InvalidateCache() error { - return e.cache.Clear() -} - -func (e *SyncedCachedEnforcer) checkOneAndRemoveCache(params ...interface{}) (bool, error) { - if atomic.LoadInt32(&e.enableCache) != 0 { - key, ok := e.getKey(params...) 
- if ok { - if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { - return false, err - } - } - } - return true, nil -} - -func (e *SyncedCachedEnforcer) checkManyAndRemoveCache(rules [][]string) (bool, error) { - if len(rules) != 0 { - if atomic.LoadInt32(&e.enableCache) != 0 { - irule := make([]interface{}, len(rules[0])) - for _, rule := range rules { - for i, param := range rule { - irule[i] = param - } - key, _ := e.getKey(irule...) - if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { - return false, err - } - } - } - } - return true, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go b/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go deleted file mode 100644 index 09f66723..00000000 --- a/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go +++ /dev/null @@ -1,239 +0,0 @@ -package casbin - -import ( - "github.com/casbin/casbin/v2/model" - "github.com/casbin/casbin/v2/persist" -) - -// DistributedEnforcer wraps SyncedEnforcer for dispatcher. -type DistributedEnforcer struct { - *SyncedEnforcer -} - -func NewDistributedEnforcer(params ...interface{}) (*DistributedEnforcer, error) { - e := &DistributedEnforcer{} - var err error - e.SyncedEnforcer, err = NewSyncedEnforcer(params...) - if err != nil { - return nil, err - } - - return e, nil -} - -// SetDispatcher sets the current dispatcher. -func (d *DistributedEnforcer) SetDispatcher(dispatcher persist.Dispatcher) { - d.dispatcher = dispatcher -} - -// AddPoliciesSelf provides a method for dispatcher to add authorization rules to the current policy. -// The function returns the rules affected and error. 
-func (d *DistributedEnforcer) AddPoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) { - d.m.Lock() - defer d.m.Unlock() - if shouldPersist != nil && shouldPersist() { - var noExistsPolicy [][]string - for _, rule := range rules { - var hasPolicy bool - hasPolicy, err = d.model.HasPolicy(sec, ptype, rule) - if err != nil { - return nil, err - } - if !hasPolicy { - noExistsPolicy = append(noExistsPolicy, rule) - } - } - - if err = d.adapter.(persist.BatchAdapter).AddPolicies(sec, ptype, noExistsPolicy); err != nil && err.Error() != notImplemented { - return nil, err - } - } - - affected, err = d.model.AddPoliciesWithAffected(sec, ptype, rules) - if err != nil { - return affected, err - } - - if sec == "g" { - err := d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, affected) - if err != nil { - return affected, err - } - } - - return affected, nil -} - -// RemovePoliciesSelf provides a method for dispatcher to remove a set of rules from current policy. -// The function returns the rules affected and error. -func (d *DistributedEnforcer) RemovePoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) { - d.m.Lock() - defer d.m.Unlock() - if shouldPersist != nil && shouldPersist() { - if err = d.adapter.(persist.BatchAdapter).RemovePolicies(sec, ptype, rules); err != nil { - if err.Error() != notImplemented { - return nil, err - } - } - } - - affected, err = d.model.RemovePoliciesWithAffected(sec, ptype, rules) - if err != nil { - return affected, err - } - - if sec == "g" { - err = d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, affected) - if err != nil { - return affected, err - } - } - - return affected, err -} - -// RemoveFilteredPolicySelf provides a method for dispatcher to remove an authorization rule from the current policy, field filters can be specified. -// The function returns the rules affected and error. 
-func (d *DistributedEnforcer) RemoveFilteredPolicySelf(shouldPersist func() bool, sec string, ptype string, fieldIndex int, fieldValues ...string) (affected [][]string, err error) { - d.m.Lock() - defer d.m.Unlock() - if shouldPersist != nil && shouldPersist() { - if err = d.adapter.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...); err != nil { - if err.Error() != notImplemented { - return nil, err - } - } - } - - _, affected, err = d.model.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) - if err != nil { - return affected, err - } - - if sec == "g" { - err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, affected) - if err != nil { - return affected, err - } - } - - return affected, nil -} - -// ClearPolicySelf provides a method for dispatcher to clear all rules from the current policy. -func (d *DistributedEnforcer) ClearPolicySelf(shouldPersist func() bool) error { - d.m.Lock() - defer d.m.Unlock() - if shouldPersist != nil && shouldPersist() { - err := d.adapter.SavePolicy(nil) - if err != nil { - return err - } - } - - d.model.ClearPolicy() - - return nil -} - -// UpdatePolicySelf provides a method for dispatcher to update an authorization rule from the current policy. 
-func (d *DistributedEnforcer) UpdatePolicySelf(shouldPersist func() bool, sec string, ptype string, oldRule, newRule []string) (affected bool, err error) { - d.m.Lock() - defer d.m.Unlock() - if shouldPersist != nil && shouldPersist() { - err = d.adapter.(persist.UpdatableAdapter).UpdatePolicy(sec, ptype, oldRule, newRule) - if err != nil { - return false, err - } - } - - ruleUpdated, err := d.model.UpdatePolicy(sec, ptype, oldRule, newRule) - if !ruleUpdated || err != nil { - return ruleUpdated, err - } - - if sec == "g" { - err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{oldRule}) // remove the old rule - if err != nil { - return ruleUpdated, err - } - err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{newRule}) // add the new rule - if err != nil { - return ruleUpdated, err - } - } - - return ruleUpdated, nil -} - -// UpdatePoliciesSelf provides a method for dispatcher to update a set of authorization rules from the current policy. -func (d *DistributedEnforcer) UpdatePoliciesSelf(shouldPersist func() bool, sec string, ptype string, oldRules, newRules [][]string) (affected bool, err error) { - d.m.Lock() - defer d.m.Unlock() - if shouldPersist != nil && shouldPersist() { - err = d.adapter.(persist.UpdatableAdapter).UpdatePolicies(sec, ptype, oldRules, newRules) - if err != nil { - return false, err - } - } - - ruleUpdated, err := d.model.UpdatePolicies(sec, ptype, oldRules, newRules) - if !ruleUpdated || err != nil { - return ruleUpdated, err - } - - if sec == "g" { - err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rule - if err != nil { - return ruleUpdated, err - } - err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rule - if err != nil { - return ruleUpdated, err - } - } - - return ruleUpdated, nil -} - -// UpdateFilteredPoliciesSelf provides a method for dispatcher to update a set of authorization rules from the current policy. 
-func (d *DistributedEnforcer) UpdateFilteredPoliciesSelf(shouldPersist func() bool, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) { - d.m.Lock() - defer d.m.Unlock() - var ( - oldRules [][]string - err error - ) - if shouldPersist != nil && shouldPersist() { - oldRules, err = d.adapter.(persist.UpdatableAdapter).UpdateFilteredPolicies(sec, ptype, newRules, fieldIndex, fieldValues...) - if err != nil { - return false, err - } - } - - ruleChanged, err := d.model.RemovePolicies(sec, ptype, oldRules) - if err != nil { - return ruleChanged, err - } - err = d.model.AddPolicies(sec, ptype, newRules) - if err != nil { - return ruleChanged, err - } - ruleChanged = ruleChanged && len(newRules) != 0 - if !ruleChanged { - return ruleChanged, nil - } - - if sec == "g" { - err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rule - if err != nil { - return ruleChanged, err - } - err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rule - if err != nil { - return ruleChanged, err - } - } - - return true, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_interface.go b/vendor/github.com/casbin/casbin/v2/enforcer_interface.go deleted file mode 100644 index d22dcf10..00000000 --- a/vendor/github.com/casbin/casbin/v2/enforcer_interface.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2019 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -import ( - "github.com/casbin/casbin/v2/effector" - "github.com/casbin/casbin/v2/model" - "github.com/casbin/casbin/v2/persist" - "github.com/casbin/casbin/v2/rbac" - "github.com/casbin/govaluate" -) - -var _ IEnforcer = &Enforcer{} -var _ IEnforcer = &SyncedEnforcer{} -var _ IEnforcer = &CachedEnforcer{} - -// IEnforcer is the API interface of Enforcer. -type IEnforcer interface { - /* Enforcer API */ - InitWithFile(modelPath string, policyPath string) error - InitWithAdapter(modelPath string, adapter persist.Adapter) error - InitWithModelAndAdapter(m model.Model, adapter persist.Adapter) error - LoadModel() error - GetModel() model.Model - SetModel(m model.Model) - GetAdapter() persist.Adapter - SetAdapter(adapter persist.Adapter) - SetWatcher(watcher persist.Watcher) error - GetRoleManager() rbac.RoleManager - SetRoleManager(rm rbac.RoleManager) - SetEffector(eft effector.Effector) - ClearPolicy() - LoadPolicy() error - LoadFilteredPolicy(filter interface{}) error - LoadIncrementalFilteredPolicy(filter interface{}) error - IsFiltered() bool - SavePolicy() error - EnableEnforce(enable bool) - EnableLog(enable bool) - EnableAutoNotifyWatcher(enable bool) - EnableAutoSave(autoSave bool) - EnableAutoBuildRoleLinks(autoBuildRoleLinks bool) - BuildRoleLinks() error - Enforce(rvals ...interface{}) (bool, error) - EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) - EnforceEx(rvals ...interface{}) (bool, []string, error) - EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) - BatchEnforce(requests [][]interface{}) ([]bool, error) - BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) - - /* RBAC API */ - GetRolesForUser(name string, domain ...string) ([]string, error) - GetUsersForRole(name string, domain ...string) ([]string, error) - HasRoleForUser(name string, 
role string, domain ...string) (bool, error) - AddRoleForUser(user string, role string, domain ...string) (bool, error) - AddPermissionForUser(user string, permission ...string) (bool, error) - AddPermissionsForUser(user string, permissions ...[]string) (bool, error) - DeletePermissionForUser(user string, permission ...string) (bool, error) - DeletePermissionsForUser(user string) (bool, error) - GetPermissionsForUser(user string, domain ...string) ([][]string, error) - HasPermissionForUser(user string, permission ...string) (bool, error) - GetImplicitRolesForUser(name string, domain ...string) ([]string, error) - GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) - GetImplicitUsersForPermission(permission ...string) ([]string, error) - DeleteRoleForUser(user string, role string, domain ...string) (bool, error) - DeleteRolesForUser(user string, domain ...string) (bool, error) - DeleteUser(user string) (bool, error) - DeleteRole(role string) (bool, error) - DeletePermission(permission ...string) (bool, error) - - /* RBAC API with domains*/ - GetUsersForRoleInDomain(name string, domain string) []string - GetRolesForUserInDomain(name string, domain string) []string - GetPermissionsForUserInDomain(user string, domain string) [][]string - AddRoleForUserInDomain(user string, role string, domain string) (bool, error) - DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) - GetAllUsersByDomain(domain string) ([]string, error) - DeleteRolesForUserInDomain(user string, domain string) (bool, error) - DeleteAllUsersByDomain(domain string) (bool, error) - DeleteDomains(domains ...string) (bool, error) - GetAllDomains() ([]string, error) - GetAllRolesByDomain(domain string) ([]string, error) - - /* Management API */ - GetAllSubjects() ([]string, error) - GetAllNamedSubjects(ptype string) ([]string, error) - GetAllObjects() ([]string, error) - GetAllNamedObjects(ptype string) ([]string, error) - GetAllActions() ([]string, 
error) - GetAllNamedActions(ptype string) ([]string, error) - GetAllRoles() ([]string, error) - GetAllNamedRoles(ptype string) ([]string, error) - GetPolicy() ([][]string, error) - GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) - GetNamedPolicy(ptype string) ([][]string, error) - GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) - GetGroupingPolicy() ([][]string, error) - GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) - GetNamedGroupingPolicy(ptype string) ([][]string, error) - GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) - HasPolicy(params ...interface{}) (bool, error) - HasNamedPolicy(ptype string, params ...interface{}) (bool, error) - AddPolicy(params ...interface{}) (bool, error) - AddPolicies(rules [][]string) (bool, error) - AddNamedPolicy(ptype string, params ...interface{}) (bool, error) - AddNamedPolicies(ptype string, rules [][]string) (bool, error) - AddPoliciesEx(rules [][]string) (bool, error) - AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) - RemovePolicy(params ...interface{}) (bool, error) - RemovePolicies(rules [][]string) (bool, error) - RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) - RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) - RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) - RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) - HasGroupingPolicy(params ...interface{}) (bool, error) - HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) - AddGroupingPolicy(params ...interface{}) (bool, error) - AddGroupingPolicies(rules [][]string) (bool, error) - AddGroupingPoliciesEx(rules [][]string) (bool, error) - AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) - AddNamedGroupingPolicies(ptype string, rules 
[][]string) (bool, error) - AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) - RemoveGroupingPolicy(params ...interface{}) (bool, error) - RemoveGroupingPolicies(rules [][]string) (bool, error) - RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) - RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) - RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) - RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) - AddFunction(name string, function govaluate.ExpressionFunction) - - UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) - UpdatePolicies(oldPolicies [][]string, newPolicies [][]string) (bool, error) - UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) - - UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) - UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) - UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) - UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) - - /* Management API with autoNotifyWatcher disabled */ - SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) - SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) - SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) - SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) - SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) - SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) - SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) - SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) -} - -var _ IDistributedEnforcer = 
&DistributedEnforcer{} - -// IDistributedEnforcer defines dispatcher enforcer. -type IDistributedEnforcer interface { - IEnforcer - SetDispatcher(dispatcher persist.Dispatcher) - /* Management API for DistributedEnforcer*/ - AddPoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) - RemovePoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) - RemoveFilteredPolicySelf(shouldPersist func() bool, sec string, ptype string, fieldIndex int, fieldValues ...string) (affected [][]string, err error) - ClearPolicySelf(shouldPersist func() bool) error - UpdatePolicySelf(shouldPersist func() bool, sec string, ptype string, oldRule, newRule []string) (affected bool, err error) - UpdatePoliciesSelf(shouldPersist func() bool, sec string, ptype string, oldRules, newRules [][]string) (affected bool, err error) - UpdateFilteredPoliciesSelf(shouldPersist func() bool, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) -} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_synced.go b/vendor/github.com/casbin/casbin/v2/enforcer_synced.go deleted file mode 100644 index 985b1743..00000000 --- a/vendor/github.com/casbin/casbin/v2/enforcer_synced.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package casbin - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/casbin/govaluate" - - "github.com/casbin/casbin/v2/persist" - "github.com/casbin/casbin/v2/rbac" - defaultrolemanager "github.com/casbin/casbin/v2/rbac/default-role-manager" -) - -// SyncedEnforcer wraps Enforcer and provides synchronized access. -type SyncedEnforcer struct { - *Enforcer - m sync.RWMutex - stopAutoLoad chan struct{} - autoLoadRunning int32 -} - -// NewSyncedEnforcer creates a synchronized enforcer via file or DB. -func NewSyncedEnforcer(params ...interface{}) (*SyncedEnforcer, error) { - e := &SyncedEnforcer{} - var err error - e.Enforcer, err = NewEnforcer(params...) - if err != nil { - return nil, err - } - - e.stopAutoLoad = make(chan struct{}, 1) - e.autoLoadRunning = 0 - return e, nil -} - -// GetLock return the private RWMutex lock. -func (e *SyncedEnforcer) GetLock() *sync.RWMutex { - return &e.m -} - -// IsAutoLoadingRunning check if SyncedEnforcer is auto loading policies. -func (e *SyncedEnforcer) IsAutoLoadingRunning() bool { - return atomic.LoadInt32(&(e.autoLoadRunning)) != 0 -} - -// StartAutoLoadPolicy starts a go routine that will every specified duration call LoadPolicy. -func (e *SyncedEnforcer) StartAutoLoadPolicy(d time.Duration) { - // Don't start another goroutine if there is already one running - if !atomic.CompareAndSwapInt32(&e.autoLoadRunning, 0, 1) { - return - } - - ticker := time.NewTicker(d) - go func() { - defer func() { - ticker.Stop() - atomic.StoreInt32(&(e.autoLoadRunning), int32(0)) - }() - n := 1 - for { - select { - case <-ticker.C: - // error intentionally ignored - _ = e.LoadPolicy() - // Uncomment this line to see when the policy is loaded. - // log.Print("Load policy for time: ", n) - n++ - case <-e.stopAutoLoad: - return - } - } - }() -} - -// StopAutoLoadPolicy causes the go routine to exit. 
-func (e *SyncedEnforcer) StopAutoLoadPolicy() { - if e.IsAutoLoadingRunning() { - e.stopAutoLoad <- struct{}{} - } -} - -// SetWatcher sets the current watcher. -func (e *SyncedEnforcer) SetWatcher(watcher persist.Watcher) error { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SetWatcher(watcher) -} - -// LoadModel reloads the model from the model CONF file. -func (e *SyncedEnforcer) LoadModel() error { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.LoadModel() -} - -// ClearPolicy clears all policy. -func (e *SyncedEnforcer) ClearPolicy() { - e.m.Lock() - defer e.m.Unlock() - e.Enforcer.ClearPolicy() -} - -// LoadPolicy reloads the policy from file/database. -func (e *SyncedEnforcer) LoadPolicy() error { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.LoadPolicy() -} - -// LoadPolicyFast is not blocked when adapter calls LoadPolicy. -func (e *SyncedEnforcer) LoadPolicyFast() error { - e.m.RLock() - newModel := e.model.Copy() - e.m.RUnlock() - - newModel.ClearPolicy() - newRmMap := map[string]rbac.RoleManager{} - var err error - - if err = e.adapter.LoadPolicy(newModel); err != nil && err.Error() != "invalid file path, file path cannot be empty" { - return err - } - - if err = newModel.SortPoliciesBySubjectHierarchy(); err != nil { - return err - } - - if err = newModel.SortPoliciesByPriority(); err != nil { - return err - } - - if e.autoBuildRoleLinks { - for ptype := range newModel["g"] { - newRmMap[ptype] = defaultrolemanager.NewRoleManager(10) - } - err = newModel.BuildRoleLinks(newRmMap) - if err != nil { - return err - } - } - - // reduce the lock range - e.m.Lock() - defer e.m.Unlock() - e.model = newModel - e.rmMap = newRmMap - return nil -} - -// LoadFilteredPolicy reloads a filtered policy from file/database. 
-func (e *SyncedEnforcer) LoadFilteredPolicy(filter interface{}) error { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.LoadFilteredPolicy(filter) -} - -// LoadIncrementalFilteredPolicy reloads a filtered policy from file/database. -func (e *SyncedEnforcer) LoadIncrementalFilteredPolicy(filter interface{}) error { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.LoadIncrementalFilteredPolicy(filter) -} - -// SavePolicy saves the current policy (usually after changed with Casbin API) back to file/database. -func (e *SyncedEnforcer) SavePolicy() error { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SavePolicy() -} - -// BuildRoleLinks manually rebuild the role inheritance relations. -func (e *SyncedEnforcer) BuildRoleLinks() error { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.BuildRoleLinks() -} - -// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). -func (e *SyncedEnforcer) Enforce(rvals ...interface{}) (bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.Enforce(rvals...) -} - -// EnforceWithMatcher use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". -func (e *SyncedEnforcer) EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.EnforceWithMatcher(matcher, rvals...) -} - -// EnforceEx explain enforcement by informing matched rules. -func (e *SyncedEnforcer) EnforceEx(rvals ...interface{}) (bool, []string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.EnforceEx(rvals...) -} - -// EnforceExWithMatcher use a custom matcher and explain enforcement by informing matched rules. 
-func (e *SyncedEnforcer) EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.EnforceExWithMatcher(matcher, rvals...) -} - -// BatchEnforce enforce in batches. -func (e *SyncedEnforcer) BatchEnforce(requests [][]interface{}) ([]bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.BatchEnforce(requests) -} - -// BatchEnforceWithMatcher enforce with matcher in batches. -func (e *SyncedEnforcer) BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.BatchEnforceWithMatcher(matcher, requests) -} - -// GetAllSubjects gets the list of subjects that show up in the current policy. -func (e *SyncedEnforcer) GetAllSubjects() ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetAllSubjects() -} - -// GetAllNamedSubjects gets the list of subjects that show up in the current named policy. -func (e *SyncedEnforcer) GetAllNamedSubjects(ptype string) ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetAllNamedSubjects(ptype) -} - -// GetAllObjects gets the list of objects that show up in the current policy. -func (e *SyncedEnforcer) GetAllObjects() ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetAllObjects() -} - -// GetAllNamedObjects gets the list of objects that show up in the current named policy. -func (e *SyncedEnforcer) GetAllNamedObjects(ptype string) ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetAllNamedObjects(ptype) -} - -// GetAllActions gets the list of actions that show up in the current policy. -func (e *SyncedEnforcer) GetAllActions() ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetAllActions() -} - -// GetAllNamedActions gets the list of actions that show up in the current named policy. 
-func (e *SyncedEnforcer) GetAllNamedActions(ptype string) ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetAllNamedActions(ptype) -} - -// GetAllRoles gets the list of roles that show up in the current policy. -func (e *SyncedEnforcer) GetAllRoles() ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetAllRoles() -} - -// GetAllNamedRoles gets the list of roles that show up in the current named policy. -func (e *SyncedEnforcer) GetAllNamedRoles(ptype string) ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetAllNamedRoles(ptype) -} - -// GetPolicy gets all the authorization rules in the policy. -func (e *SyncedEnforcer) GetPolicy() ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetPolicy() -} - -// GetFilteredPolicy gets all the authorization rules in the policy, field filters can be specified. -func (e *SyncedEnforcer) GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetFilteredPolicy(fieldIndex, fieldValues...) -} - -// GetNamedPolicy gets all the authorization rules in the named policy. -func (e *SyncedEnforcer) GetNamedPolicy(ptype string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetNamedPolicy(ptype) -} - -// GetFilteredNamedPolicy gets all the authorization rules in the named policy, field filters can be specified. -func (e *SyncedEnforcer) GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetFilteredNamedPolicy(ptype, fieldIndex, fieldValues...) -} - -// GetGroupingPolicy gets all the role inheritance rules in the policy. 
-func (e *SyncedEnforcer) GetGroupingPolicy() ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetGroupingPolicy() -} - -// GetFilteredGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. -func (e *SyncedEnforcer) GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetFilteredGroupingPolicy(fieldIndex, fieldValues...) -} - -// GetNamedGroupingPolicy gets all the role inheritance rules in the policy. -func (e *SyncedEnforcer) GetNamedGroupingPolicy(ptype string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetNamedGroupingPolicy(ptype) -} - -// GetFilteredNamedGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. -func (e *SyncedEnforcer) GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetFilteredNamedGroupingPolicy(ptype, fieldIndex, fieldValues...) -} - -// HasPolicy determines whether an authorization rule exists. -func (e *SyncedEnforcer) HasPolicy(params ...interface{}) (bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.HasPolicy(params...) -} - -// HasNamedPolicy determines whether a named authorization rule exists. -func (e *SyncedEnforcer) HasNamedPolicy(ptype string, params ...interface{}) (bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.HasNamedPolicy(ptype, params...) -} - -// AddPolicy adds an authorization rule to the current policy. -// If the rule already exists, the function returns false and the rule will not be added. -// Otherwise the function returns true by adding the new rule. -func (e *SyncedEnforcer) AddPolicy(params ...interface{}) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddPolicy(params...) 
-} - -// AddPolicies adds authorization rules to the current policy. -// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. -// Otherwise the function returns true for the corresponding rule by adding the new rule. -func (e *SyncedEnforcer) AddPolicies(rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddPolicies(rules) -} - -// AddPoliciesEx adds authorization rules to the current policy. -// If the rule already exists, the rule will not be added. -// But unlike AddPolicies, other non-existent rules are added instead of returning false directly. -func (e *SyncedEnforcer) AddPoliciesEx(rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddPoliciesEx(rules) -} - -// AddNamedPolicy adds an authorization rule to the current named policy. -// If the rule already exists, the function returns false and the rule will not be added. -// Otherwise the function returns true by adding the new rule. -func (e *SyncedEnforcer) AddNamedPolicy(ptype string, params ...interface{}) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddNamedPolicy(ptype, params...) -} - -// AddNamedPolicies adds authorization rules to the current named policy. -// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. -// Otherwise the function returns true for the corresponding by adding the new rule. -func (e *SyncedEnforcer) AddNamedPolicies(ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddNamedPolicies(ptype, rules) -} - -// AddNamedPoliciesEx adds authorization rules to the current named policy. -// If the rule already exists, the rule will not be added. -// But unlike AddNamedPolicies, other non-existent rules are added instead of returning false directly. 
-func (e *SyncedEnforcer) AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddNamedPoliciesEx(ptype, rules) -} - -// RemovePolicy removes an authorization rule from the current policy. -func (e *SyncedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemovePolicy(params...) -} - -// UpdatePolicy updates an authorization rule from the current policy. -func (e *SyncedEnforcer) UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdatePolicy(oldPolicy, newPolicy) -} - -func (e *SyncedEnforcer) UpdateNamedPolicy(ptype string, p1 []string, p2 []string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdateNamedPolicy(ptype, p1, p2) -} - -// UpdatePolicies updates authorization rules from the current policies. -func (e *SyncedEnforcer) UpdatePolicies(oldPolices [][]string, newPolicies [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdatePolicies(oldPolices, newPolicies) -} - -func (e *SyncedEnforcer) UpdateNamedPolicies(ptype string, p1 [][]string, p2 [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdateNamedPolicies(ptype, p1, p2) -} - -func (e *SyncedEnforcer) UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdateFilteredPolicies(newPolicies, fieldIndex, fieldValues...) -} - -func (e *SyncedEnforcer) UpdateFilteredNamedPolicies(ptype string, newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdateFilteredNamedPolicies(ptype, newPolicies, fieldIndex, fieldValues...) -} - -// RemovePolicies removes authorization rules from the current policy. 
-func (e *SyncedEnforcer) RemovePolicies(rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemovePolicies(rules) -} - -// RemoveFilteredPolicy removes an authorization rule from the current policy, field filters can be specified. -func (e *SyncedEnforcer) RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveFilteredPolicy(fieldIndex, fieldValues...) -} - -// RemoveNamedPolicy removes an authorization rule from the current named policy. -func (e *SyncedEnforcer) RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveNamedPolicy(ptype, params...) -} - -// RemoveNamedPolicies removes authorization rules from the current named policy. -func (e *SyncedEnforcer) RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveNamedPolicies(ptype, rules) -} - -// RemoveFilteredNamedPolicy removes an authorization rule from the current named policy, field filters can be specified. -func (e *SyncedEnforcer) RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveFilteredNamedPolicy(ptype, fieldIndex, fieldValues...) -} - -// HasGroupingPolicy determines whether a role inheritance rule exists. -func (e *SyncedEnforcer) HasGroupingPolicy(params ...interface{}) (bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.HasGroupingPolicy(params...) -} - -// HasNamedGroupingPolicy determines whether a named role inheritance rule exists. -func (e *SyncedEnforcer) HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.HasNamedGroupingPolicy(ptype, params...) -} - -// AddGroupingPolicy adds a role inheritance rule to the current policy. 
-// If the rule already exists, the function returns false and the rule will not be added. -// Otherwise the function returns true by adding the new rule. -func (e *SyncedEnforcer) AddGroupingPolicy(params ...interface{}) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddGroupingPolicy(params...) -} - -// AddGroupingPolicies adds role inheritance rulea to the current policy. -// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. -// Otherwise the function returns true for the corresponding policy rule by adding the new rule. -func (e *SyncedEnforcer) AddGroupingPolicies(rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddGroupingPolicies(rules) -} - -// AddGroupingPoliciesEx adds role inheritance rules to the current policy. -// If the rule already exists, the rule will not be added. -// But unlike AddGroupingPolicies, other non-existent rules are added instead of returning false directly. -func (e *SyncedEnforcer) AddGroupingPoliciesEx(rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddGroupingPoliciesEx(rules) -} - -// AddNamedGroupingPolicy adds a named role inheritance rule to the current policy. -// If the rule already exists, the function returns false and the rule will not be added. -// Otherwise the function returns true by adding the new rule. -func (e *SyncedEnforcer) AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddNamedGroupingPolicy(ptype, params...) -} - -// AddNamedGroupingPolicies adds named role inheritance rules to the current policy. -// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. -// Otherwise the function returns true for the corresponding policy rule by adding the new rule. 
-func (e *SyncedEnforcer) AddNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddNamedGroupingPolicies(ptype, rules) -} - -// AddNamedGroupingPoliciesEx adds named role inheritance rules to the current policy. -// If the rule already exists, the rule will not be added. -// But unlike AddNamedGroupingPolicies, other non-existent rules are added instead of returning false directly. -func (e *SyncedEnforcer) AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddNamedGroupingPoliciesEx(ptype, rules) -} - -// RemoveGroupingPolicy removes a role inheritance rule from the current policy. -func (e *SyncedEnforcer) RemoveGroupingPolicy(params ...interface{}) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveGroupingPolicy(params...) -} - -// RemoveGroupingPolicies removes role inheritance rules from the current policy. -func (e *SyncedEnforcer) RemoveGroupingPolicies(rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveGroupingPolicies(rules) -} - -// RemoveFilteredGroupingPolicy removes a role inheritance rule from the current policy, field filters can be specified. -func (e *SyncedEnforcer) RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveFilteredGroupingPolicy(fieldIndex, fieldValues...) -} - -// RemoveNamedGroupingPolicy removes a role inheritance rule from the current named policy. -func (e *SyncedEnforcer) RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveNamedGroupingPolicy(ptype, params...) -} - -// RemoveNamedGroupingPolicies removes role inheritance rules from the current named policy. 
-func (e *SyncedEnforcer) RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveNamedGroupingPolicies(ptype, rules) -} - -func (e *SyncedEnforcer) UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdateGroupingPolicy(oldRule, newRule) -} - -func (e *SyncedEnforcer) UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdateGroupingPolicies(oldRules, newRules) -} - -func (e *SyncedEnforcer) UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdateNamedGroupingPolicy(ptype, oldRule, newRule) -} - -func (e *SyncedEnforcer) UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.UpdateNamedGroupingPolicies(ptype, oldRules, newRules) -} - -// RemoveFilteredNamedGroupingPolicy removes a role inheritance rule from the current named policy, field filters can be specified. -func (e *SyncedEnforcer) RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.RemoveFilteredNamedGroupingPolicy(ptype, fieldIndex, fieldValues...) -} - -// AddFunction adds a customized function. 
-func (e *SyncedEnforcer) AddFunction(name string, function govaluate.ExpressionFunction) { - e.m.Lock() - defer e.m.Unlock() - e.Enforcer.AddFunction(name, function) -} - -func (e *SyncedEnforcer) SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SelfAddPolicy(sec, ptype, rule) -} - -func (e *SyncedEnforcer) SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SelfAddPolicies(sec, ptype, rules) -} - -func (e *SyncedEnforcer) SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SelfAddPoliciesEx(sec, ptype, rules) -} - -func (e *SyncedEnforcer) SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SelfRemovePolicy(sec, ptype, rule) -} - -func (e *SyncedEnforcer) SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SelfRemovePolicies(sec, ptype, rules) -} - -func (e *SyncedEnforcer) SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SelfRemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) 
-} - -func (e *SyncedEnforcer) SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SelfUpdatePolicy(sec, ptype, oldRule, newRule) -} - -func (e *SyncedEnforcer) SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.SelfUpdatePolicies(sec, ptype, oldRules, newRules) -} diff --git a/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go b/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go deleted file mode 100644 index 2f358b37..00000000 --- a/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import "errors" - -// Global errors for rbac defined here. -var ( - ErrNameNotFound = errors.New("error: name does not exist") - ErrDomainParameter = errors.New("error: domain should be 1 parameter") - ErrLinkNotFound = errors.New("error: link between name1 and name2 does not exist") - ErrUseDomainParameter = errors.New("error: useDomain should be 1 parameter") - ErrInvalidFieldValuesParameter = errors.New("fieldValues requires at least one parameter") - - // GetAllowedObjectConditions errors. 
- ErrObjCondition = errors.New("need to meet the prefix required by the object condition") - ErrEmptyCondition = errors.New("GetAllowedObjectConditions have an empty condition") -) diff --git a/vendor/github.com/casbin/casbin/v2/frontend.go b/vendor/github.com/casbin/casbin/v2/frontend.go deleted file mode 100644 index 101a23a5..00000000 --- a/vendor/github.com/casbin/casbin/v2/frontend.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2020 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package casbin - -import ( - "bytes" - "encoding/json" -) - -func CasbinJsGetPermissionForUser(e IEnforcer, user string) (string, error) { - model := e.GetModel() - m := map[string]interface{}{} - - m["m"] = model.ToText() - - pRules := [][]string{} - for ptype := range model["p"] { - policies, err := model.GetPolicy("p", ptype) - if err != nil { - return "", err - } - for _, rules := range policies { - pRules = append(pRules, append([]string{ptype}, rules...)) - } - } - m["p"] = pRules - - gRules := [][]string{} - for ptype := range model["g"] { - policies, err := model.GetPolicy("g", ptype) - if err != nil { - return "", err - } - for _, rules := range policies { - gRules = append(gRules, append([]string{ptype}, rules...)) - } - } - m["g"] = gRules - - result := bytes.NewBuffer([]byte{}) - encoder := json.NewEncoder(result) - encoder.SetEscapeHTML(false) - err := encoder.Encode(m) - return result.String(), err -} diff --git a/vendor/github.com/casbin/casbin/v2/frontend_old.go b/vendor/github.com/casbin/casbin/v2/frontend_old.go deleted file mode 100644 index 139b164f..00000000 --- a/vendor/github.com/casbin/casbin/v2/frontend_old.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2021 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package casbin - -import "encoding/json" - -func CasbinJsGetPermissionForUserOld(e IEnforcer, user string) ([]byte, error) { - policy, err := e.GetImplicitPermissionsForUser(user) - if err != nil { - return nil, err - } - permission := make(map[string][]string) - for i := 0; i < len(policy); i++ { - permission[policy[i][2]] = append(permission[policy[i][2]], policy[i][1]) - } - b, _ := json.Marshal(permission) - return b, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/internal_api.go b/vendor/github.com/casbin/casbin/v2/internal_api.go deleted file mode 100644 index cd329016..00000000 --- a/vendor/github.com/casbin/casbin/v2/internal_api.go +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -import ( - "fmt" - - Err "github.com/casbin/casbin/v2/errors" - "github.com/casbin/casbin/v2/model" - "github.com/casbin/casbin/v2/persist" -) - -const ( - notImplemented = "not implemented" -) - -func (e *Enforcer) shouldPersist() bool { - return e.adapter != nil && e.autoSave -} - -func (e *Enforcer) shouldNotify() bool { - return e.watcher != nil && e.autoNotifyWatcher -} - -// addPolicy adds a rule to the current policy. 
-func (e *Enforcer) addPolicyWithoutNotify(sec string, ptype string, rule []string) (bool, error) { - if e.dispatcher != nil && e.autoNotifyDispatcher { - return true, e.dispatcher.AddPolicies(sec, ptype, [][]string{rule}) - } - - hasPolicy, err := e.model.HasPolicy(sec, ptype, rule) - if hasPolicy || err != nil { - return hasPolicy, err - } - - if e.shouldPersist() { - if err = e.adapter.AddPolicy(sec, ptype, rule); err != nil { - if err.Error() != notImplemented { - return false, err - } - } - } - - err = e.model.AddPolicy(sec, ptype, rule) - if err != nil { - return false, err - } - - if sec == "g" { - err := e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{rule}) - if err != nil { - return true, err - } - } - - return true, nil -} - -// addPoliciesWithoutNotify adds rules to the current policy without notify -// If autoRemoveRepeat == true, existing rules are automatically filtered -// Otherwise, false is returned directly. -func (e *Enforcer) addPoliciesWithoutNotify(sec string, ptype string, rules [][]string, autoRemoveRepeat bool) (bool, error) { - if e.dispatcher != nil && e.autoNotifyDispatcher { - return true, e.dispatcher.AddPolicies(sec, ptype, rules) - } - - if !autoRemoveRepeat { - hasPolicies, err := e.model.HasPolicies(sec, ptype, rules) - if hasPolicies || err != nil { - return false, err - } - } - - if e.shouldPersist() { - if err := e.adapter.(persist.BatchAdapter).AddPolicies(sec, ptype, rules); err != nil { - if err.Error() != notImplemented { - return false, err - } - } - } - - err := e.model.AddPolicies(sec, ptype, rules) - if err != nil { - return false, err - } - - if sec == "g" { - err := e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, rules) - if err != nil { - return true, err - } - - err = e.BuildIncrementalConditionalRoleLinks(model.PolicyAdd, ptype, rules) - if err != nil { - return true, err - } - } - - return true, nil -} - -// removePolicy removes a rule from the current policy. 
-func (e *Enforcer) removePolicyWithoutNotify(sec string, ptype string, rule []string) (bool, error) { - if e.dispatcher != nil && e.autoNotifyDispatcher { - return true, e.dispatcher.RemovePolicies(sec, ptype, [][]string{rule}) - } - - if e.shouldPersist() { - if err := e.adapter.RemovePolicy(sec, ptype, rule); err != nil { - if err.Error() != notImplemented { - return false, err - } - } - } - - ruleRemoved, err := e.model.RemovePolicy(sec, ptype, rule) - if !ruleRemoved || err != nil { - return ruleRemoved, err - } - - if sec == "g" { - err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{rule}) - if err != nil { - return ruleRemoved, err - } - } - - return ruleRemoved, nil -} - -func (e *Enforcer) updatePolicyWithoutNotify(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { - if e.dispatcher != nil && e.autoNotifyDispatcher { - return true, e.dispatcher.UpdatePolicy(sec, ptype, oldRule, newRule) - } - - if e.shouldPersist() { - if err := e.adapter.(persist.UpdatableAdapter).UpdatePolicy(sec, ptype, oldRule, newRule); err != nil { - if err.Error() != notImplemented { - return false, err - } - } - } - ruleUpdated, err := e.model.UpdatePolicy(sec, ptype, oldRule, newRule) - if !ruleUpdated || err != nil { - return ruleUpdated, err - } - - if sec == "g" { - err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{oldRule}) // remove the old rule - if err != nil { - return ruleUpdated, err - } - err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{newRule}) // add the new rule - if err != nil { - return ruleUpdated, err - } - } - - return ruleUpdated, nil -} - -func (e *Enforcer) updatePoliciesWithoutNotify(sec string, ptype string, oldRules [][]string, newRules [][]string) (bool, error) { - if len(newRules) != len(oldRules) { - return false, fmt.Errorf("the length of oldRules should be equal to the length of newRules, but got the length of oldRules is %d, the length of newRules is %d", 
len(oldRules), len(newRules)) - } - - if e.dispatcher != nil && e.autoNotifyDispatcher { - return true, e.dispatcher.UpdatePolicies(sec, ptype, oldRules, newRules) - } - - if e.shouldPersist() { - if err := e.adapter.(persist.UpdatableAdapter).UpdatePolicies(sec, ptype, oldRules, newRules); err != nil { - if err.Error() != notImplemented { - return false, err - } - } - } - - ruleUpdated, err := e.model.UpdatePolicies(sec, ptype, oldRules, newRules) - if !ruleUpdated || err != nil { - return ruleUpdated, err - } - - if sec == "g" { - err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rules - if err != nil { - return ruleUpdated, err - } - err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rules - if err != nil { - return ruleUpdated, err - } - } - - return ruleUpdated, nil -} - -// removePolicies removes rules from the current policy. -func (e *Enforcer) removePoliciesWithoutNotify(sec string, ptype string, rules [][]string) (bool, error) { - if hasPolicies, err := e.model.HasPolicies(sec, ptype, rules); !hasPolicies || err != nil { - return hasPolicies, err - } - - if e.dispatcher != nil && e.autoNotifyDispatcher { - return true, e.dispatcher.RemovePolicies(sec, ptype, rules) - } - - if e.shouldPersist() { - if err := e.adapter.(persist.BatchAdapter).RemovePolicies(sec, ptype, rules); err != nil { - if err.Error() != notImplemented { - return false, err - } - } - } - - rulesRemoved, err := e.model.RemovePolicies(sec, ptype, rules) - if !rulesRemoved || err != nil { - return rulesRemoved, err - } - - if sec == "g" { - err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, rules) - if err != nil { - return rulesRemoved, err - } - } - return rulesRemoved, nil -} - -// removeFilteredPolicy removes rules based on field filters from the current policy. 
-func (e *Enforcer) removeFilteredPolicyWithoutNotify(sec string, ptype string, fieldIndex int, fieldValues []string) (bool, error) { - if len(fieldValues) == 0 { - return false, Err.ErrInvalidFieldValuesParameter - } - - if e.dispatcher != nil && e.autoNotifyDispatcher { - return true, e.dispatcher.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) - } - - if e.shouldPersist() { - if err := e.adapter.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...); err != nil { - if err.Error() != notImplemented { - return false, err - } - } - } - - ruleRemoved, effects, err := e.model.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) - if !ruleRemoved || err != nil { - return ruleRemoved, err - } - - if sec == "g" { - err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, effects) - if err != nil { - return ruleRemoved, err - } - } - - return ruleRemoved, nil -} - -func (e *Enforcer) updateFilteredPoliciesWithoutNotify(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) { - var ( - oldRules [][]string - err error - ) - - if _, err = e.model.GetAssertion(sec, ptype); err != nil { - return oldRules, err - } - - if e.shouldPersist() { - if oldRules, err = e.adapter.(persist.UpdatableAdapter).UpdateFilteredPolicies(sec, ptype, newRules, fieldIndex, fieldValues...); err != nil { - if err.Error() != notImplemented { - return nil, err - } - } - // For compatibility, because some adapters return oldRules containing ptype, see https://github.com/casbin/xorm-adapter/issues/49 - for i, oldRule := range oldRules { - if len(oldRules[i]) == len(e.model[sec][ptype].Tokens)+1 { - oldRules[i] = oldRule[1:] - } - } - } - - if e.dispatcher != nil && e.autoNotifyDispatcher { - return oldRules, e.dispatcher.UpdateFilteredPolicies(sec, ptype, oldRules, newRules) - } - - ruleChanged, err := e.model.RemovePolicies(sec, ptype, oldRules) - if err != nil { - return oldRules, err - } - err = 
e.model.AddPolicies(sec, ptype, newRules) - if err != nil { - return oldRules, err - } - ruleChanged = ruleChanged && len(newRules) != 0 - if !ruleChanged { - return make([][]string, 0), nil - } - - if sec == "g" { - err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rules - if err != nil { - return oldRules, err - } - err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rules - if err != nil { - return oldRules, err - } - } - - return oldRules, nil -} - -// addPolicy adds a rule to the current policy. -func (e *Enforcer) addPolicy(sec string, ptype string, rule []string) (bool, error) { - ok, err := e.addPolicyWithoutNotify(sec, ptype, rule) - if !ok || err != nil { - return ok, err - } - - if e.shouldNotify() { - var err error - if watcher, ok := e.watcher.(persist.WatcherEx); ok { - err = watcher.UpdateForAddPolicy(sec, ptype, rule...) - } else { - err = e.watcher.Update() - } - return true, err - } - - return true, nil -} - -// addPolicies adds rules to the current policy. -// If autoRemoveRepeat == true, existing rules are automatically filtered -// Otherwise, false is returned directly. -func (e *Enforcer) addPolicies(sec string, ptype string, rules [][]string, autoRemoveRepeat bool) (bool, error) { - ok, err := e.addPoliciesWithoutNotify(sec, ptype, rules, autoRemoveRepeat) - if !ok || err != nil { - return ok, err - } - - if e.shouldNotify() { - var err error - if watcher, ok := e.watcher.(persist.WatcherEx); ok { - err = watcher.UpdateForAddPolicies(sec, ptype, rules...) - } else { - err = e.watcher.Update() - } - return true, err - } - - return true, nil -} - -// removePolicy removes a rule from the current policy. 
-func (e *Enforcer) removePolicy(sec string, ptype string, rule []string) (bool, error) { - ok, err := e.removePolicyWithoutNotify(sec, ptype, rule) - if !ok || err != nil { - return ok, err - } - - if e.shouldNotify() { - var err error - if watcher, ok := e.watcher.(persist.WatcherEx); ok { - err = watcher.UpdateForRemovePolicy(sec, ptype, rule...) - } else { - err = e.watcher.Update() - } - return true, err - } - - return true, nil -} - -func (e *Enforcer) updatePolicy(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { - ok, err := e.updatePolicyWithoutNotify(sec, ptype, oldRule, newRule) - if !ok || err != nil { - return ok, err - } - - if e.shouldNotify() { - var err error - if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { - err = watcher.UpdateForUpdatePolicy(sec, ptype, oldRule, newRule) - } else { - err = e.watcher.Update() - } - return true, err - } - - return true, nil -} - -func (e *Enforcer) updatePolicies(sec string, ptype string, oldRules [][]string, newRules [][]string) (bool, error) { - ok, err := e.updatePoliciesWithoutNotify(sec, ptype, oldRules, newRules) - if !ok || err != nil { - return ok, err - } - - if e.shouldNotify() { - var err error - if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { - err = watcher.UpdateForUpdatePolicies(sec, ptype, oldRules, newRules) - } else { - err = e.watcher.Update() - } - return true, err - } - - return true, nil -} - -// removePolicies removes rules from the current policy. -func (e *Enforcer) removePolicies(sec string, ptype string, rules [][]string) (bool, error) { - ok, err := e.removePoliciesWithoutNotify(sec, ptype, rules) - if !ok || err != nil { - return ok, err - } - - if e.shouldNotify() { - var err error - if watcher, ok := e.watcher.(persist.WatcherEx); ok { - err = watcher.UpdateForRemovePolicies(sec, ptype, rules...) 
- } else { - err = e.watcher.Update() - } - return true, err - } - - return true, nil -} - -// removeFilteredPolicy removes rules based on field filters from the current policy. -func (e *Enforcer) removeFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues []string) (bool, error) { - ok, err := e.removeFilteredPolicyWithoutNotify(sec, ptype, fieldIndex, fieldValues) - if !ok || err != nil { - return ok, err - } - - if e.shouldNotify() { - var err error - if watcher, ok := e.watcher.(persist.WatcherEx); ok { - err = watcher.UpdateForRemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) - } else { - err = e.watcher.Update() - } - return true, err - } - - return true, nil -} - -func (e *Enforcer) updateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) { - oldRules, err := e.updateFilteredPoliciesWithoutNotify(sec, ptype, newRules, fieldIndex, fieldValues...) - ok := len(oldRules) != 0 - if !ok || err != nil { - return ok, err - } - - if e.shouldNotify() { - var err error - if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { - err = watcher.UpdateForUpdatePolicies(sec, ptype, oldRules, newRules) - } else { - err = e.watcher.Update() - } - return true, err - } - - return true, nil -} - -func (e *Enforcer) GetFieldIndex(ptype string, field string) (int, error) { - return e.model.GetFieldIndex(ptype, field) -} - -func (e *Enforcer) SetFieldIndex(ptype string, field string, index int) { - assertion := e.model["p"][ptype] - assertion.FieldIndexMap[field] = index -} diff --git a/vendor/github.com/casbin/casbin/v2/log/default_logger.go b/vendor/github.com/casbin/casbin/v2/log/default_logger.go deleted file mode 100644 index 9994f390..00000000 --- a/vendor/github.com/casbin/casbin/v2/log/default_logger.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018 The casbin Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -import ( - "fmt" - "log" - "strings" -) - -// DefaultLogger is the implementation for a Logger using golang log. -type DefaultLogger struct { - enabled bool -} - -func (l *DefaultLogger) EnableLog(enable bool) { - l.enabled = enable -} - -func (l *DefaultLogger) IsEnabled() bool { - return l.enabled -} - -func (l *DefaultLogger) LogModel(model [][]string) { - if !l.enabled { - return - } - var str strings.Builder - str.WriteString("Model: ") - for _, v := range model { - str.WriteString(fmt.Sprintf("%v\n", v)) - } - - log.Println(str.String()) -} - -func (l *DefaultLogger) LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) { - if !l.enabled { - return - } - - var reqStr strings.Builder - reqStr.WriteString("Request: ") - for i, rval := range request { - if i != len(request)-1 { - reqStr.WriteString(fmt.Sprintf("%v, ", rval)) - } else { - reqStr.WriteString(fmt.Sprintf("%v", rval)) - } - } - reqStr.WriteString(fmt.Sprintf(" ---> %t\n", result)) - - reqStr.WriteString("Hit Policy: ") - for i, pval := range explains { - if i != len(explains)-1 { - reqStr.WriteString(fmt.Sprintf("%v, ", pval)) - } else { - reqStr.WriteString(fmt.Sprintf("%v \n", pval)) - } - } - - log.Println(reqStr.String()) -} - -func (l *DefaultLogger) LogPolicy(policy map[string][][]string) { - if !l.enabled { - return - } - - var str strings.Builder - str.WriteString("Policy: ") - for k, v := 
range policy { - str.WriteString(fmt.Sprintf("%s : %v\n", k, v)) - } - - log.Println(str.String()) -} - -func (l *DefaultLogger) LogRole(roles []string) { - if !l.enabled { - return - } - - log.Println("Roles: ", strings.Join(roles, "\n")) -} - -func (l *DefaultLogger) LogError(err error, msg ...string) { - if !l.enabled { - return - } - log.Println(msg, err) -} diff --git a/vendor/github.com/casbin/casbin/v2/log/log_util.go b/vendor/github.com/casbin/casbin/v2/log/log_util.go deleted file mode 100644 index 7edabf89..00000000 --- a/vendor/github.com/casbin/casbin/v2/log/log_util.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -var logger Logger = &DefaultLogger{} - -// SetLogger sets the current logger. -func SetLogger(l Logger) { - logger = l -} - -// GetLogger returns the current logger. -func GetLogger() Logger { - return logger -} - -// LogModel logs the model information. -func LogModel(model [][]string) { - logger.LogModel(model) -} - -// LogEnforce logs the enforcer information. -func LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) { - logger.LogEnforce(matcher, request, result, explains) -} - -// LogRole log info related to role. -func LogRole(roles []string) { - logger.LogRole(roles) -} - -// LogPolicy logs the policy information. 
-func LogPolicy(policy map[string][][]string) { - logger.LogPolicy(policy) -} - -// LogError logs the error information. -func LogError(err error, msg ...string) { - logger.LogError(err, msg...) -} diff --git a/vendor/github.com/casbin/casbin/v2/log/logger.go b/vendor/github.com/casbin/casbin/v2/log/logger.go deleted file mode 100644 index 8982cae6..00000000 --- a/vendor/github.com/casbin/casbin/v2/log/logger.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2018 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -//go:generate mockgen -destination=./mocks/mock_logger.go -package=mocks github.com/casbin/casbin/v2/log Logger - -// Logger is the logging interface implementation. -type Logger interface { - // EnableLog controls whether print the message. - EnableLog(bool) - - // IsEnabled returns if logger is enabled. - IsEnabled() bool - - // LogModel log info related to model. - LogModel(model [][]string) - - // LogEnforce log info related to enforce. - LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) - - // LogRole log info related to role. - LogRole(roles []string) - - // LogPolicy log info related to policy. 
- LogPolicy(policy map[string][][]string) - - // LogError log info relate to error - LogError(err error, msg ...string) -} diff --git a/vendor/github.com/casbin/casbin/v2/management_api.go b/vendor/github.com/casbin/casbin/v2/management_api.go deleted file mode 100644 index 44fe749b..00000000 --- a/vendor/github.com/casbin/casbin/v2/management_api.go +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -import ( - "errors" - "fmt" - "strings" - - "github.com/casbin/casbin/v2/util" - "github.com/casbin/govaluate" -) - -// GetAllSubjects gets the list of subjects that show up in the current policy. -func (e *Enforcer) GetAllSubjects() ([]string, error) { - return e.model.GetValuesForFieldInPolicyAllTypes("p", 0) -} - -// GetAllNamedSubjects gets the list of subjects that show up in the current named policy. -func (e *Enforcer) GetAllNamedSubjects(ptype string) ([]string, error) { - return e.model.GetValuesForFieldInPolicy("p", ptype, 0) -} - -// GetAllObjects gets the list of objects that show up in the current policy. -func (e *Enforcer) GetAllObjects() ([]string, error) { - return e.model.GetValuesForFieldInPolicyAllTypes("p", 1) -} - -// GetAllNamedObjects gets the list of objects that show up in the current named policy. 
-func (e *Enforcer) GetAllNamedObjects(ptype string) ([]string, error) { - return e.model.GetValuesForFieldInPolicy("p", ptype, 1) -} - -// GetAllActions gets the list of actions that show up in the current policy. -func (e *Enforcer) GetAllActions() ([]string, error) { - return e.model.GetValuesForFieldInPolicyAllTypes("p", 2) -} - -// GetAllNamedActions gets the list of actions that show up in the current named policy. -func (e *Enforcer) GetAllNamedActions(ptype string) ([]string, error) { - return e.model.GetValuesForFieldInPolicy("p", ptype, 2) -} - -// GetAllRoles gets the list of roles that show up in the current policy. -func (e *Enforcer) GetAllRoles() ([]string, error) { - return e.model.GetValuesForFieldInPolicyAllTypes("g", 1) -} - -// GetAllNamedRoles gets the list of roles that show up in the current named policy. -func (e *Enforcer) GetAllNamedRoles(ptype string) ([]string, error) { - return e.model.GetValuesForFieldInPolicy("g", ptype, 1) -} - -// GetPolicy gets all the authorization rules in the policy. -func (e *Enforcer) GetPolicy() ([][]string, error) { - return e.GetNamedPolicy("p") -} - -// GetFilteredPolicy gets all the authorization rules in the policy, field filters can be specified. -func (e *Enforcer) GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { - return e.GetFilteredNamedPolicy("p", fieldIndex, fieldValues...) -} - -// GetNamedPolicy gets all the authorization rules in the named policy. -func (e *Enforcer) GetNamedPolicy(ptype string) ([][]string, error) { - return e.model.GetPolicy("p", ptype) -} - -// GetFilteredNamedPolicy gets all the authorization rules in the named policy, field filters can be specified. -func (e *Enforcer) GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { - return e.model.GetFilteredPolicy("p", ptype, fieldIndex, fieldValues...) -} - -// GetGroupingPolicy gets all the role inheritance rules in the policy. 
-func (e *Enforcer) GetGroupingPolicy() ([][]string, error) { - return e.GetNamedGroupingPolicy("g") -} - -// GetFilteredGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. -func (e *Enforcer) GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { - return e.GetFilteredNamedGroupingPolicy("g", fieldIndex, fieldValues...) -} - -// GetNamedGroupingPolicy gets all the role inheritance rules in the policy. -func (e *Enforcer) GetNamedGroupingPolicy(ptype string) ([][]string, error) { - return e.model.GetPolicy("g", ptype) -} - -// GetFilteredNamedGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. -func (e *Enforcer) GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { - return e.model.GetFilteredPolicy("g", ptype, fieldIndex, fieldValues...) -} - -// GetFilteredNamedPolicyWithMatcher gets rules based on matcher from the policy. 
-func (e *Enforcer) GetFilteredNamedPolicyWithMatcher(ptype string, matcher string) ([][]string, error) { - var res [][]string - var err error - - functions := e.fm.GetFunctions() - if _, ok := e.model["g"]; ok { - for key, ast := range e.model["g"] { - // g must be a normal role definition (ast.RM != nil) - // or a conditional role definition (ast.CondRM != nil) - // ast.RM and ast.CondRM shouldn't be nil at the same time - if ast.RM != nil { - functions[key] = util.GenerateGFunction(ast.RM) - } - if ast.CondRM != nil { - functions[key] = util.GenerateConditionalGFunction(ast.CondRM) - } - } - } - - var expString string - if matcher == "" { - return res, fmt.Errorf("matcher is empty") - } else { - expString = util.RemoveComments(util.EscapeAssertion(matcher)) - } - - var expression *govaluate.EvaluableExpression - - expression, err = govaluate.NewEvaluableExpressionWithFunctions(expString, functions) - if err != nil { - return res, err - } - - pTokens := make(map[string]int, len(e.model["p"][ptype].Tokens)) - for i, token := range e.model["p"][ptype].Tokens { - pTokens[token] = i - } - - parameters := enforceParameters{ - pTokens: pTokens, - } - - if policyLen := len(e.model["p"][ptype].Policy); policyLen != 0 && strings.Contains(expString, ptype+"_") { - for _, pvals := range e.model["p"][ptype].Policy { - if len(e.model["p"][ptype].Tokens) != len(pvals) { - return res, fmt.Errorf( - "invalid policy size: expected %d, got %d, pvals: %v", - len(e.model["p"][ptype].Tokens), - len(pvals), - pvals) - } - - parameters.pVals = pvals - - result, err := expression.Eval(parameters) - - if err != nil { - return res, err - } - - switch result := result.(type) { - case bool: - if result { - res = append(res, pvals) - } - case float64: - if result != 0 { - res = append(res, pvals) - } - default: - return res, errors.New("matcher result should be bool, int or float") - } - } - } - return res, nil -} - -// HasPolicy determines whether an authorization rule exists. 
-func (e *Enforcer) HasPolicy(params ...interface{}) (bool, error) { - return e.HasNamedPolicy("p", params...) -} - -// HasNamedPolicy determines whether a named authorization rule exists. -func (e *Enforcer) HasNamedPolicy(ptype string, params ...interface{}) (bool, error) { - if strSlice, ok := params[0].([]string); len(params) == 1 && ok { - return e.model.HasPolicy("p", ptype, strSlice) - } - - policy := make([]string, 0) - for _, param := range params { - policy = append(policy, param.(string)) - } - - return e.model.HasPolicy("p", ptype, policy) -} - -// AddPolicy adds an authorization rule to the current policy. -// If the rule already exists, the function returns false and the rule will not be added. -// Otherwise the function returns true by adding the new rule. -func (e *Enforcer) AddPolicy(params ...interface{}) (bool, error) { - return e.AddNamedPolicy("p", params...) -} - -// AddPolicies adds authorization rules to the current policy. -// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. -// Otherwise the function returns true for the corresponding rule by adding the new rule. -func (e *Enforcer) AddPolicies(rules [][]string) (bool, error) { - return e.AddNamedPolicies("p", rules) -} - -// AddPoliciesEx adds authorization rules to the current policy. -// If the rule already exists, the rule will not be added. -// But unlike AddPolicies, other non-existent rules are added instead of returning false directly. -func (e *Enforcer) AddPoliciesEx(rules [][]string) (bool, error) { - return e.AddNamedPoliciesEx("p", rules) -} - -// AddNamedPolicy adds an authorization rule to the current named policy. -// If the rule already exists, the function returns false and the rule will not be added. -// Otherwise the function returns true by adding the new rule. 
-func (e *Enforcer) AddNamedPolicy(ptype string, params ...interface{}) (bool, error) { - if strSlice, ok := params[0].([]string); len(params) == 1 && ok { - strSlice = append(make([]string, 0, len(strSlice)), strSlice...) - return e.addPolicy("p", ptype, strSlice) - } - policy := make([]string, 0) - for _, param := range params { - policy = append(policy, param.(string)) - } - - return e.addPolicy("p", ptype, policy) -} - -// AddNamedPolicies adds authorization rules to the current named policy. -// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. -// Otherwise the function returns true for the corresponding by adding the new rule. -func (e *Enforcer) AddNamedPolicies(ptype string, rules [][]string) (bool, error) { - return e.addPolicies("p", ptype, rules, false) -} - -// AddNamedPoliciesEx adds authorization rules to the current named policy. -// If the rule already exists, the rule will not be added. -// But unlike AddNamedPolicies, other non-existent rules are added instead of returning false directly. -func (e *Enforcer) AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) { - return e.addPolicies("p", ptype, rules, true) -} - -// RemovePolicy removes an authorization rule from the current policy. -func (e *Enforcer) RemovePolicy(params ...interface{}) (bool, error) { - return e.RemoveNamedPolicy("p", params...) -} - -// UpdatePolicy updates an authorization rule from the current policy. -func (e *Enforcer) UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) { - return e.UpdateNamedPolicy("p", oldPolicy, newPolicy) -} - -func (e *Enforcer) UpdateNamedPolicy(ptype string, p1 []string, p2 []string) (bool, error) { - return e.updatePolicy("p", ptype, p1, p2) -} - -// UpdatePolicies updates authorization rules from the current policies. 
-func (e *Enforcer) UpdatePolicies(oldPolices [][]string, newPolicies [][]string) (bool, error) { - return e.UpdateNamedPolicies("p", oldPolices, newPolicies) -} - -func (e *Enforcer) UpdateNamedPolicies(ptype string, p1 [][]string, p2 [][]string) (bool, error) { - return e.updatePolicies("p", ptype, p1, p2) -} - -func (e *Enforcer) UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { - return e.UpdateFilteredNamedPolicies("p", newPolicies, fieldIndex, fieldValues...) -} - -func (e *Enforcer) UpdateFilteredNamedPolicies(ptype string, newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { - return e.updateFilteredPolicies("p", ptype, newPolicies, fieldIndex, fieldValues...) -} - -// RemovePolicies removes authorization rules from the current policy. -func (e *Enforcer) RemovePolicies(rules [][]string) (bool, error) { - return e.RemoveNamedPolicies("p", rules) -} - -// RemoveFilteredPolicy removes an authorization rule from the current policy, field filters can be specified. -func (e *Enforcer) RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) { - return e.RemoveFilteredNamedPolicy("p", fieldIndex, fieldValues...) -} - -// RemoveNamedPolicy removes an authorization rule from the current named policy. -func (e *Enforcer) RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) { - if strSlice, ok := params[0].([]string); len(params) == 1 && ok { - return e.removePolicy("p", ptype, strSlice) - } - policy := make([]string, 0) - for _, param := range params { - policy = append(policy, param.(string)) - } - - return e.removePolicy("p", ptype, policy) -} - -// RemoveNamedPolicies removes authorization rules from the current named policy. 
-func (e *Enforcer) RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) { - return e.removePolicies("p", ptype, rules) -} - -// RemoveFilteredNamedPolicy removes an authorization rule from the current named policy, field filters can be specified. -func (e *Enforcer) RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { - return e.removeFilteredPolicy("p", ptype, fieldIndex, fieldValues) -} - -// HasGroupingPolicy determines whether a role inheritance rule exists. -func (e *Enforcer) HasGroupingPolicy(params ...interface{}) (bool, error) { - return e.HasNamedGroupingPolicy("g", params...) -} - -// HasNamedGroupingPolicy determines whether a named role inheritance rule exists. -func (e *Enforcer) HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { - if strSlice, ok := params[0].([]string); len(params) == 1 && ok { - return e.model.HasPolicy("g", ptype, strSlice) - } - - policy := make([]string, 0) - for _, param := range params { - policy = append(policy, param.(string)) - } - - return e.model.HasPolicy("g", ptype, policy) -} - -// AddGroupingPolicy adds a role inheritance rule to the current policy. -// If the rule already exists, the function returns false and the rule will not be added. -// Otherwise the function returns true by adding the new rule. -func (e *Enforcer) AddGroupingPolicy(params ...interface{}) (bool, error) { - return e.AddNamedGroupingPolicy("g", params...) -} - -// AddGroupingPolicies adds role inheritance rules to the current policy. -// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. -// Otherwise the function returns true for the corresponding policy rule by adding the new rule. -func (e *Enforcer) AddGroupingPolicies(rules [][]string) (bool, error) { - return e.AddNamedGroupingPolicies("g", rules) -} - -// AddGroupingPoliciesEx adds role inheritance rules to the current policy. 
-// If the rule already exists, the rule will not be added. -// But unlike AddGroupingPolicies, other non-existent rules are added instead of returning false directly. -func (e *Enforcer) AddGroupingPoliciesEx(rules [][]string) (bool, error) { - return e.AddNamedGroupingPoliciesEx("g", rules) -} - -// AddNamedGroupingPolicy adds a named role inheritance rule to the current policy. -// If the rule already exists, the function returns false and the rule will not be added. -// Otherwise the function returns true by adding the new rule. -func (e *Enforcer) AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { - var ruleAdded bool - var err error - if strSlice, ok := params[0].([]string); len(params) == 1 && ok { - ruleAdded, err = e.addPolicy("g", ptype, strSlice) - } else { - policy := make([]string, 0) - for _, param := range params { - policy = append(policy, param.(string)) - } - - ruleAdded, err = e.addPolicy("g", ptype, policy) - } - - return ruleAdded, err -} - -// AddNamedGroupingPolicies adds named role inheritance rules to the current policy. -// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. -// Otherwise the function returns true for the corresponding policy rule by adding the new rule. -func (e *Enforcer) AddNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { - return e.addPolicies("g", ptype, rules, false) -} - -// AddNamedGroupingPoliciesEx adds named role inheritance rules to the current policy. -// If the rule already exists, the rule will not be added. -// But unlike AddNamedGroupingPolicies, other non-existent rules are added instead of returning false directly. -func (e *Enforcer) AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) { - return e.addPolicies("g", ptype, rules, true) -} - -// RemoveGroupingPolicy removes a role inheritance rule from the current policy. 
-func (e *Enforcer) RemoveGroupingPolicy(params ...interface{}) (bool, error) { - return e.RemoveNamedGroupingPolicy("g", params...) -} - -// RemoveGroupingPolicies removes role inheritance rules from the current policy. -func (e *Enforcer) RemoveGroupingPolicies(rules [][]string) (bool, error) { - return e.RemoveNamedGroupingPolicies("g", rules) -} - -// RemoveFilteredGroupingPolicy removes a role inheritance rule from the current policy, field filters can be specified. -func (e *Enforcer) RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) { - return e.RemoveFilteredNamedGroupingPolicy("g", fieldIndex, fieldValues...) -} - -// RemoveNamedGroupingPolicy removes a role inheritance rule from the current named policy. -func (e *Enforcer) RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { - var ruleRemoved bool - var err error - if strSlice, ok := params[0].([]string); len(params) == 1 && ok { - ruleRemoved, err = e.removePolicy("g", ptype, strSlice) - } else { - policy := make([]string, 0) - for _, param := range params { - policy = append(policy, param.(string)) - } - - ruleRemoved, err = e.removePolicy("g", ptype, policy) - } - - return ruleRemoved, err -} - -// RemoveNamedGroupingPolicies removes role inheritance rules from the current named policy. -func (e *Enforcer) RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { - return e.removePolicies("g", ptype, rules) -} - -func (e *Enforcer) UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) { - return e.UpdateNamedGroupingPolicy("g", oldRule, newRule) -} - -// UpdateGroupingPolicies updates authorization rules from the current policies. 
-func (e *Enforcer) UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) { - return e.UpdateNamedGroupingPolicies("g", oldRules, newRules) -} - -func (e *Enforcer) UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) { - return e.updatePolicy("g", ptype, oldRule, newRule) -} - -func (e *Enforcer) UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) { - return e.updatePolicies("g", ptype, oldRules, newRules) -} - -// RemoveFilteredNamedGroupingPolicy removes a role inheritance rule from the current named policy, field filters can be specified. -func (e *Enforcer) RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { - return e.removeFilteredPolicy("g", ptype, fieldIndex, fieldValues) -} - -// AddFunction adds a customized function. -func (e *Enforcer) AddFunction(name string, function govaluate.ExpressionFunction) { - e.fm.AddFunction(name, function) -} - -func (e *Enforcer) SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) { - return e.addPolicyWithoutNotify(sec, ptype, rule) -} - -func (e *Enforcer) SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) { - return e.addPoliciesWithoutNotify(sec, ptype, rules, false) -} - -func (e *Enforcer) SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) { - return e.addPoliciesWithoutNotify(sec, ptype, rules, true) -} - -func (e *Enforcer) SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) { - return e.removePolicyWithoutNotify(sec, ptype, rule) -} - -func (e *Enforcer) SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { - return e.removePoliciesWithoutNotify(sec, ptype, rules) -} - -func (e *Enforcer) SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) { - return e.removeFilteredPolicyWithoutNotify(sec, ptype, 
fieldIndex, fieldValues) -} - -func (e *Enforcer) SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) { - return e.updatePolicyWithoutNotify(sec, ptype, oldRule, newRule) -} - -func (e *Enforcer) SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { - return e.updatePoliciesWithoutNotify(sec, ptype, oldRules, newRules) -} diff --git a/vendor/github.com/casbin/casbin/v2/model/assertion.go b/vendor/github.com/casbin/casbin/v2/model/assertion.go deleted file mode 100644 index 7c5381d7..00000000 --- a/vendor/github.com/casbin/casbin/v2/model/assertion.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "errors" - "strings" - - "github.com/casbin/casbin/v2/log" - "github.com/casbin/casbin/v2/rbac" -) - -// Assertion represents an expression in a section of the model. -// For example: r = sub, obj, act. 
-type Assertion struct { - Key string - Value string - Tokens []string - ParamsTokens []string - Policy [][]string - PolicyMap map[string]int - RM rbac.RoleManager - CondRM rbac.ConditionalRoleManager - FieldIndexMap map[string]int - - logger log.Logger -} - -func (ast *Assertion) buildIncrementalRoleLinks(rm rbac.RoleManager, op PolicyOp, rules [][]string) error { - ast.RM = rm - count := strings.Count(ast.Value, "_") - if count < 2 { - return errors.New("the number of \"_\" in role definition should be at least 2") - } - - for _, rule := range rules { - if len(rule) < count { - return errors.New("grouping policy elements do not meet role definition") - } - if len(rule) > count { - rule = rule[:count] - } - switch op { - case PolicyAdd: - err := rm.AddLink(rule[0], rule[1], rule[2:]...) - if err != nil { - return err - } - case PolicyRemove: - err := rm.DeleteLink(rule[0], rule[1], rule[2:]...) - if err != nil { - return err - } - } - } - return nil -} - -func (ast *Assertion) buildRoleLinks(rm rbac.RoleManager) error { - ast.RM = rm - count := strings.Count(ast.Value, "_") - if count < 2 { - return errors.New("the number of \"_\" in role definition should be at least 2") - } - for _, rule := range ast.Policy { - if len(rule) < count { - return errors.New("grouping policy elements do not meet role definition") - } - if len(rule) > count { - rule = rule[:count] - } - err := ast.RM.AddLink(rule[0], rule[1], rule[2:]...) 
- if err != nil { - return err - } - } - - return nil -} - -func (ast *Assertion) buildIncrementalConditionalRoleLinks(condRM rbac.ConditionalRoleManager, op PolicyOp, rules [][]string) error { - ast.CondRM = condRM - count := strings.Count(ast.Value, "_") - if count < 2 { - return errors.New("the number of \"_\" in role definition should be at least 2") - } - - for _, rule := range rules { - if len(rule) < count { - return errors.New("grouping policy elements do not meet role definition") - } - if len(rule) > count { - rule = rule[:count] - } - - var err error - domainRule := rule[2:len(ast.Tokens)] - - switch op { - case PolicyAdd: - err = ast.addConditionalRoleLink(rule, domainRule) - case PolicyRemove: - err = ast.CondRM.DeleteLink(rule[0], rule[1], rule[2:]...) - } - if err != nil { - return err - } - } - - return nil -} - -func (ast *Assertion) buildConditionalRoleLinks(condRM rbac.ConditionalRoleManager) error { - ast.CondRM = condRM - count := strings.Count(ast.Value, "_") - if count < 2 { - return errors.New("the number of \"_\" in role definition should be at least 2") - } - for _, rule := range ast.Policy { - if len(rule) < count { - return errors.New("grouping policy elements do not meet role definition") - } - if len(rule) > count { - rule = rule[:count] - } - - domainRule := rule[2:len(ast.Tokens)] - - err := ast.addConditionalRoleLink(rule, domainRule) - if err != nil { - return err - } - } - - return nil -} - -// addConditionalRoleLink adds Link to rbac.ConditionalRoleManager and sets the parameters for LinkConditionFunc. -func (ast *Assertion) addConditionalRoleLink(rule []string, domainRule []string) error { - var err error - if len(domainRule) == 0 { - err = ast.CondRM.AddLink(rule[0], rule[1]) - if err == nil { - ast.CondRM.SetLinkConditionFuncParams(rule[0], rule[1], rule[len(ast.Tokens):]...) 
- } - } else { - domain := domainRule[0] - err = ast.CondRM.AddLink(rule[0], rule[1], domain) - if err == nil { - ast.CondRM.SetDomainLinkConditionFuncParams(rule[0], rule[1], domain, rule[len(ast.Tokens):]...) - } - } - return err -} - -func (ast *Assertion) setLogger(logger log.Logger) { - ast.logger = logger -} - -func (ast *Assertion) copy() *Assertion { - tokens := append([]string(nil), ast.Tokens...) - policy := make([][]string, len(ast.Policy)) - - for i, p := range ast.Policy { - policy[i] = append(policy[i], p...) - } - policyMap := make(map[string]int) - for k, v := range ast.PolicyMap { - policyMap[k] = v - } - - newAst := &Assertion{ - Key: ast.Key, - Value: ast.Value, - PolicyMap: policyMap, - Tokens: tokens, - Policy: policy, - FieldIndexMap: ast.FieldIndexMap, - } - - return newAst -} diff --git a/vendor/github.com/casbin/casbin/v2/model/function.go b/vendor/github.com/casbin/casbin/v2/model/function.go deleted file mode 100644 index f1a8d007..00000000 --- a/vendor/github.com/casbin/casbin/v2/model/function.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sync" - - "github.com/casbin/casbin/v2/util" - "github.com/casbin/govaluate" -) - -// FunctionMap represents the collection of Function. 
-type FunctionMap struct { - fns *sync.Map -} - -// [string]govaluate.ExpressionFunction - -// AddFunction adds an expression function. -func (fm *FunctionMap) AddFunction(name string, function govaluate.ExpressionFunction) { - fm.fns.LoadOrStore(name, function) -} - -// LoadFunctionMap loads an initial function map. -func LoadFunctionMap() FunctionMap { - fm := &FunctionMap{} - fm.fns = &sync.Map{} - - fm.AddFunction("keyMatch", util.KeyMatchFunc) - fm.AddFunction("keyGet", util.KeyGetFunc) - fm.AddFunction("keyMatch2", util.KeyMatch2Func) - fm.AddFunction("keyGet2", util.KeyGet2Func) - fm.AddFunction("keyMatch3", util.KeyMatch3Func) - fm.AddFunction("keyGet3", util.KeyGet3Func) - fm.AddFunction("keyMatch4", util.KeyMatch4Func) - fm.AddFunction("keyMatch5", util.KeyMatch5Func) - fm.AddFunction("regexMatch", util.RegexMatchFunc) - fm.AddFunction("ipMatch", util.IPMatchFunc) - fm.AddFunction("globMatch", util.GlobMatchFunc) - - return *fm -} - -// GetFunctions return a map with all the functions. -func (fm *FunctionMap) GetFunctions() map[string]govaluate.ExpressionFunction { - ret := make(map[string]govaluate.ExpressionFunction) - - fm.fns.Range(func(k interface{}, v interface{}) bool { - ret[k.(string)] = v.(govaluate.ExpressionFunction) - return true - }) - - return ret -} diff --git a/vendor/github.com/casbin/casbin/v2/model/model.go b/vendor/github.com/casbin/casbin/v2/model/model.go deleted file mode 100644 index 938072f4..00000000 --- a/vendor/github.com/casbin/casbin/v2/model/model.go +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "container/list" - "errors" - "fmt" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/casbin/casbin/v2/config" - "github.com/casbin/casbin/v2/constant" - "github.com/casbin/casbin/v2/log" - "github.com/casbin/casbin/v2/util" -) - -// Model represents the whole access control model. -type Model map[string]AssertionMap - -// AssertionMap is the collection of assertions, can be "r", "p", "g", "e", "m". -type AssertionMap map[string]*Assertion - -const defaultDomain string = "" -const defaultSeparator = "::" - -var sectionNameMap = map[string]string{ - "r": "request_definition", - "p": "policy_definition", - "g": "role_definition", - "e": "policy_effect", - "m": "matchers", -} - -// Minimal required sections for a model to be valid. -var requiredSections = []string{"r", "p", "e", "m"} - -func loadAssertion(model Model, cfg config.ConfigInterface, sec string, key string) bool { - value := cfg.String(sectionNameMap[sec] + "::" + key) - return model.AddDef(sec, key, value) -} - -var paramsRegex = regexp.MustCompile(`\((.*?)\)`) - -// getParamsToken Get ParamsToken from Assertion.Value. -func getParamsToken(value string) []string { - paramsString := paramsRegex.FindString(value) - if paramsString == "" { - return nil - } - paramsString = strings.TrimSuffix(strings.TrimPrefix(paramsString, "("), ")") - return strings.Split(paramsString, ",") -} - -// AddDef adds an assertion to the model. 
-func (model Model) AddDef(sec string, key string, value string) bool { - if value == "" { - return false - } - - ast := Assertion{} - ast.Key = key - ast.Value = value - ast.PolicyMap = make(map[string]int) - ast.FieldIndexMap = make(map[string]int) - ast.setLogger(model.GetLogger()) - - if sec == "r" || sec == "p" { - ast.Tokens = strings.Split(ast.Value, ",") - for i := range ast.Tokens { - ast.Tokens[i] = key + "_" + strings.TrimSpace(ast.Tokens[i]) - } - } else if sec == "g" { - ast.ParamsTokens = getParamsToken(ast.Value) - ast.Tokens = strings.Split(ast.Value, ",") - ast.Tokens = ast.Tokens[:len(ast.Tokens)-len(ast.ParamsTokens)] - } else { - ast.Value = util.RemoveComments(util.EscapeAssertion(ast.Value)) - } - - if sec == "m" && strings.Contains(ast.Value, "in") { - ast.Value = strings.Replace(strings.Replace(ast.Value, "[", "(", -1), "]", ")", -1) - } - - _, ok := model[sec] - if !ok { - model[sec] = make(AssertionMap) - } - - model[sec][key] = &ast - return true -} - -func getKeySuffix(i int) string { - if i == 1 { - return "" - } - - return strconv.Itoa(i) -} - -func loadSection(model Model, cfg config.ConfigInterface, sec string) { - i := 1 - for { - if !loadAssertion(model, cfg, sec, sec+getKeySuffix(i)) { - break - } else { - i++ - } - } -} - -// SetLogger sets the model's logger. -func (model Model) SetLogger(logger log.Logger) { - for _, astMap := range model { - for _, ast := range astMap { - ast.logger = logger - } - } - model["logger"] = AssertionMap{"logger": &Assertion{logger: logger}} -} - -// GetLogger returns the model's logger. -func (model Model) GetLogger() log.Logger { - return model["logger"]["logger"].logger -} - -// NewModel creates an empty model. -func NewModel() Model { - m := make(Model) - m.SetLogger(&log.DefaultLogger{}) - - return m -} - -// NewModelFromFile creates a model from a .CONF file. 
-func NewModelFromFile(path string) (Model, error) { - m := NewModel() - - err := m.LoadModel(path) - if err != nil { - return nil, err - } - - return m, nil -} - -// NewModelFromString creates a model from a string which contains model text. -func NewModelFromString(text string) (Model, error) { - m := NewModel() - - err := m.LoadModelFromText(text) - if err != nil { - return nil, err - } - - return m, nil -} - -// LoadModel loads the model from model CONF file. -func (model Model) LoadModel(path string) error { - cfg, err := config.NewConfig(path) - if err != nil { - return err - } - - return model.loadModelFromConfig(cfg) -} - -// LoadModelFromText loads the model from the text. -func (model Model) LoadModelFromText(text string) error { - cfg, err := config.NewConfigFromText(text) - if err != nil { - return err - } - - return model.loadModelFromConfig(cfg) -} - -func (model Model) loadModelFromConfig(cfg config.ConfigInterface) error { - for s := range sectionNameMap { - loadSection(model, cfg, s) - } - ms := make([]string, 0) - for _, rs := range requiredSections { - if !model.hasSection(rs) { - ms = append(ms, sectionNameMap[rs]) - } - } - if len(ms) > 0 { - return fmt.Errorf("missing required sections: %s", strings.Join(ms, ",")) - } - return nil -} - -func (model Model) hasSection(sec string) bool { - section := model[sec] - return section != nil -} - -func (model Model) GetAssertion(sec string, ptype string) (*Assertion, error) { - if model[sec] == nil { - return nil, fmt.Errorf("missing required section %s", sec) - } - if model[sec][ptype] == nil { - return nil, fmt.Errorf("missiong required definition %s in section %s", ptype, sec) - } - return model[sec][ptype], nil -} - -// PrintModel prints the model to the log. 
-func (model Model) PrintModel() { - if !model.GetLogger().IsEnabled() { - return - } - - var modelInfo [][]string - for k, v := range model { - if k == "logger" { - continue - } - - for i, j := range v { - modelInfo = append(modelInfo, []string{k, i, j.Value}) - } - } - - model.GetLogger().LogModel(modelInfo) -} - -func (model Model) SortPoliciesBySubjectHierarchy() error { - if model["e"]["e"].Value != constant.SubjectPriorityEffect { - return nil - } - g, err := model.GetAssertion("g", "g") - if err != nil { - return err - } - subIndex := 0 - for ptype, assertion := range model["p"] { - domainIndex, err := model.GetFieldIndex(ptype, constant.DomainIndex) - if err != nil { - domainIndex = -1 - } - policies := assertion.Policy - subjectHierarchyMap, err := getSubjectHierarchyMap(g.Policy) - if err != nil { - return err - } - sort.SliceStable(policies, func(i, j int) bool { - domain1, domain2 := defaultDomain, defaultDomain - if domainIndex != -1 { - domain1 = policies[i][domainIndex] - domain2 = policies[j][domainIndex] - } - name1, name2 := getNameWithDomain(domain1, policies[i][subIndex]), getNameWithDomain(domain2, policies[j][subIndex]) - p1 := subjectHierarchyMap[name1] - p2 := subjectHierarchyMap[name2] - return p1 > p2 - }) - for i, policy := range assertion.Policy { - assertion.PolicyMap[strings.Join(policy, ",")] = i - } - } - return nil -} - -func getSubjectHierarchyMap(policies [][]string) (map[string]int, error) { - subjectHierarchyMap := make(map[string]int) - // Tree structure of role - policyMap := make(map[string][]string) - for _, policy := range policies { - if len(policy) < 2 { - return nil, errors.New("policy g expect 2 more params") - } - domain := defaultDomain - if len(policy) != 2 { - domain = policy[2] - } - child := getNameWithDomain(domain, policy[0]) - parent := getNameWithDomain(domain, policy[1]) - policyMap[parent] = append(policyMap[parent], child) - if _, ok := subjectHierarchyMap[child]; !ok { - subjectHierarchyMap[child] = 0 - } 
- if _, ok := subjectHierarchyMap[parent]; !ok { - subjectHierarchyMap[parent] = 0 - } - subjectHierarchyMap[child] = 1 - } - // Use queues for levelOrder - queue := list.New() - for k, v := range subjectHierarchyMap { - root := k - if v != 0 { - continue - } - lv := 0 - queue.PushBack(root) - for queue.Len() != 0 { - sz := queue.Len() - for i := 0; i < sz; i++ { - node := queue.Front() - queue.Remove(node) - nodeValue := node.Value.(string) - subjectHierarchyMap[nodeValue] = lv - if _, ok := policyMap[nodeValue]; ok { - for _, child := range policyMap[nodeValue] { - queue.PushBack(child) - } - } - } - lv++ - } - } - return subjectHierarchyMap, nil -} - -func getNameWithDomain(domain string, name string) string { - return domain + defaultSeparator + name -} - -func (model Model) SortPoliciesByPriority() error { - for ptype, assertion := range model["p"] { - priorityIndex, err := model.GetFieldIndex(ptype, constant.PriorityIndex) - if err != nil { - continue - } - policies := assertion.Policy - sort.SliceStable(policies, func(i, j int) bool { - p1, err := strconv.Atoi(policies[i][priorityIndex]) - if err != nil { - return true - } - p2, err := strconv.Atoi(policies[j][priorityIndex]) - if err != nil { - return true - } - return p1 < p2 - }) - for i, policy := range assertion.Policy { - assertion.PolicyMap[strings.Join(policy, ",")] = i - } - } - return nil -} - -func (model Model) ToText() string { - tokenPatterns := make(map[string]string) - - pPattern, rPattern := regexp.MustCompile("^p_"), regexp.MustCompile("^r_") - for _, ptype := range []string{"r", "p"} { - for _, token := range model[ptype][ptype].Tokens { - tokenPatterns[token] = rPattern.ReplaceAllString(pPattern.ReplaceAllString(token, "p."), "r.") - } - } - if strings.Contains(model["e"]["e"].Value, "p_eft") { - tokenPatterns["p_eft"] = "p.eft" - } - s := strings.Builder{} - writeString := func(sec string) { - for ptype := range model[sec] { - value := model[sec][ptype].Value - for tokenPattern, newToken 
:= range tokenPatterns { - value = strings.Replace(value, tokenPattern, newToken, -1) - } - s.WriteString(fmt.Sprintf("%s = %s\n", sec, value)) - } - } - s.WriteString("[request_definition]\n") - writeString("r") - s.WriteString("[policy_definition]\n") - writeString("p") - if _, ok := model["g"]; ok { - s.WriteString("[role_definition]\n") - for ptype := range model["g"] { - s.WriteString(fmt.Sprintf("%s = %s\n", ptype, model["g"][ptype].Value)) - } - } - s.WriteString("[policy_effect]\n") - writeString("e") - s.WriteString("[matchers]\n") - writeString("m") - return s.String() -} - -func (model Model) Copy() Model { - newModel := NewModel() - - for sec, m := range model { - newAstMap := make(AssertionMap) - for ptype, ast := range m { - newAstMap[ptype] = ast.copy() - } - newModel[sec] = newAstMap - } - - newModel.SetLogger(model.GetLogger()) - return newModel -} - -func (model Model) GetFieldIndex(ptype string, field string) (int, error) { - assertion := model["p"][ptype] - if index, ok := assertion.FieldIndexMap[field]; ok { - return index, nil - } - pattern := fmt.Sprintf("%s_"+field, ptype) - index := -1 - for i, token := range assertion.Tokens { - if token == pattern { - index = i - break - } - } - if index == -1 { - return index, fmt.Errorf(field + " index is not set, please use enforcer.SetFieldIndex() to set index") - } - assertion.FieldIndexMap[field] = index - return index, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/model/policy.go b/vendor/github.com/casbin/casbin/v2/model/policy.go deleted file mode 100644 index 76b07368..00000000 --- a/vendor/github.com/casbin/casbin/v2/model/policy.go +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" - "strings" - - "github.com/casbin/casbin/v2/constant" - "github.com/casbin/casbin/v2/rbac" - "github.com/casbin/casbin/v2/util" -) - -type ( - PolicyOp int -) - -const ( - PolicyAdd PolicyOp = iota - PolicyRemove -) - -const DefaultSep = "," - -// BuildIncrementalRoleLinks provides incremental build the role inheritance relations. -func (model Model) BuildIncrementalRoleLinks(rmMap map[string]rbac.RoleManager, op PolicyOp, sec string, ptype string, rules [][]string) error { - if sec == "g" && rmMap[ptype] != nil { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return err - } - return model[sec][ptype].buildIncrementalRoleLinks(rmMap[ptype], op, rules) - } - return nil -} - -// BuildRoleLinks initializes the roles in RBAC. -func (model Model) BuildRoleLinks(rmMap map[string]rbac.RoleManager) error { - model.PrintPolicy() - for ptype, ast := range model["g"] { - if rm := rmMap[ptype]; rm != nil { - err := ast.buildRoleLinks(rm) - if err != nil { - return err - } - } - } - - return nil -} - -// BuildIncrementalConditionalRoleLinks provides incremental build the role inheritance relations. 
-func (model Model) BuildIncrementalConditionalRoleLinks(condRmMap map[string]rbac.ConditionalRoleManager, op PolicyOp, sec string, ptype string, rules [][]string) error { - if sec == "g" && condRmMap[ptype] != nil { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return err - } - return model[sec][ptype].buildIncrementalConditionalRoleLinks(condRmMap[ptype], op, rules) - } - return nil -} - -// BuildConditionalRoleLinks initializes the roles in RBAC. -func (model Model) BuildConditionalRoleLinks(condRmMap map[string]rbac.ConditionalRoleManager) error { - model.PrintPolicy() - for ptype, ast := range model["g"] { - if condRm := condRmMap[ptype]; condRm != nil { - err := ast.buildConditionalRoleLinks(condRm) - if err != nil { - return err - } - } - } - - return nil -} - -// PrintPolicy prints the policy to log. -func (model Model) PrintPolicy() { - if !model.GetLogger().IsEnabled() { - return - } - - policy := make(map[string][][]string) - - for key, ast := range model["p"] { - value, found := policy[key] - if found { - value = append(value, ast.Policy...) - policy[key] = value - } else { - policy[key] = ast.Policy - } - } - - for key, ast := range model["g"] { - value, found := policy[key] - if found { - value = append(value, ast.Policy...) - policy[key] = value - } else { - policy[key] = ast.Policy - } - } - - model.GetLogger().LogPolicy(policy) -} - -// ClearPolicy clears all current policy. -func (model Model) ClearPolicy() { - for _, ast := range model["p"] { - ast.Policy = nil - ast.PolicyMap = map[string]int{} - } - - for _, ast := range model["g"] { - ast.Policy = nil - ast.PolicyMap = map[string]int{} - } -} - -// GetPolicy gets all rules in a policy. -func (model Model) GetPolicy(sec string, ptype string) ([][]string, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return nil, err - } - return model[sec][ptype].Policy, nil -} - -// GetFilteredPolicy gets rules based on field filters from a policy. 
-func (model Model) GetFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return nil, err - } - res := [][]string{} - - for _, rule := range model[sec][ptype].Policy { - matched := true - for i, fieldValue := range fieldValues { - if fieldValue != "" && rule[fieldIndex+i] != fieldValue { - matched = false - break - } - } - - if matched { - res = append(res, rule) - } - } - - return res, nil -} - -// HasPolicyEx determines whether a model has the specified policy rule with error. -func (model Model) HasPolicyEx(sec string, ptype string, rule []string) (bool, error) { - assertion, err := model.GetAssertion(sec, ptype) - if err != nil { - return false, err - } - switch sec { - case "p": - if len(rule) != len(assertion.Tokens) { - return false, fmt.Errorf( - "invalid policy rule size: expected %d, got %d, rule: %v", - len(model["p"][ptype].Tokens), - len(rule), - rule) - } - case "g": - if len(rule) < len(assertion.Tokens) { - return false, fmt.Errorf( - "invalid policy rule size: expected %d, got %d, rule: %v", - len(model["g"][ptype].Tokens), - len(rule), - rule) - } - } - return model.HasPolicy(sec, ptype, rule) -} - -// HasPolicy determines whether a model has the specified policy rule. -func (model Model) HasPolicy(sec string, ptype string, rule []string) (bool, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return false, err - } - _, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] - return ok, nil -} - -// HasPolicies determines whether a model has any of the specified policies. If one is found we return true. 
-func (model Model) HasPolicies(sec string, ptype string, rules [][]string) (bool, error) { - for i := 0; i < len(rules); i++ { - ok, err := model.HasPolicy(sec, ptype, rules[i]) - if err != nil { - return false, err - } - if ok { - return true, nil - } - } - - return false, nil -} - -// AddPolicy adds a policy rule to the model. -func (model Model) AddPolicy(sec string, ptype string, rule []string) error { - assertion, err := model.GetAssertion(sec, ptype) - if err != nil { - return err - } - assertion.Policy = append(assertion.Policy, rule) - assertion.PolicyMap[strings.Join(rule, DefaultSep)] = len(model[sec][ptype].Policy) - 1 - - hasPriority := false - if _, ok := assertion.FieldIndexMap[constant.PriorityIndex]; ok { - hasPriority = true - } - if sec == "p" && hasPriority { - if idxInsert, err := strconv.Atoi(rule[assertion.FieldIndexMap[constant.PriorityIndex]]); err == nil { - i := len(assertion.Policy) - 1 - for ; i > 0; i-- { - idx, err := strconv.Atoi(assertion.Policy[i-1][assertion.FieldIndexMap[constant.PriorityIndex]]) - if err != nil || idx <= idxInsert { - break - } - assertion.Policy[i] = assertion.Policy[i-1] - assertion.PolicyMap[strings.Join(assertion.Policy[i-1], DefaultSep)]++ - } - assertion.Policy[i] = rule - assertion.PolicyMap[strings.Join(rule, DefaultSep)] = i - } - } - return nil -} - -// AddPolicies adds policy rules to the model. -func (model Model) AddPolicies(sec string, ptype string, rules [][]string) error { - _, err := model.AddPoliciesWithAffected(sec, ptype, rules) - return err -} - -// AddPoliciesWithAffected adds policy rules to the model, and returns affected rules. 
-func (model Model) AddPoliciesWithAffected(sec string, ptype string, rules [][]string) ([][]string, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return nil, err - } - var affected [][]string - for _, rule := range rules { - hashKey := strings.Join(rule, DefaultSep) - _, ok := model[sec][ptype].PolicyMap[hashKey] - if ok { - continue - } - affected = append(affected, rule) - err = model.AddPolicy(sec, ptype, rule) - if err != nil { - return affected, err - } - } - return affected, err -} - -// RemovePolicy removes a policy rule from the model. -// Deprecated: Using AddPoliciesWithAffected instead. -func (model Model) RemovePolicy(sec string, ptype string, rule []string) (bool, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return false, err - } - index, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] - if !ok { - return false, err - } - - model[sec][ptype].Policy = append(model[sec][ptype].Policy[:index], model[sec][ptype].Policy[index+1:]...) - delete(model[sec][ptype].PolicyMap, strings.Join(rule, DefaultSep)) - for i := index; i < len(model[sec][ptype].Policy); i++ { - model[sec][ptype].PolicyMap[strings.Join(model[sec][ptype].Policy[i], DefaultSep)] = i - } - - return true, err -} - -// UpdatePolicy updates a policy rule from the model. -func (model Model) UpdatePolicy(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return false, err - } - oldPolicy := strings.Join(oldRule, DefaultSep) - index, ok := model[sec][ptype].PolicyMap[oldPolicy] - if !ok { - return false, nil - } - - model[sec][ptype].Policy[index] = newRule - delete(model[sec][ptype].PolicyMap, oldPolicy) - model[sec][ptype].PolicyMap[strings.Join(newRule, DefaultSep)] = index - - return true, nil -} - -// UpdatePolicies updates a policy rule from the model. 
-func (model Model) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return false, err - } - rollbackFlag := false - // index -> []{oldIndex, newIndex} - modifiedRuleIndex := make(map[int][]int) - // rollback - defer func() { - if rollbackFlag { - for index, oldNewIndex := range modifiedRuleIndex { - model[sec][ptype].Policy[index] = oldRules[oldNewIndex[0]] - oldPolicy := strings.Join(oldRules[oldNewIndex[0]], DefaultSep) - newPolicy := strings.Join(newRules[oldNewIndex[1]], DefaultSep) - delete(model[sec][ptype].PolicyMap, newPolicy) - model[sec][ptype].PolicyMap[oldPolicy] = index - } - } - }() - - newIndex := 0 - for oldIndex, oldRule := range oldRules { - oldPolicy := strings.Join(oldRule, DefaultSep) - index, ok := model[sec][ptype].PolicyMap[oldPolicy] - if !ok { - rollbackFlag = true - return false, nil - } - - model[sec][ptype].Policy[index] = newRules[newIndex] - delete(model[sec][ptype].PolicyMap, oldPolicy) - model[sec][ptype].PolicyMap[strings.Join(newRules[newIndex], DefaultSep)] = index - modifiedRuleIndex[index] = []int{oldIndex, newIndex} - newIndex++ - } - - return true, nil -} - -// RemovePolicies removes policy rules from the model. -func (model Model) RemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { - affected, err := model.RemovePoliciesWithAffected(sec, ptype, rules) - return len(affected) != 0, err -} - -// RemovePoliciesWithAffected removes policy rules from the model, and returns affected rules. 
-func (model Model) RemovePoliciesWithAffected(sec string, ptype string, rules [][]string) ([][]string, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return nil, err - } - var affected [][]string - for _, rule := range rules { - index, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] - if !ok { - continue - } - - affected = append(affected, rule) - model[sec][ptype].Policy = append(model[sec][ptype].Policy[:index], model[sec][ptype].Policy[index+1:]...) - delete(model[sec][ptype].PolicyMap, strings.Join(rule, DefaultSep)) - for i := index; i < len(model[sec][ptype].Policy); i++ { - model[sec][ptype].PolicyMap[strings.Join(model[sec][ptype].Policy[i], DefaultSep)] = i - } - } - return affected, nil -} - -// RemoveFilteredPolicy removes policy rules based on field filters from the model. -func (model Model) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, [][]string, error) { - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return false, nil, err - } - var tmp [][]string - var effects [][]string - res := false - model[sec][ptype].PolicyMap = map[string]int{} - - for _, rule := range model[sec][ptype].Policy { - matched := true - for i, fieldValue := range fieldValues { - if fieldValue != "" && rule[fieldIndex+i] != fieldValue { - matched = false - break - } - } - - if matched { - effects = append(effects, rule) - } else { - tmp = append(tmp, rule) - model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] = len(tmp) - 1 - } - } - - if len(tmp) != len(model[sec][ptype].Policy) { - model[sec][ptype].Policy = tmp - res = true - } - - return res, effects, nil -} - -// GetValuesForFieldInPolicy gets all values for a field for all rules in a policy, duplicated values are removed. 
-func (model Model) GetValuesForFieldInPolicy(sec string, ptype string, fieldIndex int) ([]string, error) { - values := []string{} - - _, err := model.GetAssertion(sec, ptype) - if err != nil { - return nil, err - } - - for _, rule := range model[sec][ptype].Policy { - values = append(values, rule[fieldIndex]) - } - - util.ArrayRemoveDuplicates(&values) - - return values, nil -} - -// GetValuesForFieldInPolicyAllTypes gets all values for a field for all rules in a policy of all ptypes, duplicated values are removed. -func (model Model) GetValuesForFieldInPolicyAllTypes(sec string, fieldIndex int) ([]string, error) { - values := []string{} - - for ptype := range model[sec] { - v, err := model.GetValuesForFieldInPolicy(sec, ptype, fieldIndex) - if err != nil { - return nil, err - } - values = append(values, v...) - } - - util.ArrayRemoveDuplicates(&values) - - return values, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter.go b/vendor/github.com/casbin/casbin/v2/persist/adapter.go deleted file mode 100644 index 0525657a..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/adapter.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -import ( - "encoding/csv" - "strings" - - "github.com/casbin/casbin/v2/model" -) - -// LoadPolicyLine loads a text line as a policy rule to model. 
-func LoadPolicyLine(line string, m model.Model) error { - if line == "" || strings.HasPrefix(line, "#") { - return nil - } - - r := csv.NewReader(strings.NewReader(line)) - r.Comma = ',' - r.Comment = '#' - r.TrimLeadingSpace = true - - tokens, err := r.Read() - if err != nil { - return err - } - - return LoadPolicyArray(tokens, m) -} - -// LoadPolicyArray loads a policy rule to model. -func LoadPolicyArray(rule []string, m model.Model) error { - key := rule[0] - sec := key[:1] - ok, err := m.HasPolicyEx(sec, key, rule[1:]) - if err != nil { - return err - } - if ok { - return nil // skip duplicated policy - } - m.AddPolicy(sec, key, rule[1:]) - return nil -} - -// Adapter is the interface for Casbin adapters. -type Adapter interface { - // LoadPolicy loads all policy rules from the storage. - LoadPolicy(model model.Model) error - // SavePolicy saves all policy rules to the storage. - SavePolicy(model model.Model) error - - // AddPolicy adds a policy rule to the storage. - // This is part of the Auto-Save feature. - AddPolicy(sec string, ptype string, rule []string) error - // RemovePolicy removes a policy rule from the storage. - // This is part of the Auto-Save feature. - RemovePolicy(sec string, ptype string, rule []string) error - // RemoveFilteredPolicy removes policy rules that match the filter from the storage. - // This is part of the Auto-Save feature. - RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go deleted file mode 100644 index bda78a7e..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2023 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -import ( - "context" - - "github.com/casbin/casbin/v2/model" -) - -// ContextAdapter provides a context-aware interface for Casbin adapters. -type ContextAdapter interface { - // LoadPolicyCtx loads all policy rules from the storage with context. - LoadPolicyCtx(ctx context.Context, model model.Model) error - // SavePolicyCtx saves all policy rules to the storage with context. - SavePolicyCtx(ctx context.Context, model model.Model) error - - // AddPolicyCtx adds a policy rule to the storage with context. - // This is part of the Auto-Save feature. - AddPolicyCtx(ctx context.Context, sec string, ptype string, rule []string) error - // RemovePolicyCtx removes a policy rule from the storage with context. - // This is part of the Auto-Save feature. - RemovePolicyCtx(ctx context.Context, sec string, ptype string, rule []string) error - // RemoveFilteredPolicyCtx removes policy rules that match the filter from the storage with context. - // This is part of the Auto-Save feature. - RemoveFilteredPolicyCtx(ctx context.Context, sec string, ptype string, fieldIndex int, fieldValues ...string) error -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go deleted file mode 100644 index 82c9a0e7..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -import ( - "github.com/casbin/casbin/v2/model" -) - -// FilteredAdapter is the interface for Casbin adapters supporting filtered policies. -type FilteredAdapter interface { - Adapter - - // LoadFilteredPolicy loads only policy rules that match the filter. - LoadFilteredPolicy(model model.Model, filter interface{}) error - // IsFiltered returns true if the loaded policy has been filtered. - IsFiltered() bool -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go deleted file mode 100644 index 7893ce1b..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package persist - -import ( - "context" - - "github.com/casbin/casbin/v2/model" -) - -// ContextFilteredAdapter is the context-aware interface for Casbin adapters supporting filtered policies. -type ContextFilteredAdapter interface { - ContextAdapter - - // LoadFilteredPolicyCtx loads only policy rules that match the filter. - LoadFilteredPolicyCtx(ctx context.Context, model model.Model, filter interface{}) error - // IsFilteredCtx returns true if the loaded policy has been filtered. - IsFilteredCtx(ctx context.Context) bool -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go deleted file mode 100644 index 56ec415f..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -// BatchAdapter is the interface for Casbin adapters with multiple add and remove policy functions. -type BatchAdapter interface { - Adapter - // AddPolicies adds policy rules to the storage. - // This is part of the Auto-Save feature. - AddPolicies(sec string, ptype string, rules [][]string) error - // RemovePolicies removes policy rules from the storage. - // This is part of the Auto-Save feature. 
- RemovePolicies(sec string, ptype string, rules [][]string) error -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go deleted file mode 100644 index 741c184d..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2024 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -import "context" - -// ContextBatchAdapter is the context-aware interface for Casbin adapters with multiple add and remove policy functions. -type ContextBatchAdapter interface { - ContextAdapter - - // AddPoliciesCtx adds policy rules to the storage. - // This is part of the Auto-Save feature. - AddPoliciesCtx(ctx context.Context, sec string, ptype string, rules [][]string) error - // RemovePoliciesCtx removes policy rules from the storage. - // This is part of the Auto-Save feature. - RemovePoliciesCtx(ctx context.Context, sec string, ptype string, rules [][]string) error -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go b/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go deleted file mode 100644 index 08447b83..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2021 The casbin Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import "errors" - -var ErrNoSuchKey = errors.New("there's no such key existing in cache") - -type Cache interface { - // Set puts key and value into cache. - // First parameter for extra should be time.Time object denoting expected survival time. - // If survival time equals 0 or less, the key will always be survival. - Set(key string, value bool, extra ...interface{}) error - - // Get returns result for key, - // If there's no such key existing in cache, - // ErrNoSuchKey will be returned. - Get(key string) (bool, error) - - // Delete will remove the specific key in cache. - // If there's no such key existing in cache, - // ErrNoSuchKey will be returned. - Delete(key string) error - - // Clear deletes all the items stored in cache. - Clear() error -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go b/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go deleted file mode 100644 index 816e12dc..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2021 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import ( - "sync" - "time" -) - -type SyncCache struct { - cache DefaultCache - sync.RWMutex -} - -func (c *SyncCache) Set(key string, value bool, extra ...interface{}) error { - ttl := time.Duration(-1) - if len(extra) > 0 { - ttl = extra[0].(time.Duration) - } - c.Lock() - defer c.Unlock() - c.cache[key] = cacheItem{ - value: value, - expiresAt: time.Now().Add(ttl), - ttl: ttl, - } - return nil -} - -func (c *SyncCache) Get(key string) (bool, error) { - c.RLock() - res, ok := c.cache[key] - c.RUnlock() - if !ok { - return false, ErrNoSuchKey - } else { - if res.ttl > 0 && time.Now().After(res.expiresAt) { - c.Lock() - defer c.Unlock() - delete(c.cache, key) - return false, ErrNoSuchKey - } - return res.value, nil - } -} - -func (c *SyncCache) Delete(key string) error { - c.RLock() - _, ok := c.cache[key] - c.RUnlock() - if !ok { - return ErrNoSuchKey - } else { - c.Lock() - defer c.Unlock() - delete(c.cache, key) - return nil - } -} - -func (c *SyncCache) Clear() error { - c.Lock() - c.cache = make(DefaultCache) - c.Unlock() - return nil -} - -func NewSyncCache() (Cache, error) { - cache := SyncCache{ - make(DefaultCache), - sync.RWMutex{}, - } - return &cache, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go b/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go deleted file mode 100644 index 9108e7d6..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2021 The casbin Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cache - -import "time" - -type cacheItem struct { - value bool - expiresAt time.Time - ttl time.Duration -} - -type DefaultCache map[string]cacheItem - -func (c *DefaultCache) Set(key string, value bool, extra ...interface{}) error { - ttl := time.Duration(-1) - if len(extra) > 0 { - ttl = extra[0].(time.Duration) - } - (*c)[key] = cacheItem{ - value: value, - expiresAt: time.Now().Add(ttl), - ttl: ttl, - } - return nil -} - -func (c *DefaultCache) Get(key string) (bool, error) { - if res, ok := (*c)[key]; !ok { - return false, ErrNoSuchKey - } else { - if res.ttl > 0 && time.Now().After(res.expiresAt) { - delete(*c, key) - return false, ErrNoSuchKey - } - return res.value, nil - } -} - -func (c *DefaultCache) Delete(key string) error { - if _, ok := (*c)[key]; !ok { - return ErrNoSuchKey - } else { - delete(*c, key) - return nil - } -} - -func (c *DefaultCache) Clear() error { - *c = make(DefaultCache) - return nil -} - -func NewDefaultCache() (Cache, error) { - cache := make(DefaultCache) - return &cache, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go b/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go deleted file mode 100644 index ceaed838..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2020 The casbin Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -// Dispatcher is the interface for Casbin dispatcher. -type Dispatcher interface { - // AddPolicies adds policies rule to all instance. - AddPolicies(sec string, ptype string, rules [][]string) error - // RemovePolicies removes policies rule from all instance. - RemovePolicies(sec string, ptype string, rules [][]string) error - // RemoveFilteredPolicy removes policy rules that match the filter from all instance. - RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error - // ClearPolicy clears all current policy in all instances - ClearPolicy() error - // UpdatePolicy updates policy rule from all instance. - UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error - // UpdatePolicies updates some policy rules from all instance - UpdatePolicies(sec string, ptype string, oldrules, newRules [][]string) error - // UpdateFilteredPolicies deletes old rules and adds new rules. - UpdateFilteredPolicies(sec string, ptype string, oldRules [][]string, newRules [][]string) error -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go deleted file mode 100644 index c68f0eaa..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileadapter - -import ( - "bufio" - "bytes" - "errors" - "os" - "strings" - - "github.com/casbin/casbin/v2/model" - "github.com/casbin/casbin/v2/persist" - "github.com/casbin/casbin/v2/util" -) - -// Adapter is the file adapter for Casbin. -// It can load policy from file or save policy to file. -type Adapter struct { - filePath string -} - -func (a *Adapter) UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error { - return errors.New("not implemented") -} - -func (a *Adapter) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error { - return errors.New("not implemented") -} - -func (a *Adapter) UpdateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) { - return nil, errors.New("not implemented") -} - -// NewAdapter is the constructor for Adapter. -func NewAdapter(filePath string) *Adapter { - return &Adapter{filePath: filePath} -} - -// LoadPolicy loads all policy rules from the storage. -func (a *Adapter) LoadPolicy(model model.Model) error { - if a.filePath == "" { - return errors.New("invalid file path, file path cannot be empty") - } - - return a.loadPolicyFile(model, persist.LoadPolicyLine) -} - -// SavePolicy saves all policy rules to the storage. 
-func (a *Adapter) SavePolicy(model model.Model) error { - if a.filePath == "" { - return errors.New("invalid file path, file path cannot be empty") - } - - var tmp bytes.Buffer - - for ptype, ast := range model["p"] { - for _, rule := range ast.Policy { - tmp.WriteString(ptype + ", ") - tmp.WriteString(util.ArrayToString(rule)) - tmp.WriteString("\n") - } - } - - for ptype, ast := range model["g"] { - for _, rule := range ast.Policy { - tmp.WriteString(ptype + ", ") - tmp.WriteString(util.ArrayToString(rule)) - tmp.WriteString("\n") - } - } - - return a.savePolicyFile(strings.TrimRight(tmp.String(), "\n")) -} - -func (a *Adapter) loadPolicyFile(model model.Model, handler func(string, model.Model) error) error { - f, err := os.Open(a.filePath) - if err != nil { - return err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - err = handler(line, model) - if err != nil { - return err - } - } - return scanner.Err() -} - -func (a *Adapter) savePolicyFile(text string) error { - f, err := os.Create(a.filePath) - if err != nil { - return err - } - w := bufio.NewWriter(f) - - _, err = w.WriteString(text) - if err != nil { - return err - } - - err = w.Flush() - if err != nil { - return err - } - - return f.Close() -} - -// AddPolicy adds a policy rule to the storage. -func (a *Adapter) AddPolicy(sec string, ptype string, rule []string) error { - return errors.New("not implemented") -} - -// AddPolicies adds policy rules to the storage. -func (a *Adapter) AddPolicies(sec string, ptype string, rules [][]string) error { - return errors.New("not implemented") -} - -// RemovePolicy removes a policy rule from the storage. -func (a *Adapter) RemovePolicy(sec string, ptype string, rule []string) error { - return errors.New("not implemented") -} - -// RemovePolicies removes policy rules from the storage. 
-func (a *Adapter) RemovePolicies(sec string, ptype string, rules [][]string) error { - return errors.New("not implemented") -} - -// RemoveFilteredPolicy removes policy rules that match the filter from the storage. -func (a *Adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { - return errors.New("not implemented") -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go deleted file mode 100644 index 1a074c9a..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileadapter - -import ( - "bufio" - "errors" - "os" - "strings" - - "github.com/casbin/casbin/v2/model" - "github.com/casbin/casbin/v2/persist" -) - -// FilteredAdapter is the filtered file adapter for Casbin. It can load policy -// from file or save policy to file and supports loading of filtered policies. -type FilteredAdapter struct { - *Adapter - filtered bool -} - -// Filter defines the filtering rules for a FilteredAdapter's policy. Empty values -// are ignored, but all others must match the filter. 
-type Filter struct { - P []string - G []string - G1 []string - G2 []string - G3 []string - G4 []string - G5 []string -} - -// NewFilteredAdapter is the constructor for FilteredAdapter. -func NewFilteredAdapter(filePath string) *FilteredAdapter { - a := FilteredAdapter{} - a.filtered = true - a.Adapter = NewAdapter(filePath) - return &a -} - -// LoadPolicy loads all policy rules from the storage. -func (a *FilteredAdapter) LoadPolicy(model model.Model) error { - a.filtered = false - return a.Adapter.LoadPolicy(model) -} - -// LoadFilteredPolicy loads only policy rules that match the filter. -func (a *FilteredAdapter) LoadFilteredPolicy(model model.Model, filter interface{}) error { - if filter == nil { - return a.LoadPolicy(model) - } - if a.filePath == "" { - return errors.New("invalid file path, file path cannot be empty") - } - - filterValue, ok := filter.(*Filter) - if !ok { - return errors.New("invalid filter type") - } - err := a.loadFilteredPolicyFile(model, filterValue, persist.LoadPolicyLine) - if err == nil { - a.filtered = true - } - return err -} - -func (a *FilteredAdapter) loadFilteredPolicyFile(model model.Model, filter *Filter, handler func(string, model.Model) error) error { - f, err := os.Open(a.filePath) - if err != nil { - return err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - - if filterLine(line, filter) { - continue - } - - err = handler(line, model) - if err != nil { - return err - } - } - return scanner.Err() -} - -// IsFiltered returns true if the loaded policy has been filtered. -func (a *FilteredAdapter) IsFiltered() bool { - return a.filtered -} - -// SavePolicy saves all policy rules to the storage. 
-func (a *FilteredAdapter) SavePolicy(model model.Model) error { - if a.filtered { - return errors.New("cannot save a filtered policy") - } - return a.Adapter.SavePolicy(model) -} - -func filterLine(line string, filter *Filter) bool { - if filter == nil { - return false - } - p := strings.Split(line, ",") - if len(p) == 0 { - return true - } - var filterSlice []string - switch strings.TrimSpace(p[0]) { - case "p": - filterSlice = filter.P - case "g": - filterSlice = filter.G - case "g1": - filterSlice = filter.G1 - case "g2": - filterSlice = filter.G2 - case "g3": - filterSlice = filter.G3 - case "g4": - filterSlice = filter.G4 - case "g5": - filterSlice = filter.G5 - } - return filterWords(p, filterSlice) -} - -func filterWords(line []string, filter []string) bool { - if len(line) < len(filter)+1 { - return true - } - var skipLine bool - for i, v := range filter { - if len(v) > 0 && strings.TrimSpace(v) != strings.TrimSpace(line[i+1]) { - skipLine = true - break - } - } - return skipLine -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go deleted file mode 100644 index fcc5f821..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package fileadapter - -import ( - "bufio" - "errors" - "io" - "os" - "strings" - - "github.com/casbin/casbin/v2/model" - "github.com/casbin/casbin/v2/persist" -) - -// AdapterMock is the file adapter for Casbin. -// It can load policy from file or save policy to file. -type AdapterMock struct { - filePath string - errorValue string -} - -// NewAdapterMock is the constructor for AdapterMock. -func NewAdapterMock(filePath string) *AdapterMock { - a := AdapterMock{} - a.filePath = filePath - return &a -} - -// LoadPolicy loads all policy rules from the storage. -func (a *AdapterMock) LoadPolicy(model model.Model) error { - err := a.loadPolicyFile(model, persist.LoadPolicyLine) - return err -} - -// SavePolicy saves all policy rules to the storage. -func (a *AdapterMock) SavePolicy(model model.Model) error { - return nil -} - -func (a *AdapterMock) loadPolicyFile(model model.Model, handler func(string, model.Model) error) error { - f, err := os.Open(a.filePath) - if err != nil { - return err - } - defer f.Close() - - buf := bufio.NewReader(f) - for { - line, err := buf.ReadString('\n') - line = strings.TrimSpace(line) - if err2 := handler(line, model); err2 != nil { - return err2 - } - if err != nil { - if err == io.EOF { - return nil - } - return err - } - } -} - -// SetMockErr sets string to be returned by of the mock during testing. -func (a *AdapterMock) SetMockErr(errorToSet string) { - a.errorValue = errorToSet -} - -// GetMockErr returns a mock error or nil. -func (a *AdapterMock) GetMockErr() error { - var returnError error - if a.errorValue != "" { - returnError = errors.New(a.errorValue) - } - return returnError -} - -// AddPolicy adds a policy rule to the storage. -func (a *AdapterMock) AddPolicy(sec string, ptype string, rule []string) error { - return a.GetMockErr() -} - -// AddPolicies removes policy rules from the storage. 
-func (a *AdapterMock) AddPolicies(sec string, ptype string, rules [][]string) error { - return a.GetMockErr() -} - -// RemovePolicy removes a policy rule from the storage. -func (a *AdapterMock) RemovePolicy(sec string, ptype string, rule []string) error { - return a.GetMockErr() -} - -// RemovePolicies removes policy rules from the storage. -func (a *AdapterMock) RemovePolicies(sec string, ptype string, rules [][]string) error { - return a.GetMockErr() -} - -// UpdatePolicy removes a policy rule from the storage. -func (a *AdapterMock) UpdatePolicy(sec string, ptype string, oldRule, newPolicy []string) error { - return a.GetMockErr() -} - -func (a *AdapterMock) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error { - return a.GetMockErr() -} - -// RemoveFilteredPolicy removes policy rules that match the filter from the storage. -func (a *AdapterMock) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { - return a.GetMockErr() -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go b/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go deleted file mode 100644 index fe9204af..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2020 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package persist - -// UpdatableAdapter is the interface for Casbin adapters with add update policy function. -type UpdatableAdapter interface { - Adapter - // UpdatePolicy updates a policy rule from storage. - // This is part of the Auto-Save feature. - UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error - // UpdatePolicies updates some policy rules to storage, like db, redis. - UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error - // UpdateFilteredPolicies deletes old rules and adds new rules. - UpdateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go deleted file mode 100644 index 55b8ba9d..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2024 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -import "context" - -// ContextUpdatableAdapter is the context-aware interface for Casbin adapters with add update policy function. -type ContextUpdatableAdapter interface { - ContextAdapter - - // UpdatePolicyCtx updates a policy rule from storage. - // This is part of the Auto-Save feature. 
- UpdatePolicyCtx(ctx context.Context, sec string, ptype string, oldRule, newRule []string) error - // UpdatePoliciesCtx updates some policy rules to storage, like db, redis. - UpdatePoliciesCtx(ctx context.Context, sec string, ptype string, oldRules, newRules [][]string) error - // UpdateFilteredPoliciesCtx deletes old rules and adds new rules. - UpdateFilteredPoliciesCtx(ctx context.Context, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher.go b/vendor/github.com/casbin/casbin/v2/persist/watcher.go deleted file mode 100644 index 0d843606..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/watcher.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -// Watcher is the interface for Casbin watchers. -type Watcher interface { - // SetUpdateCallback sets the callback function that the watcher will call - // when the policy in DB has been changed by other instances. - // A classic callback is Enforcer.LoadPolicy(). - SetUpdateCallback(func(string)) error - // Update calls the update callback of other instances to synchronize their policy. - // It is usually called after changing the policy in DB, like Enforcer.SavePolicy(), - // Enforcer.AddPolicy(), Enforcer.RemovePolicy(), etc. 
- Update() error - // Close stops and releases the watcher, the callback function will not be called any more. - Close() -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go b/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go deleted file mode 100644 index 1c6f4299..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -import "github.com/casbin/casbin/v2/model" - -// WatcherEx is the strengthened Casbin watchers. -type WatcherEx interface { - Watcher - // UpdateForAddPolicy calls the update callback of other instances to synchronize their policy. - // It is called after Enforcer.AddPolicy() - UpdateForAddPolicy(sec, ptype string, params ...string) error - // UpdateForRemovePolicy calls the update callback of other instances to synchronize their policy. - // It is called after Enforcer.RemovePolicy() - UpdateForRemovePolicy(sec, ptype string, params ...string) error - // UpdateForRemoveFilteredPolicy calls the update callback of other instances to synchronize their policy. - // It is called after Enforcer.RemoveFilteredNamedGroupingPolicy() - UpdateForRemoveFilteredPolicy(sec, ptype string, fieldIndex int, fieldValues ...string) error - // UpdateForSavePolicy calls the update callback of other instances to synchronize their policy. 
- // It is called after Enforcer.RemoveFilteredNamedGroupingPolicy() - UpdateForSavePolicy(model model.Model) error - // UpdateForAddPolicies calls the update callback of other instances to synchronize their policy. - // It is called after Enforcer.AddPolicies() - UpdateForAddPolicies(sec string, ptype string, rules ...[]string) error - // UpdateForRemovePolicies calls the update callback of other instances to synchronize their policy. - // It is called after Enforcer.RemovePolicies() - UpdateForRemovePolicies(sec string, ptype string, rules ...[]string) error -} diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go b/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go deleted file mode 100644 index 694123c4..00000000 --- a/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package persist - -// UpdatableWatcher is strengthened for Casbin watchers. -type UpdatableWatcher interface { - Watcher - // UpdateForUpdatePolicy calls the update callback of other instances to synchronize their policy. - // It is called after Enforcer.UpdatePolicy() - UpdateForUpdatePolicy(sec string, ptype string, oldRule, newRule []string) error - // UpdateForUpdatePolicies calls the update callback of other instances to synchronize their policy. 
- // It is called after Enforcer.UpdatePolicies() - UpdateForUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error -} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go deleted file mode 100644 index dcaa37f7..00000000 --- a/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2023 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rbac - -import "context" - -// ContextRoleManager provides a context-aware interface to define the operations for managing roles. -// Prefer this over RoleManager interface for context propagation, which is useful for things like handling -// request timeouts. -type ContextRoleManager interface { - RoleManager - - // ClearCtx clears all stored data and resets the role manager to the initial state with context. - ClearCtx(ctx context.Context) error - // AddLinkCtx adds the inheritance link between two roles. role: name1 and role: name2 with context. - // domain is a prefix to the roles (can be used for other purposes). - AddLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) error - // DeleteLinkCtx deletes the inheritance link between two roles. role: name1 and role: name2 with context. - // domain is a prefix to the roles (can be used for other purposes). 
- DeleteLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) error - // HasLinkCtx determines whether a link exists between two roles. role: name1 inherits role: name2 with context. - // domain is a prefix to the roles (can be used for other purposes). - HasLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) (bool, error) - // GetRolesCtx gets the roles that a user inherits with context. - // domain is a prefix to the roles (can be used for other purposes). - GetRolesCtx(ctx context.Context, name string, domain ...string) ([]string, error) - // GetUsersCtx gets the users that inherits a role with context. - // domain is a prefix to the users (can be used for other purposes). - GetUsersCtx(ctx context.Context, name string, domain ...string) ([]string, error) - // GetDomainsCtx gets domains that a user has with context. - GetDomainsCtx(ctx context.Context, name string) ([]string, error) - // GetAllDomainsCtx gets all domains with context. - GetAllDomainsCtx(ctx context.Context) ([]string, error) -} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go deleted file mode 100644 index a6ae8693..00000000 --- a/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go +++ /dev/null @@ -1,1014 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package defaultrolemanager - -import ( - "fmt" - "strings" - "sync" - - "github.com/casbin/casbin/v2/log" - "github.com/casbin/casbin/v2/rbac" - "github.com/casbin/casbin/v2/util" -) - -const defaultDomain string = "" - -// Role represents the data structure for a role in RBAC. -type Role struct { - name string - roles *sync.Map - users *sync.Map - matched *sync.Map - matchedBy *sync.Map - linkConditionFuncMap *sync.Map - linkConditionFuncParamsMap *sync.Map -} - -func newRole(name string) *Role { - r := Role{} - r.name = name - r.roles = &sync.Map{} - r.users = &sync.Map{} - r.matched = &sync.Map{} - r.matchedBy = &sync.Map{} - r.linkConditionFuncMap = &sync.Map{} - r.linkConditionFuncParamsMap = &sync.Map{} - return &r -} - -func (r *Role) addRole(role *Role) { - r.roles.Store(role.name, role) - role.addUser(r) -} - -func (r *Role) removeRole(role *Role) { - r.roles.Delete(role.name) - role.removeUser(r) -} - -// should only be called inside addRole. -func (r *Role) addUser(user *Role) { - r.users.Store(user.name, user) -} - -// should only be called inside removeRole. 
-func (r *Role) removeUser(user *Role) { - r.users.Delete(user.name) -} - -func (r *Role) addMatch(role *Role) { - r.matched.Store(role.name, role) - role.matchedBy.Store(r.name, r) -} - -func (r *Role) removeMatch(role *Role) { - r.matched.Delete(role.name) - role.matchedBy.Delete(r.name) -} - -func (r *Role) removeMatches() { - r.matched.Range(func(key, value interface{}) bool { - r.removeMatch(value.(*Role)) - return true - }) - r.matchedBy.Range(func(key, value interface{}) bool { - value.(*Role).removeMatch(r) - return true - }) -} - -func (r *Role) rangeRoles(fn func(key, value interface{}) bool) { - r.roles.Range(fn) - r.roles.Range(func(key, value interface{}) bool { - role := value.(*Role) - role.matched.Range(fn) - return true - }) - r.matchedBy.Range(func(key, value interface{}) bool { - role := value.(*Role) - role.roles.Range(fn) - return true - }) -} - -func (r *Role) rangeUsers(fn func(key, value interface{}) bool) { - r.users.Range(fn) - r.users.Range(func(key, value interface{}) bool { - role := value.(*Role) - role.matched.Range(fn) - return true - }) - r.matchedBy.Range(func(key, value interface{}) bool { - role := value.(*Role) - role.users.Range(fn) - return true - }) -} - -func (r *Role) toString() string { - roles := r.getRoles() - - if len(roles) == 0 { - return "" - } - - var sb strings.Builder - sb.WriteString(r.name) - sb.WriteString(" < ") - if len(roles) != 1 { - sb.WriteString("(") - } - - for i, role := range roles { - if i == 0 { - sb.WriteString(role) - } else { - sb.WriteString(", ") - sb.WriteString(role) - } - } - - if len(roles) != 1 { - sb.WriteString(")") - } - - return sb.String() -} - -func (r *Role) getRoles() []string { - var names []string - r.rangeRoles(func(key, value interface{}) bool { - names = append(names, key.(string)) - return true - }) - return util.RemoveDuplicateElement(names) -} - -func (r *Role) getUsers() []string { - var names []string - r.rangeUsers(func(key, value interface{}) bool { - names = 
append(names, key.(string)) - return true - }) - return names -} - -type linkConditionFuncKey struct { - roleName string - domainName string -} - -func (r *Role) addLinkConditionFunc(role *Role, domain string, fn rbac.LinkConditionFunc) { - r.linkConditionFuncMap.Store(linkConditionFuncKey{role.name, domain}, fn) -} - -func (r *Role) getLinkConditionFunc(role *Role, domain string) (rbac.LinkConditionFunc, bool) { - fn, ok := r.linkConditionFuncMap.Load(linkConditionFuncKey{role.name, domain}) - if fn == nil { - return nil, ok - } - return fn.(rbac.LinkConditionFunc), ok -} - -func (r *Role) setLinkConditionFuncParams(role *Role, domain string, params ...string) { - r.linkConditionFuncParamsMap.Store(linkConditionFuncKey{role.name, domain}, params) -} - -func (r *Role) getLinkConditionFuncParams(role *Role, domain string) ([]string, bool) { - params, ok := r.linkConditionFuncParamsMap.Load(linkConditionFuncKey{role.name, domain}) - if params == nil { - return nil, ok - } - return params.([]string), ok -} - -// RoleManagerImpl provides a default implementation for the RoleManager interface. -type RoleManagerImpl struct { - allRoles *sync.Map - maxHierarchyLevel int - matchingFunc rbac.MatchingFunc - domainMatchingFunc rbac.MatchingFunc - logger log.Logger - matchingFuncCache *util.SyncLRUCache -} - -// NewRoleManagerImpl is the constructor for creating an instance of the -// default RoleManager implementation. -func NewRoleManagerImpl(maxHierarchyLevel int) *RoleManagerImpl { - rm := RoleManagerImpl{} - _ = rm.Clear() // init allRoles and matchingFuncCache - rm.maxHierarchyLevel = maxHierarchyLevel - rm.SetLogger(&log.DefaultLogger{}) - return &rm -} - -// use this constructor to avoid rebuild of AddMatchingFunc. -func newRoleManagerWithMatchingFunc(maxHierarchyLevel int, fn rbac.MatchingFunc) *RoleManagerImpl { - rm := NewRoleManagerImpl(maxHierarchyLevel) - rm.matchingFunc = fn - return rm -} - -// rebuilds role cache. 
-func (rm *RoleManagerImpl) rebuild() { - roles := rm.allRoles - _ = rm.Clear() - rangeLinks(roles, func(name1, name2 string, domain ...string) bool { - _ = rm.AddLink(name1, name2, domain...) - return true - }) -} - -func (rm *RoleManagerImpl) Match(str string, pattern string) bool { - if str == pattern { - return true - } - - if rm.matchingFunc != nil { - return rm.matchingFunc(str, pattern) - } else { - return false - } -} - -func (rm *RoleManagerImpl) rangeMatchingRoles(name string, isPattern bool, fn func(role *Role) bool) { - rm.allRoles.Range(func(key, value interface{}) bool { - name2 := key.(string) - if isPattern && name != name2 && rm.Match(name2, name) { - fn(value.(*Role)) - } else if !isPattern && name != name2 && rm.Match(name, name2) { - fn(value.(*Role)) - } - return true - }) -} - -func (rm *RoleManagerImpl) load(name interface{}) (value *Role, ok bool) { - if r, ok := rm.allRoles.Load(name); ok { - return r.(*Role), true - } - return nil, false -} - -// loads or creates a role. -func (rm *RoleManagerImpl) getRole(name string) (r *Role, created bool) { - var role *Role - var ok bool - - if role, ok = rm.load(name); !ok { - role = newRole(name) - rm.allRoles.Store(name, role) - - if rm.matchingFunc != nil { - rm.rangeMatchingRoles(name, false, func(r *Role) bool { - r.addMatch(role) - return true - }) - - rm.rangeMatchingRoles(name, true, func(r *Role) bool { - role.addMatch(r) - return true - }) - } - } - - return role, !ok -} - -func loadAndDelete(m *sync.Map, name string) (value interface{}, loaded bool) { - value, loaded = m.Load(name) - if loaded { - m.Delete(name) - } - return value, loaded -} - -func (rm *RoleManagerImpl) removeRole(name string) { - if role, ok := loadAndDelete(rm.allRoles, name); ok { - role.(*Role).removeMatches() - } -} - -// AddMatchingFunc support use pattern in g. 
-func (rm *RoleManagerImpl) AddMatchingFunc(name string, fn rbac.MatchingFunc) { - rm.matchingFunc = fn - rm.rebuild() -} - -// AddDomainMatchingFunc support use domain pattern in g. -func (rm *RoleManagerImpl) AddDomainMatchingFunc(name string, fn rbac.MatchingFunc) { - rm.domainMatchingFunc = fn -} - -// SetLogger sets role manager's logger. -func (rm *RoleManagerImpl) SetLogger(logger log.Logger) { - rm.logger = logger -} - -// Clear clears all stored data and resets the role manager to the initial state. -func (rm *RoleManagerImpl) Clear() error { - rm.matchingFuncCache = util.NewSyncLRUCache(100) - rm.allRoles = &sync.Map{} - return nil -} - -// AddLink adds the inheritance link between role: name1 and role: name2. -// aka role: name1 inherits role: name2. -func (rm *RoleManagerImpl) AddLink(name1 string, name2 string, domains ...string) error { - user, _ := rm.getRole(name1) - role, _ := rm.getRole(name2) - user.addRole(role) - return nil -} - -// DeleteLink deletes the inheritance link between role: name1 and role: name2. -// aka role: name1 does not inherit role: name2 any more. -func (rm *RoleManagerImpl) DeleteLink(name1 string, name2 string, domains ...string) error { - user, _ := rm.getRole(name1) - role, _ := rm.getRole(name2) - user.removeRole(role) - return nil -} - -// HasLink determines whether role: name1 inherits role: name2. 
-func (rm *RoleManagerImpl) HasLink(name1 string, name2 string, domains ...string) (bool, error) { - if name1 == name2 || (rm.matchingFunc != nil && rm.Match(name1, name2)) { - return true, nil - } - - user, userCreated := rm.getRole(name1) - role, roleCreated := rm.getRole(name2) - - if userCreated { - defer rm.removeRole(user.name) - } - if roleCreated { - defer rm.removeRole(role.name) - } - - return rm.hasLinkHelper(role.name, map[string]*Role{user.name: user}, rm.maxHierarchyLevel), nil -} - -func (rm *RoleManagerImpl) hasLinkHelper(targetName string, roles map[string]*Role, level int) bool { - if level < 0 || len(roles) == 0 { - return false - } - - nextRoles := map[string]*Role{} - for _, role := range roles { - if targetName == role.name || (rm.matchingFunc != nil && rm.Match(role.name, targetName)) { - return true - } - role.rangeRoles(func(key, value interface{}) bool { - nextRoles[key.(string)] = value.(*Role) - return true - }) - } - - return rm.hasLinkHelper(targetName, nextRoles, level-1) -} - -// GetRoles gets the roles that a user inherits. -func (rm *RoleManagerImpl) GetRoles(name string, domains ...string) ([]string, error) { - user, created := rm.getRole(name) - if created { - defer rm.removeRole(user.name) - } - return user.getRoles(), nil -} - -// GetUsers gets the users of a role. -// domain is an unreferenced parameter here, may be used in other implementations. -func (rm *RoleManagerImpl) GetUsers(name string, domain ...string) ([]string, error) { - role, created := rm.getRole(name) - if created { - defer rm.removeRole(role.name) - } - return role.getUsers(), nil -} - -func (rm *RoleManagerImpl) toString() []string { - var roles []string - - rm.allRoles.Range(func(key, value interface{}) bool { - role := value.(*Role) - if text := role.toString(); text != "" { - roles = append(roles, text) - } - return true - }) - - return roles -} - -// PrintRoles prints all the roles to log. 
-func (rm *RoleManagerImpl) PrintRoles() error { - if !(rm.logger).IsEnabled() { - return nil - } - roles := rm.toString() - rm.logger.LogRole(roles) - return nil -} - -// GetDomains gets domains that a user has. -func (rm *RoleManagerImpl) GetDomains(name string) ([]string, error) { - domains := []string{defaultDomain} - return domains, nil -} - -// GetAllDomains gets all domains. -func (rm *RoleManagerImpl) GetAllDomains() ([]string, error) { - domains := []string{defaultDomain} - return domains, nil -} - -func (rm *RoleManagerImpl) copyFrom(other *RoleManagerImpl) { - other.Range(func(name1, name2 string, domain ...string) bool { - _ = rm.AddLink(name1, name2, domain...) - return true - }) -} - -func rangeLinks(users *sync.Map, fn func(name1, name2 string, domain ...string) bool) { - users.Range(func(_, value interface{}) bool { - user := value.(*Role) - user.roles.Range(func(key, _ interface{}) bool { - roleName := key.(string) - return fn(user.name, roleName, defaultDomain) - }) - return true - }) -} - -func (rm *RoleManagerImpl) Range(fn func(name1, name2 string, domain ...string) bool) { - rangeLinks(rm.allRoles, fn) -} - -// Deprecated: BuildRelationship is no longer required. -func (rm *RoleManagerImpl) BuildRelationship(name1 string, name2 string, domain ...string) error { - return nil -} - -type DomainManager struct { - rmMap *sync.Map - maxHierarchyLevel int - matchingFunc rbac.MatchingFunc - domainMatchingFunc rbac.MatchingFunc - logger log.Logger - matchingFuncCache *util.SyncLRUCache -} - -// NewDomainManager is the constructor for creating an instance of the -// default DomainManager implementation. -func NewDomainManager(maxHierarchyLevel int) *DomainManager { - dm := &DomainManager{} - _ = dm.Clear() // init rmMap and rmCache - dm.maxHierarchyLevel = maxHierarchyLevel - return dm -} - -// SetLogger sets role manager's logger. 
-func (dm *DomainManager) SetLogger(logger log.Logger) { - dm.logger = logger -} - -// AddMatchingFunc support use pattern in g. -func (dm *DomainManager) AddMatchingFunc(name string, fn rbac.MatchingFunc) { - dm.matchingFunc = fn - dm.rmMap.Range(func(key, value interface{}) bool { - value.(*RoleManagerImpl).AddMatchingFunc(name, fn) - return true - }) -} - -// AddDomainMatchingFunc support use domain pattern in g. -func (dm *DomainManager) AddDomainMatchingFunc(name string, fn rbac.MatchingFunc) { - dm.domainMatchingFunc = fn - dm.rmMap.Range(func(key, value interface{}) bool { - value.(*RoleManagerImpl).AddDomainMatchingFunc(name, fn) - return true - }) - dm.rebuild() -} - -// clears the map of RoleManagers. -func (dm *DomainManager) rebuild() { - rmMap := dm.rmMap - _ = dm.Clear() - rmMap.Range(func(key, value interface{}) bool { - domain := key.(string) - rm := value.(*RoleManagerImpl) - - rm.Range(func(name1, name2 string, _ ...string) bool { - _ = dm.AddLink(name1, name2, domain) - return true - }) - return true - }) -} - -// Clear clears all stored data and resets the role manager to the initial state. 
-func (dm *DomainManager) Clear() error { - dm.rmMap = &sync.Map{} - dm.matchingFuncCache = util.NewSyncLRUCache(100) - return nil -} - -func (dm *DomainManager) getDomain(domains ...string) (domain string, err error) { - switch len(domains) { - case 0: - return defaultDomain, nil - default: - return domains[0], nil - } -} - -func (dm *DomainManager) Match(str string, pattern string) bool { - if str == pattern { - return true - } - - if dm.domainMatchingFunc != nil { - return dm.domainMatchingFunc(str, pattern) - } else { - return false - } -} - -func (dm *DomainManager) rangeAffectedRoleManagers(domain string, fn func(rm *RoleManagerImpl)) { - if dm.domainMatchingFunc != nil { - dm.rmMap.Range(func(key, value interface{}) bool { - domain2 := key.(string) - if domain != domain2 && dm.Match(domain2, domain) { - fn(value.(*RoleManagerImpl)) - } - return true - }) - } -} - -func (dm *DomainManager) load(name interface{}) (value *RoleManagerImpl, ok bool) { - if r, ok := dm.rmMap.Load(name); ok { - return r.(*RoleManagerImpl), true - } - return nil, false -} - -// load or create a RoleManager instance of domain. -func (dm *DomainManager) getRoleManager(domain string, store bool) *RoleManagerImpl { - var rm *RoleManagerImpl - var ok bool - - if rm, ok = dm.load(domain); !ok { - rm = newRoleManagerWithMatchingFunc(dm.maxHierarchyLevel, dm.matchingFunc) - if store { - dm.rmMap.Store(domain, rm) - } - if dm.domainMatchingFunc != nil { - dm.rmMap.Range(func(key, value interface{}) bool { - domain2 := key.(string) - rm2 := value.(*RoleManagerImpl) - if domain != domain2 && dm.Match(domain, domain2) { - rm.copyFrom(rm2) - } - return true - }) - } - } - return rm -} - -// AddLink adds the inheritance link between role: name1 and role: name2. -// aka role: name1 inherits role: name2. -func (dm *DomainManager) AddLink(name1 string, name2 string, domains ...string) error { - domain, err := dm.getDomain(domains...) 
- if err != nil { - return err - } - roleManager := dm.getRoleManager(domain, true) // create role manager if it does not exist - _ = roleManager.AddLink(name1, name2, domains...) - - dm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { - _ = rm.AddLink(name1, name2, domains...) - }) - return nil -} - -// DeleteLink deletes the inheritance link between role: name1 and role: name2. -// aka role: name1 does not inherit role: name2 any more. -func (dm *DomainManager) DeleteLink(name1 string, name2 string, domains ...string) error { - domain, err := dm.getDomain(domains...) - if err != nil { - return err - } - roleManager := dm.getRoleManager(domain, true) // create role manager if it does not exist - _ = roleManager.DeleteLink(name1, name2, domains...) - - dm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { - _ = rm.DeleteLink(name1, name2, domains...) - }) - return nil -} - -// HasLink determines whether role: name1 inherits role: name2. -func (dm *DomainManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { - domain, err := dm.getDomain(domains...) - if err != nil { - return false, err - } - rm := dm.getRoleManager(domain, false) - return rm.HasLink(name1, name2, domains...) -} - -// GetRoles gets the roles that a subject inherits. -func (dm *DomainManager) GetRoles(name string, domains ...string) ([]string, error) { - domain, err := dm.getDomain(domains...) - if err != nil { - return nil, err - } - rm := dm.getRoleManager(domain, false) - return rm.GetRoles(name, domains...) -} - -// GetUsers gets the users of a role. -func (dm *DomainManager) GetUsers(name string, domains ...string) ([]string, error) { - domain, err := dm.getDomain(domains...) - if err != nil { - return nil, err - } - rm := dm.getRoleManager(domain, false) - return rm.GetUsers(name, domains...) 
-} - -func (dm *DomainManager) toString() []string { - var roles []string - - dm.rmMap.Range(func(key, value interface{}) bool { - domain := key.(string) - rm := value.(*RoleManagerImpl) - domainRoles := rm.toString() - roles = append(roles, fmt.Sprintf("%s: %s", domain, strings.Join(domainRoles, ", "))) - return true - }) - - return roles -} - -// PrintRoles prints all the roles to log. -func (dm *DomainManager) PrintRoles() error { - if !(dm.logger).IsEnabled() { - return nil - } - - roles := dm.toString() - dm.logger.LogRole(roles) - return nil -} - -// GetDomains gets domains that a user has. -func (dm *DomainManager) GetDomains(name string) ([]string, error) { - var domains []string - dm.rmMap.Range(func(key, value interface{}) bool { - domain := key.(string) - rm := value.(*RoleManagerImpl) - role, created := rm.getRole(name) - if created { - defer rm.removeRole(role.name) - } - if len(role.getUsers()) > 0 || len(role.getRoles()) > 0 { - domains = append(domains, domain) - } - return true - }) - return domains, nil -} - -// GetAllDomains gets all domains. -func (dm *DomainManager) GetAllDomains() ([]string, error) { - var domains []string - dm.rmMap.Range(func(key, value interface{}) bool { - domains = append(domains, key.(string)) - return true - }) - return domains, nil -} - -// Deprecated: BuildRelationship is no longer required. -func (dm *DomainManager) BuildRelationship(name1 string, name2 string, domain ...string) error { - return nil -} - -type RoleManager struct { - *DomainManager -} - -func NewRoleManager(maxHierarchyLevel int) *RoleManager { - rm := &RoleManager{} - rm.DomainManager = NewDomainManager(maxHierarchyLevel) - return rm -} - -type ConditionalRoleManager struct { - RoleManagerImpl -} - -func (crm *ConditionalRoleManager) copyFrom(other *ConditionalRoleManager) { - other.Range(func(name1, name2 string, domain ...string) bool { - _ = crm.AddLink(name1, name2, domain...) 
- return true - }) -} - -// use this constructor to avoid rebuild of AddMatchingFunc. -func newConditionalRoleManagerWithMatchingFunc(maxHierarchyLevel int, fn rbac.MatchingFunc) *ConditionalRoleManager { - rm := NewConditionalRoleManager(maxHierarchyLevel) - rm.matchingFunc = fn - return rm -} - -// NewConditionalRoleManager is the constructor for creating an instance of the -// ConditionalRoleManager implementation. -func NewConditionalRoleManager(maxHierarchyLevel int) *ConditionalRoleManager { - rm := ConditionalRoleManager{} - _ = rm.Clear() // init allRoles and matchingFuncCache - rm.maxHierarchyLevel = maxHierarchyLevel - rm.SetLogger(&log.DefaultLogger{}) - return &rm -} - -// HasLink determines whether role: name1 inherits role: name2. -func (crm *ConditionalRoleManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { - if name1 == name2 || (crm.matchingFunc != nil && crm.Match(name1, name2)) { - return true, nil - } - - user, userCreated := crm.getRole(name1) - role, roleCreated := crm.getRole(name2) - - if userCreated { - defer crm.removeRole(user.name) - } - if roleCreated { - defer crm.removeRole(role.name) - } - - return crm.hasLinkHelper(role.name, map[string]*Role{user.name: user}, crm.maxHierarchyLevel, domains...), nil -} - -// hasLinkHelper use the Breadth First Search algorithm to traverse the Role tree -// Judging whether the user has a role (has link) is to judge whether the role node can be reached from the user node. 
-func (crm *ConditionalRoleManager) hasLinkHelper(targetName string, roles map[string]*Role, level int, domains ...string) bool { - if level < 0 || len(roles) == 0 { - return false - } - nextRoles := map[string]*Role{} - for _, role := range roles { - if targetName == role.name || (crm.matchingFunc != nil && crm.Match(role.name, targetName)) { - return true - } - role.rangeRoles(func(key, value interface{}) bool { - nextRole := value.(*Role) - return crm.getNextRoles(role, nextRole, domains, nextRoles) - }) - } - - return crm.hasLinkHelper(targetName, nextRoles, level-1) -} - -func (crm *ConditionalRoleManager) getNextRoles(currentRole, nextRole *Role, domains []string, nextRoles map[string]*Role) bool { - passLinkConditionFunc := true - var err error - // If LinkConditionFunc exists, it needs to pass the verification to get nextRole - if len(domains) == 0 { - if linkConditionFunc, existLinkCondition := crm.GetLinkConditionFunc(currentRole.name, nextRole.name); existLinkCondition { - params, _ := crm.GetLinkConditionFuncParams(currentRole.name, nextRole.name) - passLinkConditionFunc, err = linkConditionFunc(params...) - } - } else { - if linkConditionFunc, existLinkCondition := crm.GetDomainLinkConditionFunc(currentRole.name, nextRole.name, domains[0]); existLinkCondition { - params, _ := crm.GetLinkConditionFuncParams(currentRole.name, nextRole.name, domains[0]) - passLinkConditionFunc, err = linkConditionFunc(params...) - } - } - - if err != nil { - crm.logger.LogError(err, "hasLinkHelper LinkCondition Error") - return false - } - - if passLinkConditionFunc { - nextRoles[nextRole.name] = nextRole - } - - return true -} - -// GetLinkConditionFunc get LinkConditionFunc based on userName, roleName. 
-func (crm *ConditionalRoleManager) GetLinkConditionFunc(userName, roleName string) (rbac.LinkConditionFunc, bool) { - return crm.GetDomainLinkConditionFunc(userName, roleName, defaultDomain) -} - -// GetDomainLinkConditionFunc get LinkConditionFunc based on userName, roleName, domain. -func (crm *ConditionalRoleManager) GetDomainLinkConditionFunc(userName, roleName, domain string) (rbac.LinkConditionFunc, bool) { - user, userCreated := crm.getRole(userName) - role, roleCreated := crm.getRole(roleName) - - if userCreated { - crm.removeRole(user.name) - return nil, false - } - - if roleCreated { - crm.removeRole(role.name) - return nil, false - } - - return user.getLinkConditionFunc(role, domain) -} - -// GetLinkConditionFuncParams gets parameters of LinkConditionFunc based on userName, roleName, domain. -func (crm *ConditionalRoleManager) GetLinkConditionFuncParams(userName, roleName string, domain ...string) ([]string, bool) { - user, userCreated := crm.getRole(userName) - role, roleCreated := crm.getRole(roleName) - - if userCreated { - crm.removeRole(user.name) - return nil, false - } - - if roleCreated { - crm.removeRole(role.name) - return nil, false - } - - domainName := defaultDomain - if len(domain) != 0 { - domainName = domain[0] - } - - if params, ok := user.getLinkConditionFuncParams(role, domainName); ok { - return params, true - } else { - return nil, false - } -} - -// AddLinkConditionFunc is based on userName, roleName, add LinkConditionFunc. -func (crm *ConditionalRoleManager) AddLinkConditionFunc(userName, roleName string, fn rbac.LinkConditionFunc) { - crm.AddDomainLinkConditionFunc(userName, roleName, defaultDomain, fn) -} - -// AddDomainLinkConditionFunc is based on userName, roleName, domain, add LinkConditionFunc. 
-func (crm *ConditionalRoleManager) AddDomainLinkConditionFunc(userName, roleName, domain string, fn rbac.LinkConditionFunc) { - user, _ := crm.getRole(userName) - role, _ := crm.getRole(roleName) - - user.addLinkConditionFunc(role, domain, fn) -} - -// SetLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. -func (crm *ConditionalRoleManager) SetLinkConditionFuncParams(userName, roleName string, params ...string) { - crm.SetDomainLinkConditionFuncParams(userName, roleName, defaultDomain, params...) -} - -// SetDomainLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. -func (crm *ConditionalRoleManager) SetDomainLinkConditionFuncParams(userName, roleName, domain string, params ...string) { - user, _ := crm.getRole(userName) - role, _ := crm.getRole(roleName) - - user.setLinkConditionFuncParams(role, domain, params...) -} - -type ConditionalDomainManager struct { - ConditionalRoleManager - DomainManager -} - -// NewConditionalDomainManager is the constructor for creating an instance of the -// ConditionalDomainManager implementation. -func NewConditionalDomainManager(maxHierarchyLevel int) *ConditionalDomainManager { - rm := ConditionalDomainManager{} - _ = rm.Clear() // init allRoles and matchingFuncCache - rm.maxHierarchyLevel = maxHierarchyLevel - rm.SetLogger(&log.DefaultLogger{}) - return &rm -} - -func (cdm *ConditionalDomainManager) load(name interface{}) (value *ConditionalRoleManager, ok bool) { - if r, ok := cdm.rmMap.Load(name); ok { - return r.(*ConditionalRoleManager), true - } - return nil, false -} - -// load or create a ConditionalRoleManager instance of domain. 
-func (cdm *ConditionalDomainManager) getConditionalRoleManager(domain string, store bool) *ConditionalRoleManager { - var rm *ConditionalRoleManager - var ok bool - - if rm, ok = cdm.load(domain); !ok { - rm = newConditionalRoleManagerWithMatchingFunc(cdm.maxHierarchyLevel, cdm.matchingFunc) - if store { - cdm.rmMap.Store(domain, rm) - } - if cdm.domainMatchingFunc != nil { - cdm.rmMap.Range(func(key, value interface{}) bool { - domain2 := key.(string) - rm2 := value.(*ConditionalRoleManager) - if domain != domain2 && cdm.Match(domain, domain2) { - rm.copyFrom(rm2) - } - return true - }) - } - } - return rm -} - -// HasLink determines whether role: name1 inherits role: name2. -func (cdm *ConditionalDomainManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { - domain, err := cdm.getDomain(domains...) - if err != nil { - return false, err - } - rm := cdm.getConditionalRoleManager(domain, false) - return rm.HasLink(name1, name2, domains...) -} - -// AddLink adds the inheritance link between role: name1 and role: name2. -// aka role: name1 inherits role: name2. -func (cdm *ConditionalDomainManager) AddLink(name1 string, name2 string, domains ...string) error { - domain, err := cdm.getDomain(domains...) - if err != nil { - return err - } - conditionalRoleManager := cdm.getConditionalRoleManager(domain, true) // create role manager if it does not exist - _ = conditionalRoleManager.AddLink(name1, name2, domain) - - cdm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { - _ = rm.AddLink(name1, name2, domain) - }) - return nil -} - -// DeleteLink deletes the inheritance link between role: name1 and role: name2. -// aka role: name1 does not inherit role: name2 any more. -func (cdm *ConditionalDomainManager) DeleteLink(name1 string, name2 string, domains ...string) error { - domain, err := cdm.getDomain(domains...) 
- if err != nil { - return err - } - conditionalRoleManager := cdm.getConditionalRoleManager(domain, true) // create role manager if it does not exist - _ = conditionalRoleManager.DeleteLink(name1, name2, domain) - - cdm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { - _ = rm.DeleteLink(name1, name2, domain) - }) - return nil -} - -// AddLinkConditionFunc is based on userName, roleName, add LinkConditionFunc. -func (cdm *ConditionalDomainManager) AddLinkConditionFunc(userName, roleName string, fn rbac.LinkConditionFunc) { - cdm.rmMap.Range(func(key, value interface{}) bool { - value.(*ConditionalRoleManager).AddLinkConditionFunc(userName, roleName, fn) - return true - }) -} - -// AddDomainLinkConditionFunc is based on userName, roleName, domain, add LinkConditionFunc. -func (cdm *ConditionalDomainManager) AddDomainLinkConditionFunc(userName, roleName, domain string, fn rbac.LinkConditionFunc) { - cdm.rmMap.Range(func(key, value interface{}) bool { - value.(*ConditionalRoleManager).AddDomainLinkConditionFunc(userName, roleName, domain, fn) - return true - }) -} - -// SetLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName. -func (cdm *ConditionalDomainManager) SetLinkConditionFuncParams(userName, roleName string, params ...string) { - cdm.rmMap.Range(func(key, value interface{}) bool { - value.(*ConditionalRoleManager).SetLinkConditionFuncParams(userName, roleName, params...) - return true - }) -} - -// SetDomainLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. -func (cdm *ConditionalDomainManager) SetDomainLinkConditionFuncParams(userName, roleName, domain string, params ...string) { - cdm.rmMap.Range(func(key, value interface{}) bool { - value.(*ConditionalRoleManager).SetDomainLinkConditionFuncParams(userName, roleName, domain, params...) 
- return true - }) -} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go deleted file mode 100644 index 28b40a35..00000000 --- a/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rbac - -import "github.com/casbin/casbin/v2/log" - -type MatchingFunc func(arg1 string, arg2 string) bool - -type LinkConditionFunc = func(args ...string) (bool, error) - -// RoleManager provides interface to define the operations for managing roles. -type RoleManager interface { - // Clear clears all stored data and resets the role manager to the initial state. - Clear() error - // AddLink adds the inheritance link between two roles. role: name1 and role: name2. - // domain is a prefix to the roles (can be used for other purposes). - AddLink(name1 string, name2 string, domain ...string) error - // Deprecated: BuildRelationship is no longer required - BuildRelationship(name1 string, name2 string, domain ...string) error - // DeleteLink deletes the inheritance link between two roles. role: name1 and role: name2. - // domain is a prefix to the roles (can be used for other purposes). - DeleteLink(name1 string, name2 string, domain ...string) error - // HasLink determines whether a link exists between two roles. role: name1 inherits role: name2. 
- // domain is a prefix to the roles (can be used for other purposes). - HasLink(name1 string, name2 string, domain ...string) (bool, error) - // GetRoles gets the roles that a user inherits. - // domain is a prefix to the roles (can be used for other purposes). - GetRoles(name string, domain ...string) ([]string, error) - // GetUsers gets the users that inherits a role. - // domain is a prefix to the users (can be used for other purposes). - GetUsers(name string, domain ...string) ([]string, error) - // GetDomains gets domains that a user has - GetDomains(name string) ([]string, error) - // GetAllDomains gets all domains - GetAllDomains() ([]string, error) - // PrintRoles prints all the roles to log. - PrintRoles() error - // SetLogger sets role manager's logger. - SetLogger(logger log.Logger) - // Match matches the domain with the pattern - Match(str string, pattern string) bool - // AddMatchingFunc adds the matching function - AddMatchingFunc(name string, fn MatchingFunc) - // AddDomainMatchingFunc adds the domain matching function - AddDomainMatchingFunc(name string, fn MatchingFunc) -} - -// ConditionalRoleManager provides interface to define the operations for managing roles. -// Link with conditions is supported. 
-type ConditionalRoleManager interface { - RoleManager - - // AddLinkConditionFunc Add condition function fn for Link userName->roleName, - // when fn returns true, Link is valid, otherwise invalid - AddLinkConditionFunc(userName, roleName string, fn LinkConditionFunc) - // SetLinkConditionFuncParams Sets the parameters of the condition function fn for Link userName->roleName - SetLinkConditionFuncParams(userName, roleName string, params ...string) - // AddDomainLinkConditionFunc Add condition function fn for Link userName-> {roleName, domain}, - // when fn returns true, Link is valid, otherwise invalid - AddDomainLinkConditionFunc(user string, role string, domain string, fn LinkConditionFunc) - // SetDomainLinkConditionFuncParams Sets the parameters of the condition function fn - // for Link userName->{roleName, domain} - SetDomainLinkConditionFuncParams(user string, role string, domain string, params ...string) -} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api.go b/vendor/github.com/casbin/casbin/v2/rbac_api.go deleted file mode 100644 index 91a8c2e9..00000000 --- a/vendor/github.com/casbin/casbin/v2/rbac_api.go +++ /dev/null @@ -1,607 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package casbin - -import ( - "fmt" - "strings" - - "github.com/casbin/casbin/v2/constant" - "github.com/casbin/casbin/v2/errors" - "github.com/casbin/casbin/v2/util" -) - -// GetRolesForUser gets the roles that a user has. -func (e *Enforcer) GetRolesForUser(name string, domain ...string) ([]string, error) { - rm := e.GetRoleManager() - if rm == nil { - return nil, fmt.Errorf("role manager is not initialized") - } - res, err := rm.GetRoles(name, domain...) - return res, err -} - -// GetUsersForRole gets the users that has a role. -func (e *Enforcer) GetUsersForRole(name string, domain ...string) ([]string, error) { - rm := e.GetRoleManager() - if rm == nil { - return nil, fmt.Errorf("role manager is not initialized") - } - res, err := rm.GetUsers(name, domain...) - return res, err -} - -// HasRoleForUser determines whether a user has a role. -func (e *Enforcer) HasRoleForUser(name string, role string, domain ...string) (bool, error) { - roles, err := e.GetRolesForUser(name, domain...) - if err != nil { - return false, err - } - hasRole := false - for _, r := range roles { - if r == role { - hasRole = true - break - } - } - - return hasRole, nil -} - -// AddRoleForUser adds a role for a user. -// Returns false if the user already has the role (aka not affected). -func (e *Enforcer) AddRoleForUser(user string, role string, domain ...string) (bool, error) { - args := []string{user, role} - args = append(args, domain...) - return e.AddGroupingPolicy(args) -} - -// AddRolesForUser adds roles for a user. -// Returns false if the user already has the roles (aka not affected). -func (e *Enforcer) AddRolesForUser(user string, roles []string, domain ...string) (bool, error) { - var rules [][]string - for _, role := range roles { - rule := []string{user, role} - rule = append(rule, domain...) - rules = append(rules, rule) - } - return e.AddGroupingPolicies(rules) -} - -// DeleteRoleForUser deletes a role for a user. 
-// Returns false if the user does not have the role (aka not affected). -func (e *Enforcer) DeleteRoleForUser(user string, role string, domain ...string) (bool, error) { - args := []string{user, role} - args = append(args, domain...) - return e.RemoveGroupingPolicy(args) -} - -// DeleteRolesForUser deletes all roles for a user. -// Returns false if the user does not have any roles (aka not affected). -func (e *Enforcer) DeleteRolesForUser(user string, domain ...string) (bool, error) { - var args []string - if len(domain) == 0 { - args = []string{user} - } else if len(domain) > 1 { - return false, errors.ErrDomainParameter - } else { - args = []string{user, "", domain[0]} - } - return e.RemoveFilteredGroupingPolicy(0, args...) -} - -// DeleteUser deletes a user. -// Returns false if the user does not exist (aka not affected). -func (e *Enforcer) DeleteUser(user string) (bool, error) { - var err error - res1, err := e.RemoveFilteredGroupingPolicy(0, user) - if err != nil { - return res1, err - } - - subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) - if err != nil { - return false, err - } - res2, err := e.RemoveFilteredPolicy(subIndex, user) - return res1 || res2, err -} - -// DeleteRole deletes a role. -// Returns false if the role does not exist (aka not affected). -func (e *Enforcer) DeleteRole(role string) (bool, error) { - var err error - res1, err := e.RemoveFilteredGroupingPolicy(0, role) - if err != nil { - return res1, err - } - - res2, err := e.RemoveFilteredGroupingPolicy(1, role) - if err != nil { - return res1, err - } - - subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) - if err != nil { - return false, err - } - res3, err := e.RemoveFilteredPolicy(subIndex, role) - return res1 || res2 || res3, err -} - -// DeletePermission deletes a permission. -// Returns false if the permission does not exist (aka not affected). 
-func (e *Enforcer) DeletePermission(permission ...string) (bool, error) { - return e.RemoveFilteredPolicy(1, permission...) -} - -// AddPermissionForUser adds a permission for a user or role. -// Returns false if the user or role already has the permission (aka not affected). -func (e *Enforcer) AddPermissionForUser(user string, permission ...string) (bool, error) { - return e.AddPolicy(util.JoinSlice(user, permission...)) -} - -// AddPermissionsForUser adds multiple permissions for a user or role. -// Returns false if the user or role already has one of the permissions (aka not affected). -func (e *Enforcer) AddPermissionsForUser(user string, permissions ...[]string) (bool, error) { - var rules [][]string - for _, permission := range permissions { - rules = append(rules, util.JoinSlice(user, permission...)) - } - return e.AddPolicies(rules) -} - -// DeletePermissionForUser deletes a permission for a user or role. -// Returns false if the user or role does not have the permission (aka not affected). -func (e *Enforcer) DeletePermissionForUser(user string, permission ...string) (bool, error) { - return e.RemovePolicy(util.JoinSlice(user, permission...)) -} - -// DeletePermissionsForUser deletes permissions for a user or role. -// Returns false if the user or role does not have any permissions (aka not affected). -func (e *Enforcer) DeletePermissionsForUser(user string) (bool, error) { - subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) - if err != nil { - return false, err - } - return e.RemoveFilteredPolicy(subIndex, user) -} - -// GetPermissionsForUser gets permissions for a user or role. -func (e *Enforcer) GetPermissionsForUser(user string, domain ...string) ([][]string, error) { - return e.GetNamedPermissionsForUser("p", user, domain...) -} - -// GetNamedPermissionsForUser gets permissions for a user or role by named policy. 
-func (e *Enforcer) GetNamedPermissionsForUser(ptype string, user string, domain ...string) ([][]string, error) { - permission := make([][]string, 0) - for pType, assertion := range e.model["p"] { - if pType != ptype { - continue - } - args := make([]string, len(assertion.Tokens)) - subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) - if err != nil { - subIndex = 0 - } - args[subIndex] = user - - if len(domain) > 0 { - var index int - index, err = e.GetFieldIndex(ptype, constant.DomainIndex) - if err != nil { - return permission, err - } - args[index] = domain[0] - } - perm, err := e.GetFilteredNamedPolicy(ptype, 0, args...) - if err != nil { - return permission, err - } - permission = append(permission, perm...) - } - return permission, nil -} - -// HasPermissionForUser determines whether a user has a permission. -func (e *Enforcer) HasPermissionForUser(user string, permission ...string) (bool, error) { - return e.HasPolicy(util.JoinSlice(user, permission...)) -} - -// GetImplicitRolesForUser gets implicit roles that a user has. -// Compared to GetRolesForUser(), this function retrieves indirect roles besides direct roles. -// For example: -// g, alice, role:admin -// g, role:admin, role:user -// -// GetRolesForUser("alice") can only get: ["role:admin"]. -// But GetImplicitRolesForUser("alice") will get: ["role:admin", "role:user"]. -func (e *Enforcer) GetImplicitRolesForUser(name string, domain ...string) ([]string, error) { - res := []string{} - - for _, rm := range e.rmMap { - roleSet := make(map[string]bool) - roleSet[name] = true - q := make([]string, 0) - q = append(q, name) - - for len(q) > 0 { - name := q[0] - q = q[1:] - - roles, err := rm.GetRoles(name, domain...) - if err != nil { - return nil, err - } - for _, r := range roles { - if _, ok := roleSet[r]; !ok { - res = append(res, r) - q = append(q, r) - roleSet[r] = true - } - } - } - } - - return res, nil -} - -// GetImplicitUsersForRole gets implicit users for a role. 
-func (e *Enforcer) GetImplicitUsersForRole(name string, domain ...string) ([]string, error) { - res := []string{} - - for _, rm := range e.rmMap { - roleSet := make(map[string]bool) - roleSet[name] = true - q := make([]string, 0) - q = append(q, name) - - for len(q) > 0 { - name := q[0] - q = q[1:] - - roles, err := rm.GetUsers(name, domain...) - if err != nil && err.Error() != "error: name does not exist" { - return nil, err - } - for _, r := range roles { - if _, ok := roleSet[r]; !ok { - res = append(res, r) - q = append(q, r) - roleSet[r] = true - } - } - } - } - - return res, nil -} - -// GetImplicitPermissionsForUser gets implicit permissions for a user or role. -// Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles. -// For example: -// p, admin, data1, read -// p, alice, data2, read -// g, alice, admin -// -// GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]]. -// But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]]. -func (e *Enforcer) GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) { - return e.GetNamedImplicitPermissionsForUser("p", user, domain...) -} - -// GetNamedImplicitPermissionsForUser gets implicit permissions for a user or role by named policy. -// Compared to GetNamedPermissionsForUser(), this function retrieves permissions for inherited roles. -// For example: -// p, admin, data1, read -// p2, admin, create -// g, alice, admin -// -// GetImplicitPermissionsForUser("alice") can only get: [["admin", "data1", "read"]], whose policy is default policy "p" -// But you can specify the named policy "p2" to get: [["admin", "create"]] by GetNamedImplicitPermissionsForUser("p2","alice"). 
-func (e *Enforcer) GetNamedImplicitPermissionsForUser(ptype string, user string, domain ...string) ([][]string, error) { - permission := make([][]string, 0) - rm := e.GetRoleManager() - if rm == nil { - return nil, fmt.Errorf("role manager is not initialized") - } - domainIndex, _ := e.GetFieldIndex(ptype, constant.DomainIndex) - for _, rule := range e.model["p"][ptype].Policy { - if len(domain) == 0 { - matched, _ := rm.HasLink(user, rule[0]) - if matched { - permission = append(permission, deepCopyPolicy(rule)) - } - continue - } - if len(domain) > 1 { - return nil, errors.ErrDomainParameter - } - d := domain[0] - matched := rm.Match(d, rule[domainIndex]) - if !matched { - continue - } - matched, _ = rm.HasLink(user, rule[0], d) - if matched { - newRule := deepCopyPolicy(rule) - newRule[domainIndex] = d - permission = append(permission, newRule) - } - } - return permission, nil -} - -// GetImplicitUsersForPermission gets implicit users for a permission. -// For example: -// p, admin, data1, read -// p, bob, data1, read -// g, alice, admin -// -// GetImplicitUsersForPermission("data1", "read") will get: ["alice", "bob"]. -// Note: only users will be returned, roles (2nd arg in "g") will be excluded. -func (e *Enforcer) GetImplicitUsersForPermission(permission ...string) ([]string, error) { - pSubjects, err := e.GetAllSubjects() - if err != nil { - return nil, err - } - gInherit, err := e.model.GetValuesForFieldInPolicyAllTypes("g", 1) - if err != nil { - return nil, err - } - gSubjects, err := e.model.GetValuesForFieldInPolicyAllTypes("g", 0) - if err != nil { - return nil, err - } - - subjects := append(pSubjects, gSubjects...) - util.ArrayRemoveDuplicates(&subjects) - - subjects = util.SetSubtract(subjects, gInherit) - - res := []string{} - for _, user := range subjects { - req := util.JoinSliceAny(user, permission...) - allowed, err := e.Enforce(req...) 
- if err != nil { - return nil, err - } - - if allowed { - res = append(res, user) - } - } - - return res, nil -} - -// GetDomainsForUser gets all domains. -func (e *Enforcer) GetDomainsForUser(user string) ([]string, error) { - var domains []string - for _, rm := range e.rmMap { - domain, err := rm.GetDomains(user) - if err != nil { - return nil, err - } - domains = append(domains, domain...) - } - return domains, nil -} - -// GetImplicitResourcesForUser returns all policies that user obtaining in domain. -func (e *Enforcer) GetImplicitResourcesForUser(user string, domain ...string) ([][]string, error) { - permissions, err := e.GetImplicitPermissionsForUser(user, domain...) - if err != nil { - return nil, err - } - res := make([][]string, 0) - for _, permission := range permissions { - if permission[0] == user { - res = append(res, permission) - continue - } - resLocal := [][]string{{user}} - tokensLength := len(permission) - t := make([][]string, 1, tokensLength) - for _, token := range permission[1:] { - tokens, err := e.GetImplicitUsersForRole(token, domain...) - if err != nil { - return nil, err - } - tokens = append(tokens, token) - t = append(t, tokens) - } - for i := 1; i < tokensLength; i++ { - n := make([][]string, 0) - for _, tokens := range t[i] { - for _, policy := range resLocal { - t := append([]string(nil), policy...) - t = append(t, tokens) - n = append(n, t) - } - } - resLocal = n - } - res = append(res, resLocal...) - } - return res, nil -} - -// deepCopyPolicy returns a deepcopy version of the policy to prevent changing policies through returned slice. -func deepCopyPolicy(src []string) []string { - newRule := make([]string, len(src)) - copy(newRule, src) - return newRule -} - -// GetAllowedObjectConditions returns a string array of object conditions that the user can access. -// For example: conditions, err := e.GetAllowedObjectConditions("alice", "read", "r.obj.") -// Note: -// -// 0. 
prefix: You can customize the prefix of the object conditions, and "r.obj." is commonly used as a prefix. -// After removing the prefix, the remaining part is the condition of the object. -// If there is an obj policy that does not meet the prefix requirement, an errors.ERR_OBJ_CONDITION will be returned. -// -// 1. If the 'objectConditions' array is empty, return errors.ERR_EMPTY_CONDITION -// This error is returned because some data adapters' ORM return full table data by default -// when they receive an empty condition, which tends to behave contrary to expectations.(e.g. GORM) -// If you are using an adapter that does not behave like this, you can choose to ignore this error. -func (e *Enforcer) GetAllowedObjectConditions(user string, action string, prefix string) ([]string, error) { - permissions, err := e.GetImplicitPermissionsForUser(user) - if err != nil { - return nil, err - } - - var objectConditions []string - for _, policy := range permissions { - // policy {sub, obj, act} - if policy[2] == action { - if !strings.HasPrefix(policy[1], prefix) { - return nil, errors.ErrObjCondition - } - objectConditions = append(objectConditions, strings.TrimPrefix(policy[1], prefix)) - } - } - - if len(objectConditions) == 0 { - return nil, errors.ErrEmptyCondition - } - - return objectConditions, nil -} - -// removeDuplicatePermissions Convert permissions to string as a hash to deduplicate. -func removeDuplicatePermissions(permissions [][]string) [][]string { - permissionsSet := make(map[string]bool) - res := make([][]string, 0) - for _, permission := range permissions { - permissionStr := util.ArrayToString(permission) - if permissionsSet[permissionStr] { - continue - } - permissionsSet[permissionStr] = true - res = append(res, permission) - } - return res -} - -// GetImplicitUsersForResource return implicit user based on resource. 
-// for example: -// p, alice, data1, read -// p, bob, data2, write -// p, data2_admin, data2, read -// p, data2_admin, data2, write -// g, alice, data2_admin -// GetImplicitUsersForResource("data2") will return [[bob data2 write] [alice data2 read] [alice data2 write]] -// GetImplicitUsersForResource("data1") will return [[alice data1 read]] -// Note: only users will be returned, roles (2nd arg in "g") will be excluded. -func (e *Enforcer) GetImplicitUsersForResource(resource string) ([][]string, error) { - permissions := make([][]string, 0) - subjectIndex, _ := e.GetFieldIndex("p", "sub") - objectIndex, _ := e.GetFieldIndex("p", "obj") - rm := e.GetRoleManager() - if rm == nil { - return nil, fmt.Errorf("role manager is not initialized") - } - - isRole := make(map[string]bool) - roles, err := e.GetAllRoles() - if err != nil { - return nil, err - } - for _, role := range roles { - isRole[role] = true - } - - for _, rule := range e.model["p"]["p"].Policy { - obj := rule[objectIndex] - if obj != resource { - continue - } - - sub := rule[subjectIndex] - - if !isRole[sub] { - permissions = append(permissions, rule) - } else { - users, err := rm.GetUsers(sub) - if err != nil { - return nil, err - } - - for _, user := range users { - implicitUserRule := deepCopyPolicy(rule) - implicitUserRule[subjectIndex] = user - permissions = append(permissions, implicitUserRule) - } - } - } - - res := removeDuplicatePermissions(permissions) - return res, nil -} - -// GetImplicitUsersForResourceByDomain return implicit user based on resource and domain. -// Compared to GetImplicitUsersForResource, domain is supported. 
-func (e *Enforcer) GetImplicitUsersForResourceByDomain(resource string, domain string) ([][]string, error) { - permissions := make([][]string, 0) - subjectIndex, _ := e.GetFieldIndex("p", "sub") - objectIndex, _ := e.GetFieldIndex("p", "obj") - domIndex, _ := e.GetFieldIndex("p", "dom") - rm := e.GetRoleManager() - if rm == nil { - return nil, fmt.Errorf("role manager is not initialized") - } - - isRole := make(map[string]bool) - - if roles, err := e.GetAllRolesByDomain(domain); err != nil { - return nil, err - } else { - for _, role := range roles { - isRole[role] = true - } - } - - for _, rule := range e.model["p"]["p"].Policy { - obj := rule[objectIndex] - if obj != resource { - continue - } - - sub := rule[subjectIndex] - - if !isRole[sub] { - permissions = append(permissions, rule) - } else { - if domain != rule[domIndex] { - continue - } - users, err := rm.GetUsers(sub, domain) - if err != nil { - return nil, err - } - - for _, user := range users { - implicitUserRule := deepCopyPolicy(rule) - implicitUserRule[subjectIndex] = user - permissions = append(permissions, implicitUserRule) - } - } - } - - res := removeDuplicatePermissions(permissions) - return res, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go b/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go deleted file mode 100644 index 69b6295a..00000000 --- a/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -// GetRolesForUser gets the roles that a user has. -func (e *SyncedEnforcer) GetRolesForUser(name string, domain ...string) ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetRolesForUser(name, domain...) -} - -// GetUsersForRole gets the users that has a role. -func (e *SyncedEnforcer) GetUsersForRole(name string, domain ...string) ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetUsersForRole(name, domain...) -} - -// HasRoleForUser determines whether a user has a role. -func (e *SyncedEnforcer) HasRoleForUser(name string, role string, domain ...string) (bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.HasRoleForUser(name, role, domain...) -} - -// AddRoleForUser adds a role for a user. -// Returns false if the user already has the role (aka not affected). -func (e *SyncedEnforcer) AddRoleForUser(user string, role string, domain ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddRoleForUser(user, role, domain...) -} - -// AddRolesForUser adds roles for a user. -// Returns false if the user already has the roles (aka not affected). -func (e *SyncedEnforcer) AddRolesForUser(user string, roles []string, domain ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddRolesForUser(user, roles, domain...) -} - -// DeleteRoleForUser deletes a role for a user. -// Returns false if the user does not have the role (aka not affected). -func (e *SyncedEnforcer) DeleteRoleForUser(user string, role string, domain ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeleteRoleForUser(user, role, domain...) -} - -// DeleteRolesForUser deletes all roles for a user. -// Returns false if the user does not have any roles (aka not affected). 
-func (e *SyncedEnforcer) DeleteRolesForUser(user string, domain ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeleteRolesForUser(user, domain...) -} - -// DeleteUser deletes a user. -// Returns false if the user does not exist (aka not affected). -func (e *SyncedEnforcer) DeleteUser(user string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeleteUser(user) -} - -// DeleteRole deletes a role. -// Returns false if the role does not exist (aka not affected). -func (e *SyncedEnforcer) DeleteRole(role string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeleteRole(role) -} - -// DeletePermission deletes a permission. -// Returns false if the permission does not exist (aka not affected). -func (e *SyncedEnforcer) DeletePermission(permission ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeletePermission(permission...) -} - -// AddPermissionForUser adds a permission for a user or role. -// Returns false if the user or role already has the permission (aka not affected). -func (e *SyncedEnforcer) AddPermissionForUser(user string, permission ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddPermissionForUser(user, permission...) -} - -// AddPermissionsForUser adds permissions for a user or role. -// Returns false if the user or role already has the permissions (aka not affected). -func (e *SyncedEnforcer) AddPermissionsForUser(user string, permissions ...[]string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddPermissionsForUser(user, permissions...) -} - -// DeletePermissionForUser deletes a permission for a user or role. -// Returns false if the user or role does not have the permission (aka not affected). 
-func (e *SyncedEnforcer) DeletePermissionForUser(user string, permission ...string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeletePermissionForUser(user, permission...) -} - -// DeletePermissionsForUser deletes permissions for a user or role. -// Returns false if the user or role does not have any permissions (aka not affected). -func (e *SyncedEnforcer) DeletePermissionsForUser(user string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeletePermissionsForUser(user) -} - -// GetPermissionsForUser gets permissions for a user or role. -func (e *SyncedEnforcer) GetPermissionsForUser(user string, domain ...string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetPermissionsForUser(user, domain...) -} - -// GetNamedPermissionsForUser gets permissions for a user or role by named policy. -func (e *SyncedEnforcer) GetNamedPermissionsForUser(ptype string, user string, domain ...string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetNamedPermissionsForUser(ptype, user, domain...) -} - -// HasPermissionForUser determines whether a user has a permission. -func (e *SyncedEnforcer) HasPermissionForUser(user string, permission ...string) (bool, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.HasPermissionForUser(user, permission...) -} - -// GetImplicitRolesForUser gets implicit roles that a user has. -// Compared to GetRolesForUser(), this function retrieves indirect roles besides direct roles. -// For example: -// g, alice, role:admin -// g, role:admin, role:user -// -// GetRolesForUser("alice") can only get: ["role:admin"]. -// But GetImplicitRolesForUser("alice") will get: ["role:admin", "role:user"]. -func (e *SyncedEnforcer) GetImplicitRolesForUser(name string, domain ...string) ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetImplicitRolesForUser(name, domain...) 
-} - -// GetImplicitPermissionsForUser gets implicit permissions for a user or role. -// Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles. -// For example: -// p, admin, data1, read -// p, alice, data2, read -// g, alice, admin -// -// GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]]. -// But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]]. -func (e *SyncedEnforcer) GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.GetImplicitPermissionsForUser(user, domain...) -} - -// GetNamedImplicitPermissionsForUser gets implicit permissions for a user or role by named policy. -// Compared to GetNamedPermissionsForUser(), this function retrieves permissions for inherited roles. -// For example: -// p, admin, data1, read -// p2, admin, create -// g, alice, admin -// -// GetImplicitPermissionsForUser("alice") can only get: [["admin", "data1", "read"]], whose policy is default policy "p" -// But you can specify the named policy "p2" to get: [["admin", "create"]] by GetNamedImplicitPermissionsForUser("p2","alice"). -func (e *SyncedEnforcer) GetNamedImplicitPermissionsForUser(ptype string, user string, domain ...string) ([][]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetNamedImplicitPermissionsForUser(ptype, user, domain...) -} - -// GetImplicitUsersForPermission gets implicit users for a permission. -// For example: -// p, admin, data1, read -// p, bob, data1, read -// g, alice, admin -// -// GetImplicitUsersForPermission("data1", "read") will get: ["alice", "bob"]. -// Note: only users will be returned, roles (2nd arg in "g") will be excluded. -func (e *SyncedEnforcer) GetImplicitUsersForPermission(permission ...string) ([]string, error) { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetImplicitUsersForPermission(permission...) 
-} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go deleted file mode 100644 index f6fc4a24..00000000 --- a/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -import ( - "fmt" - - "github.com/casbin/casbin/v2/constant" -) - -// GetUsersForRoleInDomain gets the users that has a role inside a domain. Add by Gordon. -func (e *Enforcer) GetUsersForRoleInDomain(name string, domain string) []string { - if e.GetRoleManager() == nil { - return nil - } - res, _ := e.GetRoleManager().GetUsers(name, domain) - return res -} - -// GetRolesForUserInDomain gets the roles that a user has inside a domain. -func (e *Enforcer) GetRolesForUserInDomain(name string, domain string) []string { - if e.GetRoleManager() == nil { - return nil - } - res, _ := e.GetRoleManager().GetRoles(name, domain) - return res -} - -// GetPermissionsForUserInDomain gets permissions for a user or role inside a domain. -func (e *Enforcer) GetPermissionsForUserInDomain(user string, domain string) [][]string { - res, _ := e.GetImplicitPermissionsForUser(user, domain) - return res -} - -// AddRoleForUserInDomain adds a role for a user inside a domain. -// Returns false if the user already has the role (aka not affected). 
-func (e *Enforcer) AddRoleForUserInDomain(user string, role string, domain string) (bool, error) { - return e.AddGroupingPolicy(user, role, domain) -} - -// DeleteRoleForUserInDomain deletes a role for a user inside a domain. -// Returns false if the user does not have the role (aka not affected). -func (e *Enforcer) DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) { - return e.RemoveGroupingPolicy(user, role, domain) -} - -// DeleteRolesForUserInDomain deletes all roles for a user inside a domain. -// Returns false if the user does not have any roles (aka not affected). -func (e *Enforcer) DeleteRolesForUserInDomain(user string, domain string) (bool, error) { - if e.GetRoleManager() == nil { - return false, fmt.Errorf("role manager is not initialized") - } - roles, err := e.GetRoleManager().GetRoles(user, domain) - if err != nil { - return false, err - } - - var rules [][]string - for _, role := range roles { - rules = append(rules, []string{user, role, domain}) - } - - return e.RemoveGroupingPolicies(rules) -} - -// GetAllUsersByDomain would get all users associated with the domain. -func (e *Enforcer) GetAllUsersByDomain(domain string) ([]string, error) { - m := make(map[string]struct{}) - g, err := e.model.GetAssertion("g", "g") - if err != nil { - return []string{}, err - } - p := e.model["p"]["p"] - users := make([]string, 0) - index, err := e.GetFieldIndex("p", constant.DomainIndex) - if err != nil { - return []string{}, err - } - - getUser := func(index int, policies [][]string, domain string, m map[string]struct{}) []string { - if len(policies) == 0 || len(policies[0]) <= index { - return []string{} - } - res := make([]string, 0) - for _, policy := range policies { - if _, ok := m[policy[0]]; policy[index] == domain && !ok { - res = append(res, policy[0]) - m[policy[0]] = struct{}{} - } - } - return res - } - - users = append(users, getUser(2, g.Policy, domain, m)...) 
- users = append(users, getUser(index, p.Policy, domain, m)...) - return users, nil -} - -// DeleteAllUsersByDomain would delete all users associated with the domain. -func (e *Enforcer) DeleteAllUsersByDomain(domain string) (bool, error) { - g, err := e.model.GetAssertion("g", "g") - if err != nil { - return false, err - } - p := e.model["p"]["p"] - index, err := e.GetFieldIndex("p", constant.DomainIndex) - if err != nil { - return false, err - } - - getUser := func(index int, policies [][]string, domain string) [][]string { - if len(policies) == 0 || len(policies[0]) <= index { - return [][]string{} - } - res := make([][]string, 0) - for _, policy := range policies { - if policy[index] == domain { - res = append(res, policy) - } - } - return res - } - - users := getUser(2, g.Policy, domain) - if _, err = e.RemoveGroupingPolicies(users); err != nil { - return false, err - } - users = getUser(index, p.Policy, domain) - if _, err = e.RemovePolicies(users); err != nil { - return false, err - } - return true, nil -} - -// DeleteDomains would delete all associated users and roles. -// It would delete all domains if parameter is not provided. -func (e *Enforcer) DeleteDomains(domains ...string) (bool, error) { - if len(domains) == 0 { - e.ClearPolicy() - return true, nil - } - for _, domain := range domains { - if _, err := e.DeleteAllUsersByDomain(domain); err != nil { - return false, err - } - } - return true, nil -} - -// GetAllDomains would get all domains. -func (e *Enforcer) GetAllDomains() ([]string, error) { - if e.GetRoleManager() == nil { - return nil, fmt.Errorf("role manager is not initialized") - } - return e.GetRoleManager().GetAllDomains() -} - -// GetAllRolesByDomain would get all roles associated with the domain. 
-// note: Not applicable to Domains with inheritance relationship (implicit roles) -func (e *Enforcer) GetAllRolesByDomain(domain string) ([]string, error) { - g, err := e.model.GetAssertion("g", "g") - if err != nil { - return []string{}, err - } - policies := g.Policy - roles := make([]string, 0) - existMap := make(map[string]bool) // remove duplicates - - for _, policy := range policies { - if policy[len(policy)-1] == domain { - role := policy[len(policy)-2] - if _, ok := existMap[role]; !ok { - roles = append(roles, role) - existMap[role] = true - } - } - } - - return roles, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go deleted file mode 100644 index 26f6ce4b..00000000 --- a/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package casbin - -// GetUsersForRoleInDomain gets the users that has a role inside a domain. Add by Gordon. -func (e *SyncedEnforcer) GetUsersForRoleInDomain(name string, domain string) []string { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetUsersForRoleInDomain(name, domain) -} - -// GetRolesForUserInDomain gets the roles that a user has inside a domain. 
-func (e *SyncedEnforcer) GetRolesForUserInDomain(name string, domain string) []string { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetRolesForUserInDomain(name, domain) -} - -// GetPermissionsForUserInDomain gets permissions for a user or role inside a domain. -func (e *SyncedEnforcer) GetPermissionsForUserInDomain(user string, domain string) [][]string { - e.m.RLock() - defer e.m.RUnlock() - return e.Enforcer.GetPermissionsForUserInDomain(user, domain) -} - -// AddRoleForUserInDomain adds a role for a user inside a domain. -// Returns false if the user already has the role (aka not affected). -func (e *SyncedEnforcer) AddRoleForUserInDomain(user string, role string, domain string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.AddRoleForUserInDomain(user, role, domain) -} - -// DeleteRoleForUserInDomain deletes a role for a user inside a domain. -// Returns false if the user does not have the role (aka not affected). -func (e *SyncedEnforcer) DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeleteRoleForUserInDomain(user, role, domain) -} - -// DeleteRolesForUserInDomain deletes all roles for a user inside a domain. -// Returns false if the user does not have any roles (aka not affected). -func (e *SyncedEnforcer) DeleteRolesForUserInDomain(user string, domain string) (bool, error) { - e.m.Lock() - defer e.m.Unlock() - return e.Enforcer.DeleteRolesForUserInDomain(user, domain) -} diff --git a/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go b/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go deleted file mode 100644 index bab9b0b3..00000000 --- a/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "errors" - "fmt" - "net" - "path" - "regexp" - "strings" - "sync" - "time" - - "github.com/casbin/casbin/v2/rbac" - - "github.com/casbin/govaluate" -) - -var ( - keyMatch4Re *regexp.Regexp = regexp.MustCompile(`{([^/]+)}`) -) - -// validate the variadic parameter size and type as string. -func validateVariadicArgs(expectedLen int, args ...interface{}) error { - if len(args) != expectedLen { - return fmt.Errorf("expected %d arguments, but got %d", expectedLen, len(args)) - } - - for _, p := range args { - _, ok := p.(string) - if !ok { - return errors.New("argument must be a string") - } - } - - return nil -} - -// validate the variadic string parameter size. -func validateVariadicStringArgs(expectedLen int, args ...string) error { - if len(args) != expectedLen { - return fmt.Errorf("expected %d arguments, but got %d", expectedLen, len(args)) - } - return nil -} - -// KeyMatch determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. -// For example, "/foo/bar" matches "/foo/*". -func KeyMatch(key1 string, key2 string) bool { - i := strings.Index(key2, "*") - if i == -1 { - return key1 == key2 - } - - if len(key1) > i { - return key1[:i] == key2[:i] - } - return key1 == key2[:i] -} - -// KeyMatchFunc is the wrapper for KeyMatch. 
-func KeyMatchFunc(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "keyMatch", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - - return KeyMatch(name1, name2), nil -} - -// KeyGet returns the matched part -// For example, "/foo/bar/foo" matches "/foo/*" -// "bar/foo" will been returned. -func KeyGet(key1, key2 string) string { - i := strings.Index(key2, "*") - if i == -1 { - return "" - } - if len(key1) > i { - if key1[:i] == key2[:i] { - return key1[i:] - } - } - return "" -} - -// KeyGetFunc is the wrapper for KeyGet. -func KeyGetFunc(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "keyGet", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - - return KeyGet(name1, name2), nil -} - -// KeyMatch2 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. -// For example, "/foo/bar" matches "/foo/*", "/resource1" matches "/:resource". -func KeyMatch2(key1 string, key2 string) bool { - key2 = strings.Replace(key2, "/*", "/.*", -1) - - re := regexp.MustCompile(`:[^/]+`) - key2 = re.ReplaceAllString(key2, "$1[^/]+$2") - - return RegexMatch(key1, "^"+key2+"$") -} - -// KeyMatch2Func is the wrapper for KeyMatch2. -func KeyMatch2Func(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "keyMatch2", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - - return KeyMatch2(name1, name2), nil -} - -// KeyGet2 returns value matched pattern -// For example, "/resource1" matches "/:resource" -// if the pathVar == "resource", then "resource1" will be returned. 
-func KeyGet2(key1, key2 string, pathVar string) string { - key2 = strings.Replace(key2, "/*", "/.*", -1) - - re := regexp.MustCompile(`:[^/]+`) - keys := re.FindAllString(key2, -1) - key2 = re.ReplaceAllString(key2, "$1([^/]+)$2") - key2 = "^" + key2 + "$" - re2 := regexp.MustCompile(key2) - values := re2.FindAllStringSubmatch(key1, -1) - if len(values) == 0 { - return "" - } - for i, key := range keys { - if pathVar == key[1:] { - return values[0][i+1] - } - } - return "" -} - -// KeyGet2Func is the wrapper for KeyGet2. -func KeyGet2Func(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(3, args...); err != nil { - return false, fmt.Errorf("%s: %w", "keyGet2", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - key := args[2].(string) - - return KeyGet2(name1, name2, key), nil -} - -// KeyMatch3 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. -// For example, "/foo/bar" matches "/foo/*", "/resource1" matches "/{resource}". -func KeyMatch3(key1 string, key2 string) bool { - key2 = strings.Replace(key2, "/*", "/.*", -1) - - re := regexp.MustCompile(`\{[^/]+\}`) - key2 = re.ReplaceAllString(key2, "$1[^/]+$2") - - return RegexMatch(key1, "^"+key2+"$") -} - -// KeyMatch3Func is the wrapper for KeyMatch3. -func KeyMatch3Func(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "keyMatch3", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - - return KeyMatch3(name1, name2), nil -} - -// KeyGet3 returns value matched pattern -// For example, "project/proj_project1_admin/" matches "project/proj_{project}_admin/" -// if the pathVar == "project", then "project1" will be returned. 
-func KeyGet3(key1, key2 string, pathVar string) string { - key2 = strings.Replace(key2, "/*", "/.*", -1) - - re := regexp.MustCompile(`\{[^/]+?\}`) // non-greedy match of `{...}` to support multiple {} in `/.../` - keys := re.FindAllString(key2, -1) - key2 = re.ReplaceAllString(key2, "$1([^/]+?)$2") - key2 = "^" + key2 + "$" - re2 := regexp.MustCompile(key2) - values := re2.FindAllStringSubmatch(key1, -1) - if len(values) == 0 { - return "" - } - for i, key := range keys { - if pathVar == key[1:len(key)-1] { - return values[0][i+1] - } - } - return "" -} - -// KeyGet3Func is the wrapper for KeyGet3. -func KeyGet3Func(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(3, args...); err != nil { - return false, fmt.Errorf("%s: %w", "keyGet3", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - key := args[2].(string) - - return KeyGet3(name1, name2, key), nil -} - -// KeyMatch4 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. -// Besides what KeyMatch3 does, KeyMatch4 can also match repeated patterns: -// "/parent/123/child/123" matches "/parent/{id}/child/{id}" -// "/parent/123/child/456" does not match "/parent/{id}/child/{id}" -// But KeyMatch3 will match both. 
-func KeyMatch4(key1 string, key2 string) bool { - key2 = strings.Replace(key2, "/*", "/.*", -1) - - tokens := []string{} - - re := keyMatch4Re - key2 = re.ReplaceAllStringFunc(key2, func(s string) string { - tokens = append(tokens, s[1:len(s)-1]) - return "([^/]+)" - }) - - re = regexp.MustCompile("^" + key2 + "$") - matches := re.FindStringSubmatch(key1) - if matches == nil { - return false - } - matches = matches[1:] - - if len(tokens) != len(matches) { - panic(errors.New("KeyMatch4: number of tokens is not equal to number of values")) - } - - values := map[string]string{} - - for key, token := range tokens { - if _, ok := values[token]; !ok { - values[token] = matches[key] - } - if values[token] != matches[key] { - return false - } - } - - return true -} - -// KeyMatch4Func is the wrapper for KeyMatch4. -func KeyMatch4Func(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "keyMatch4", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - - return KeyMatch4(name1, name2), nil -} - -// KeyMatch5 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a * -// For example, -// - "/foo/bar?status=1&type=2" matches "/foo/bar" -// - "/parent/child1" and "/parent/child1" matches "/parent/*" -// - "/parent/child1?status=1" matches "/parent/*". -func KeyMatch5(key1 string, key2 string) bool { - i := strings.Index(key1, "?") - - if i != -1 { - key1 = key1[:i] - } - - key2 = strings.Replace(key2, "/*", "/.*", -1) - - re := regexp.MustCompile(`\{[^/]+\}`) - key2 = re.ReplaceAllString(key2, "$1[^/]+$2") - - return RegexMatch(key1, "^"+key2+"$") -} - -// KeyMatch5Func is the wrapper for KeyMatch5. 
-func KeyMatch5Func(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "keyMatch5", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - - return KeyMatch5(name1, name2), nil -} - -// RegexMatch determines whether key1 matches the pattern of key2 in regular expression. -func RegexMatch(key1 string, key2 string) bool { - res, err := regexp.MatchString(key2, key1) - if err != nil { - panic(err) - } - return res -} - -// RegexMatchFunc is the wrapper for RegexMatch. -func RegexMatchFunc(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "regexMatch", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - - return RegexMatch(name1, name2), nil -} - -// IPMatch determines whether IP address ip1 matches the pattern of IP address ip2, ip2 can be an IP address or a CIDR pattern. -// For example, "192.168.2.123" matches "192.168.2.0/24". -func IPMatch(ip1 string, ip2 string) bool { - objIP1 := net.ParseIP(ip1) - if objIP1 == nil { - panic("invalid argument: ip1 in IPMatch() function is not an IP address.") - } - - _, cidr, err := net.ParseCIDR(ip2) - if err != nil { - objIP2 := net.ParseIP(ip2) - if objIP2 == nil { - panic("invalid argument: ip2 in IPMatch() function is neither an IP address nor a CIDR.") - } - - return objIP1.Equal(objIP2) - } - - return cidr.Contains(objIP1) -} - -// IPMatchFunc is the wrapper for IPMatch. -func IPMatchFunc(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "ipMatch", err) - } - - ip1 := args[0].(string) - ip2 := args[1].(string) - - return IPMatch(ip1, ip2), nil -} - -// GlobMatch determines whether key1 matches the pattern of key2 using glob pattern. 
-func GlobMatch(key1 string, key2 string) (bool, error) { - return path.Match(key2, key1) -} - -// GlobMatchFunc is the wrapper for GlobMatch. -func GlobMatchFunc(args ...interface{}) (interface{}, error) { - if err := validateVariadicArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "globMatch", err) - } - - name1 := args[0].(string) - name2 := args[1].(string) - - return GlobMatch(name1, name2) -} - -// GenerateGFunction is the factory method of the g(_, _[, _]) function. -func GenerateGFunction(rm rbac.RoleManager) govaluate.ExpressionFunction { - memorized := sync.Map{} - return func(args ...interface{}) (interface{}, error) { - // Like all our other govaluate functions, all args are strings. - - // Allocate and generate a cache key from the arguments... - total := len(args) - for _, a := range args { - aStr := a.(string) - total += len(aStr) - } - builder := strings.Builder{} - builder.Grow(total) - for _, arg := range args { - builder.WriteByte(0) - builder.WriteString(arg.(string)) - } - key := builder.String() - - // ...and see if we've already calculated this. - v, found := memorized.Load(key) - if found { - return v, nil - } - - // If not, do the calculation. - // There are guaranteed to be exactly 2 or 3 arguments. - name1, name2 := args[0].(string), args[1].(string) - if rm == nil { - v = name1 == name2 - } else if len(args) == 2 { - v, _ = rm.HasLink(name1, name2) - } else { - domain := args[2].(string) - v, _ = rm.HasLink(name1, name2, domain) - } - - memorized.Store(key, v) - return v, nil - } -} - -// GenerateConditionalGFunction is the factory method of the g(_, _[, _]) function with conditions. -func GenerateConditionalGFunction(crm rbac.ConditionalRoleManager) govaluate.ExpressionFunction { - return func(args ...interface{}) (interface{}, error) { - // Like all our other govaluate functions, all args are strings. 
- var hasLink bool - - name1, name2 := args[0].(string), args[1].(string) - if crm == nil { - hasLink = name1 == name2 - } else if len(args) == 2 { - hasLink, _ = crm.HasLink(name1, name2) - } else { - domain := args[2].(string) - hasLink, _ = crm.HasLink(name1, name2, domain) - } - - return hasLink, nil - } -} - -// builtin LinkConditionFunc - -// TimeMatchFunc is the wrapper for TimeMatch. -func TimeMatchFunc(args ...string) (bool, error) { - if err := validateVariadicStringArgs(2, args...); err != nil { - return false, fmt.Errorf("%s: %w", "TimeMatch", err) - } - return TimeMatch(args[0], args[1]) -} - -// TimeMatch determines whether the current time is between startTime and endTime. -// You can use "_" to indicate that the parameter is ignored. -func TimeMatch(startTime, endTime string) (bool, error) { - now := time.Now() - if startTime != "_" { - if start, err := time.Parse("2006-01-02 15:04:05", startTime); err != nil { - return false, err - } else if !now.After(start) { - return false, nil - } - } - - if endTime != "_" { - if end, err := time.Parse("2006-01-02 15:04:05", endTime); err != nil { - return false, err - } else if !now.Before(end) { - return false, nil - } - } - - return true, nil -} diff --git a/vendor/github.com/casbin/casbin/v2/util/util.go b/vendor/github.com/casbin/casbin/v2/util/util.go deleted file mode 100644 index f247b27b..00000000 --- a/vendor/github.com/casbin/casbin/v2/util/util.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2017 The casbin Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "encoding/json" - "regexp" - "sort" - "strings" - "sync" -) - -var evalReg = regexp.MustCompile(`\beval\((?P[^)]*)\)`) - -var escapeAssertionRegex = regexp.MustCompile(`\b((r|p)[0-9]*)\.`) - -func JsonToMap(jsonStr string) (map[string]interface{}, error) { - result := make(map[string]interface{}) - err := json.Unmarshal([]byte(jsonStr), &result) - if err != nil { - return result, err - } - return result, nil -} - -// EscapeAssertion escapes the dots in the assertion, because the expression evaluation doesn't support such variable names. -func EscapeAssertion(s string) string { - s = escapeAssertionRegex.ReplaceAllStringFunc(s, func(m string) string { - return strings.Replace(m, ".", "_", 1) - }) - return s -} - -// RemoveComments removes the comments starting with # in the text. -func RemoveComments(s string) string { - pos := strings.Index(s, "#") - if pos == -1 { - return s - } - return strings.TrimSpace(s[0:pos]) -} - -// ArrayEquals determines whether two string arrays are identical. -func ArrayEquals(a []string, b []string) bool { - if len(a) != len(b) { - return false - } - - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -// Array2DEquals determines whether two 2-dimensional string arrays are identical. -func Array2DEquals(a [][]string, b [][]string) bool { - if len(a) != len(b) { - return false - } - - for i, v := range a { - if !ArrayEquals(v, b[i]) { - return false - } - } - return true -} - -// SortArray2D Sorts the two-dimensional string array. 
-func SortArray2D(arr [][]string) { - if len(arr) != 0 { - sort.Slice(arr, func(i, j int) bool { - elementLen := len(arr[0]) - for k := 0; k < elementLen; k++ { - if arr[i][k] < arr[j][k] { - return true - } else if arr[i][k] > arr[j][k] { - return false - } - } - return true - }) - } -} - -// SortedArray2DEquals determines whether two 2-dimensional string arrays are identical. -func SortedArray2DEquals(a [][]string, b [][]string) bool { - if len(a) != len(b) { - return false - } - copyA := make([][]string, len(a)) - copy(copyA, a) - copyB := make([][]string, len(b)) - copy(copyB, b) - - SortArray2D(copyA) - SortArray2D(copyB) - - for i, v := range copyA { - if !ArrayEquals(v, copyB[i]) { - return false - } - } - return true -} - -// ArrayRemoveDuplicates removes any duplicated elements in a string array. -func ArrayRemoveDuplicates(s *[]string) { - found := make(map[string]bool) - j := 0 - for i, x := range *s { - if !found[x] { - found[x] = true - (*s)[j] = (*s)[i] - j++ - } - } - *s = (*s)[:j] -} - -// ArrayToString gets a printable string for a string array. -func ArrayToString(s []string) string { - return strings.Join(s, ", ") -} - -// ParamsToString gets a printable string for variable number of parameters. -func ParamsToString(s ...string) string { - return strings.Join(s, ", ") -} - -// SetEquals determines whether two string sets are identical. -func SetEquals(a []string, b []string) bool { - if len(a) != len(b) { - return false - } - - sort.Strings(a) - sort.Strings(b) - - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -// SetEquals determines whether two int sets are identical. -func SetEqualsInt(a []int, b []int) bool { - if len(a) != len(b) { - return false - } - - sort.Ints(a) - sort.Ints(b) - - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -// Set2DEquals determines whether two string slice sets are identical. 
-func Set2DEquals(a [][]string, b [][]string) bool { - if len(a) != len(b) { - return false - } - - var aa []string - for _, v := range a { - sort.Strings(v) - aa = append(aa, strings.Join(v, ", ")) - } - var bb []string - for _, v := range b { - sort.Strings(v) - bb = append(bb, strings.Join(v, ", ")) - } - - return SetEquals(aa, bb) -} - -// JoinSlice joins a string and a slice into a new slice. -func JoinSlice(a string, b ...string) []string { - res := make([]string, 0, len(b)+1) - - res = append(res, a) - res = append(res, b...) - - return res -} - -// JoinSliceAny joins a string and a slice into a new interface{} slice. -func JoinSliceAny(a string, b ...string) []interface{} { - res := make([]interface{}, 0, len(b)+1) - - res = append(res, a) - for _, s := range b { - res = append(res, s) - } - - return res -} - -// SetSubtract returns the elements in `a` that aren't in `b`. -func SetSubtract(a []string, b []string) []string { - mb := make(map[string]struct{}, len(b)) - for _, x := range b { - mb[x] = struct{}{} - } - var diff []string - for _, x := range a { - if _, found := mb[x]; !found { - diff = append(diff, x) - } - } - return diff -} - -// HasEval determine whether matcher contains function eval. -func HasEval(s string) bool { - return evalReg.MatchString(s) -} - -// ReplaceEval replace function eval with the value of its parameters. -func ReplaceEval(s string, rule string) string { - return evalReg.ReplaceAllString(s, "("+rule+")") -} - -// ReplaceEvalWithMap replace function eval with the value of its parameters via given sets. -func ReplaceEvalWithMap(src string, sets map[string]string) string { - return evalReg.ReplaceAllStringFunc(src, func(s string) string { - subs := evalReg.FindStringSubmatch(s) - if subs == nil { - return s - } - key := subs[1] - value, found := sets[key] - if !found { - return s - } - return evalReg.ReplaceAllString(s, value) - }) -} - -// GetEvalValue returns the parameters of function eval. 
-func GetEvalValue(s string) []string { - subMatch := evalReg.FindAllStringSubmatch(s, -1) - var rules []string - for _, rule := range subMatch { - rules = append(rules, rule[1]) - } - return rules -} - -func RemoveDuplicateElement(s []string) []string { - result := make([]string, 0, len(s)) - temp := map[string]struct{}{} - for _, item := range s { - if _, ok := temp[item]; !ok { - temp[item] = struct{}{} - result = append(result, item) - } - } - return result -} - -type node struct { - key interface{} - value interface{} - prev *node - next *node -} - -type LRUCache struct { - capacity int - m map[interface{}]*node - head *node - tail *node -} - -func NewLRUCache(capacity int) *LRUCache { - cache := &LRUCache{} - cache.capacity = capacity - cache.m = map[interface{}]*node{} - - head := &node{} - tail := &node{} - - head.next = tail - tail.prev = head - - cache.head = head - cache.tail = tail - - return cache -} - -func (cache *LRUCache) remove(n *node, listOnly bool) { - if !listOnly { - delete(cache.m, n.key) - } - n.prev.next = n.next - n.next.prev = n.prev -} - -func (cache *LRUCache) add(n *node, listOnly bool) { - if !listOnly { - cache.m[n.key] = n - } - headNext := cache.head.next - cache.head.next = n - headNext.prev = n - n.next = headNext - n.prev = cache.head -} - -func (cache *LRUCache) moveToHead(n *node) { - cache.remove(n, true) - cache.add(n, true) -} - -func (cache *LRUCache) Get(key interface{}) (value interface{}, ok bool) { - n, ok := cache.m[key] - if ok { - cache.moveToHead(n) - return n.value, ok - } else { - return nil, ok - } -} - -func (cache *LRUCache) Put(key interface{}, value interface{}) { - n, ok := cache.m[key] - if ok { - cache.remove(n, false) - } else { - n = &node{key, value, nil, nil} - if len(cache.m) >= cache.capacity { - cache.remove(cache.tail.prev, false) - } - } - cache.add(n, false) -} - -type SyncLRUCache struct { - rwm sync.RWMutex - *LRUCache -} - -func NewSyncLRUCache(capacity int) *SyncLRUCache { - cache := 
&SyncLRUCache{} - cache.LRUCache = NewLRUCache(capacity) - return cache -} - -func (cache *SyncLRUCache) Get(key interface{}) (value interface{}, ok bool) { - cache.rwm.Lock() - defer cache.rwm.Unlock() - return cache.LRUCache.Get(key) -} - -func (cache *SyncLRUCache) Put(key interface{}, value interface{}) { - cache.rwm.Lock() - defer cache.rwm.Unlock() - cache.LRUCache.Put(key, value) -} diff --git a/vendor/github.com/casbin/govaluate/.gitignore b/vendor/github.com/casbin/govaluate/.gitignore deleted file mode 100644 index 5ac0c3fc..00000000 --- a/vendor/github.com/casbin/govaluate/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -coverage.out - -manual_test.go -*.out -*.err diff --git a/vendor/github.com/casbin/govaluate/.releaserc.json b/vendor/github.com/casbin/govaluate/.releaserc.json deleted file mode 100644 index 58cb0bb4..00000000 --- a/vendor/github.com/casbin/govaluate/.releaserc.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "debug": true, - "branches": [ - "+([0-9])?(.{+([0-9]),x}).x", - "master", - { - "name": "beta", - "prerelease": true - } - ], - "plugins": [ - "@semantic-release/commit-analyzer", - "@semantic-release/release-notes-generator", - "@semantic-release/github" - ] -} diff --git a/vendor/github.com/casbin/govaluate/CONTRIBUTORS b/vendor/github.com/casbin/govaluate/CONTRIBUTORS deleted file mode 100644 index c1a7fe42..00000000 --- a/vendor/github.com/casbin/govaluate/CONTRIBUTORS +++ /dev/null @@ -1,15 +0,0 @@ -This library was authored by George Lester, and contains contributions from: - -vjeantet (regex support) -iasci (ternary operator) -oxtoacart (parameter structures, deferred parameter retrieval) -wmiller848 (bitwise operators) -prashantv (optimization of bools) 
-dpaolella (exposure of variables used in an expression) -benpaxton (fix for missing type checks during literal elide process) -abrander (panic-finding testing tool, float32 conversions) -xfennec (fix for dates being parsed in the current Location) -bgaifullin (lifting restriction on complex/struct types) -gautambt (hexadecimal literals) -felixonmars (fix multiple typos in test names) -sambonfire (automatic type conversion for accessor function calls) \ No newline at end of file diff --git a/vendor/github.com/casbin/govaluate/EvaluableExpression.go b/vendor/github.com/casbin/govaluate/EvaluableExpression.go deleted file mode 100644 index a5fe50d4..00000000 --- a/vendor/github.com/casbin/govaluate/EvaluableExpression.go +++ /dev/null @@ -1,276 +0,0 @@ -package govaluate - -import ( - "errors" - "fmt" -) - -const isoDateFormat string = "2006-01-02T15:04:05.999999999Z0700" -const shortCircuitHolder int = -1 - -var DUMMY_PARAMETERS = MapParameters(map[string]interface{}{}) - -/* - EvaluableExpression represents a set of ExpressionTokens which, taken together, - are an expression that can be evaluated down into a single value. -*/ -type EvaluableExpression struct { - - /* - Represents the query format used to output dates. Typically only used when creating SQL or Mongo queries from an expression. - Defaults to the complete ISO8601 format, including nanoseconds. - */ - QueryDateFormat string - - /* - Whether or not to safely check types when evaluating. - If true, this library will return error messages when invalid types are used. - If false, the library will panic when operators encounter types they can't use. - - This is exclusively for users who need to squeeze every ounce of speed out of the library as they can, - and you should only set this to false if you know exactly what you're doing. 
- */ - ChecksTypes bool - - tokens []ExpressionToken - evaluationStages *evaluationStage - inputExpression string -} - -/* - Parses a new EvaluableExpression from the given [expression] string. - Returns an error if the given expression has invalid syntax. -*/ -func NewEvaluableExpression(expression string) (*EvaluableExpression, error) { - - functions := make(map[string]ExpressionFunction) - return NewEvaluableExpressionWithFunctions(expression, functions) -} - -/* - Similar to [NewEvaluableExpression], except that instead of a string, an already-tokenized expression is given. - This is useful in cases where you may be generating an expression automatically, or using some other parser (e.g., to parse from a query language) -*/ -func NewEvaluableExpressionFromTokens(tokens []ExpressionToken) (*EvaluableExpression, error) { - - var ret *EvaluableExpression - var err error - - ret = new(EvaluableExpression) - ret.QueryDateFormat = isoDateFormat - - err = checkBalance(tokens) - if err != nil { - return nil, err - } - - err = checkExpressionSyntax(tokens) - if err != nil { - return nil, err - } - - ret.tokens, err = optimizeTokens(tokens) - if err != nil { - return nil, err - } - - ret.evaluationStages, err = planStages(ret.tokens) - if err != nil { - return nil, err - } - - ret.ChecksTypes = true - return ret, nil -} - -/* - Similar to [NewEvaluableExpression], except enables the use of user-defined functions. - Functions passed into this will be available to the expression. 
-*/ -func NewEvaluableExpressionWithFunctions(expression string, functions map[string]ExpressionFunction) (*EvaluableExpression, error) { - - var ret *EvaluableExpression - var err error - - ret = new(EvaluableExpression) - ret.QueryDateFormat = isoDateFormat - ret.inputExpression = expression - - ret.tokens, err = parseTokens(expression, functions) - if err != nil { - return nil, err - } - - err = checkBalance(ret.tokens) - if err != nil { - return nil, err - } - - err = checkExpressionSyntax(ret.tokens) - if err != nil { - return nil, err - } - - ret.tokens, err = optimizeTokens(ret.tokens) - if err != nil { - return nil, err - } - - ret.evaluationStages, err = planStages(ret.tokens) - if err != nil { - return nil, err - } - - ret.ChecksTypes = true - return ret, nil -} - -/* - Same as `Eval`, but automatically wraps a map of parameters into a `govalute.Parameters` structure. -*/ -func (this EvaluableExpression) Evaluate(parameters map[string]interface{}) (interface{}, error) { - - if parameters == nil { - return this.Eval(nil) - } - - return this.Eval(MapParameters(parameters)) -} - -/* - Runs the entire expression using the given [parameters]. - e.g., If the expression contains a reference to the variable "foo", it will be taken from `parameters.Get("foo")`. - - This function returns errors if the combination of expression and parameters cannot be run, - such as if a variable in the expression is not present in [parameters]. - - In all non-error circumstances, this returns the single value result of the expression and parameters given. - e.g., if the expression is "1 + 1", this will return 2.0. 
- e.g., if the expression is "foo + 1" and parameters contains "foo" = 2, this will return 3.0 -*/ -func (this EvaluableExpression) Eval(parameters Parameters) (interface{}, error) { - - if this.evaluationStages == nil { - return nil, nil - } - - if parameters != nil { - parameters = &sanitizedParameters{parameters} - } else { - parameters = DUMMY_PARAMETERS - } - - return this.evaluateStage(this.evaluationStages, parameters) -} - -func (this EvaluableExpression) evaluateStage(stage *evaluationStage, parameters Parameters) (interface{}, error) { - - var left, right interface{} - var err error - - if stage.leftStage != nil { - left, err = this.evaluateStage(stage.leftStage, parameters) - if err != nil { - return nil, err - } - } - - if stage.isShortCircuitable() { - switch stage.symbol { - case AND: - if left == false { - return false, nil - } - case OR: - if left == true { - return true, nil - } - case COALESCE: - if left != nil { - return left, nil - } - - case TERNARY_TRUE: - if left == false { - right = shortCircuitHolder - } - case TERNARY_FALSE: - if left != nil { - right = shortCircuitHolder - } - } - } - - if right != shortCircuitHolder && stage.rightStage != nil { - right, err = this.evaluateStage(stage.rightStage, parameters) - if err != nil { - return nil, err - } - } - - if this.ChecksTypes { - if stage.typeCheck == nil { - - err = typeCheck(stage.leftTypeCheck, left, stage.symbol, stage.typeErrorFormat) - if err != nil { - return nil, err - } - - err = typeCheck(stage.rightTypeCheck, right, stage.symbol, stage.typeErrorFormat) - if err != nil { - return nil, err - } - } else { - // special case where the type check needs to know both sides to determine if the operator can handle it - if !stage.typeCheck(left, right) { - errorMsg := fmt.Sprintf(stage.typeErrorFormat, left, stage.symbol.String()) - return nil, errors.New(errorMsg) - } - } - } - - return stage.operator(left, right, parameters) -} - -func typeCheck(check stageTypeCheck, value interface{}, 
symbol OperatorSymbol, format string) error { - - if check == nil { - return nil - } - - if check(value) { - return nil - } - - errorMsg := fmt.Sprintf(format, value, symbol.String()) - return errors.New(errorMsg) -} - -/* - Returns an array representing the ExpressionTokens that make up this expression. -*/ -func (this EvaluableExpression) Tokens() []ExpressionToken { - - return this.tokens -} - -/* - Returns the original expression used to create this EvaluableExpression. -*/ -func (this EvaluableExpression) String() string { - - return this.inputExpression -} - -/* - Returns an array representing the variables contained in this EvaluableExpression. -*/ -func (this EvaluableExpression) Vars() []string { - var varlist []string - for _, val := range this.Tokens() { - if val.Kind == VARIABLE { - varlist = append(varlist, val.Value.(string)) - } - } - return varlist -} diff --git a/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go b/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go deleted file mode 100644 index 52409fa2..00000000 --- a/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go +++ /dev/null @@ -1,167 +0,0 @@ -package govaluate - -import ( - "errors" - "fmt" - "regexp" - "time" -) - -/* -Returns a string representing this expression as if it were written in SQL. -This function assumes that all parameters exist within the same table, and that the table essentially represents -a serialized object of some sort (e.g., hibernate). -If your data model is more normalized, you may need to consider iterating through each actual token given by `Tokens()` -to create your query. - -Boolean values are considered to be "1" for true, "0" for false. - -Times are formatted according to this.QueryDateFormat. 
-*/ -func (this EvaluableExpression) ToSQLQuery() (string, error) { - - var stream *tokenStream - var transactions *expressionOutputStream - var transaction string - var err error - - stream = newTokenStream(this.tokens) - transactions = new(expressionOutputStream) - - for stream.hasNext() { - - transaction, err = this.findNextSQLString(stream, transactions) - if err != nil { - return "", err - } - - transactions.add(transaction) - } - - return transactions.createString(" "), nil -} - -func (this EvaluableExpression) findNextSQLString(stream *tokenStream, transactions *expressionOutputStream) (string, error) { - - var token ExpressionToken - var ret string - - token = stream.next() - - switch token.Kind { - - case STRING: - ret = fmt.Sprintf("'%v'", token.Value) - case PATTERN: - ret = fmt.Sprintf("'%s'", token.Value.(*regexp.Regexp).String()) - case TIME: - ret = fmt.Sprintf("'%s'", token.Value.(time.Time).Format(this.QueryDateFormat)) - - case LOGICALOP: - switch logicalSymbols[token.Value.(string)] { - - case AND: - ret = "AND" - case OR: - ret = "OR" - } - - case BOOLEAN: - if token.Value.(bool) { - ret = "1" - } else { - ret = "0" - } - - case VARIABLE: - ret = fmt.Sprintf("[%s]", token.Value.(string)) - - case NUMERIC: - ret = fmt.Sprintf("%g", token.Value.(float64)) - - case COMPARATOR: - switch comparatorSymbols[token.Value.(string)] { - - case EQ: - ret = "=" - case NEQ: - ret = "<>" - case REQ: - ret = "RLIKE" - case NREQ: - ret = "NOT RLIKE" - default: - ret = fmt.Sprintf("%s", token.Value) - } - - case TERNARY: - - switch ternarySymbols[token.Value.(string)] { - - case COALESCE: - - left := transactions.rollback() - right, err := this.findNextSQLString(stream, transactions) - if err != nil { - return "", err - } - - ret = fmt.Sprintf("COALESCE(%v, %v)", left, right) - case TERNARY_TRUE: - fallthrough - case TERNARY_FALSE: - return "", errors.New("Ternary operators are unsupported in SQL output") - } - case PREFIX: - switch 
prefixSymbols[token.Value.(string)] { - - case INVERT: - ret = "NOT" - default: - - right, err := this.findNextSQLString(stream, transactions) - if err != nil { - return "", err - } - - ret = fmt.Sprintf("%s%s", token.Value.(string), right) - } - case MODIFIER: - - switch modifierSymbols[token.Value.(string)] { - - case EXPONENT: - - left := transactions.rollback() - right, err := this.findNextSQLString(stream, transactions) - if err != nil { - return "", err - } - - ret = fmt.Sprintf("POW(%s, %s)", left, right) - case MODULUS: - - left := transactions.rollback() - right, err := this.findNextSQLString(stream, transactions) - if err != nil { - return "", err - } - - ret = fmt.Sprintf("MOD(%s, %s)", left, right) - default: - ret = fmt.Sprintf("%s", token.Value) - } - case CLAUSE: - ret = "(" - case CLAUSE_CLOSE: - ret = ")" - case SEPARATOR: - ret = "," - - default: - errorMsg := fmt.Sprintf("Unrecognized query token '%s' of kind '%s'", token.Value, token.Kind) - return "", errors.New(errorMsg) - } - - return ret, nil -} diff --git a/vendor/github.com/casbin/govaluate/ExpressionToken.go b/vendor/github.com/casbin/govaluate/ExpressionToken.go deleted file mode 100644 index f849f381..00000000 --- a/vendor/github.com/casbin/govaluate/ExpressionToken.go +++ /dev/null @@ -1,9 +0,0 @@ -package govaluate - -/* - Represents a single parsed token. 
-*/ -type ExpressionToken struct { - Kind TokenKind - Value interface{} -} diff --git a/vendor/github.com/casbin/govaluate/LICENSE b/vendor/github.com/casbin/govaluate/LICENSE deleted file mode 100644 index 0ef0f41e..00000000 --- a/vendor/github.com/casbin/govaluate/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2016 George Lester - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/casbin/govaluate/MANUAL.md b/vendor/github.com/casbin/govaluate/MANUAL.md deleted file mode 100644 index e0658285..00000000 --- a/vendor/github.com/casbin/govaluate/MANUAL.md +++ /dev/null @@ -1,176 +0,0 @@ -govaluate -==== - -This library contains quite a lot of functionality, this document is meant to be formal documentation on the operators and features of it. -Some of this documentation may duplicate what's in README.md, but should never conflict. 
- -# Types - -This library only officially deals with four types; `float64`, `bool`, `string`, and arrays. - -All numeric literals, with or without a radix, will be converted to `float64` for evaluation. For instance; in practice, there is no difference between the literals "1.0" and "1", they both end up as `float64`. This matters to users because if you intend to return numeric values from your expressions, then the returned value will be `float64`, not any other numeric type. - -Any string _literal_ (not parameter) which is interpretable as a date will be converted to a `float64` representation of that date's unix time. Any `time.Time` parameters will not be operable with these date literals; such parameters will need to use the `time.Time.Unix()` method to get a numeric representation. - -Arrays are untyped, and can be mixed-type. Internally they're all just `interface{}`. Only two operators can interact with arrays, `IN` and `,`. All other operators will refuse to operate on arrays. - -# Operators - -## Modifiers - -### Addition, concatenation `+` - -If either left or right sides of the `+` operator are a `string`, then this operator will perform string concatenation and return that result. If neither are string, then both must be numeric, and this will return a numeric result. - -Any other case is invalid. - -### Arithmetic `-` `*` `/` `**` `%` - -`**` refers to "take to the power of". For instance, `3 ** 4` == 81. - -* _Left side_: numeric -* _Right side_: numeric -* _Returns_: numeric - -### Bitwise shifts, masks `>>` `<<` `|` `&` `^` - -All of these operators convert their `float64` left and right sides to `int64`, perform their operation, and then convert back. -Given how this library assumes numeric are represented (as `float64`), it is unlikely that this behavior will change, even though it may cause havoc with extremely large or small numbers. - -* _Left side_: numeric -* _Right side_: numeric -* _Returns_: numeric - -### Negation `-` - -Prefix only. 
This can never have a left-hand value. - -* _Right side_: numeric -* _Returns_: numeric - -### Inversion `!` - -Prefix only. This can never have a left-hand value. - -* _Right side_: bool -* _Returns_: bool - -### Bitwise NOT `~` - -Prefix only. This can never have a left-hand value. - -* _Right side_: numeric -* _Returns_: numeric - -## Logical Operators - -For all logical operators, this library will short-circuit the operation if the left-hand side is sufficient to determine what to do. For instance, `true || expensiveOperation()` will not actually call `expensiveOperation()`, since it knows the left-hand side is `true`. - -### Logical AND/OR `&&` `||` - -* _Left side_: bool -* _Right side_: bool -* _Returns_: bool - -### Ternary true `?` - -Checks if the left side is `true`. If so, returns the right side. If the left side is `false`, returns `nil`. -In practice, this is commonly used with the other ternary operator. - -* _Left side_: bool -* _Right side_: Any type. -* _Returns_: Right side or `nil` - -### Ternary false `:` - -Checks if the left side is `nil`. If so, returns the right side. If the left side is non-nil, returns the left side. -In practice, this is commonly used with the other ternary operator. - -* _Left side_: Any type. -* _Right side_: Any type. -* _Returns_: Right side or `nil` - -### Null coalescence `??` - -Similar to the C# operator. If the left value is non-nil, it returns that. If not, then the right-value is returned. - -* _Left side_: Any type. -* _Right side_: Any type. -* _Returns_: No specific type - whichever is passed to it. - -## Comparators - -### Numeric/lexicographic comparators `>` `<` `>=` `<=` - -If both sides are numeric, this returns the usual greater/lesser behavior that would be expected. -If both sides are string, this returns the lexicographic comparison of the strings. This uses Go's standard lexicographic compare. - -* _Accepts_: Left and right side must either be both string, or both numeric. 
-* _Returns_: bool - -### Regex comparators `=~` `!~` - -These use go's standard `regexp` flavor of regex. The left side is expected to be the candidate string, the right side is the pattern. `=~` returns whether or not the candidate string matches the regex pattern given on the right. `!~` is the inverted version of the same logic. - -* _Left side_: string -* _Right side_: string -* _Returns_: bool - -## Arrays - -### Separator `,` - -The separator, always paired with parenthesis, creates arrays. It must always have both a left and right-hand value, so for instance `(, 0)` and `(0,)` are invalid uses of it. - -Again, this should always be used with parenthesis; like `(1, 2, 3, 4)`. - -### Membership `IN` - -The only operator with a text name, this operator checks the right-hand side array to see if it contains a value that is equal to the left-side value. -Equality is determined by the use of the `==` operator, and this library doesn't check types between the values. Any two values, when cast to `interface{}`, and can still be checked for equality with `==` will act as expected. - -Note that you can use a parameter for the array, but it must be an `[]interface{}`. - -* _Left side_: Any type. -* _Right side_: array -* _Returns_: bool - -# Parameters - -Parameters must be passed in every time the expression is evaluated. Parameters can be of any type, but will not cause errors unless actually used in an erroneous way. There is no difference in behavior for any of the above operators for parameters - they are type checked when used. - -All `int` and `float` values of any width will be converted to `float64` before use. - -At no point is the parameter structure, or any value thereof, modified by this library. - -## Alternates to maps - -The default form of parameters as a map may not serve your use case. 
You may have parameters in some other structure, you may want to change the no-parameter-found behavior, or maybe even just have some debugging print statements invoked when a parameter is accessed. - -To do this, define a type that implements the `govaluate.Parameters` interface. When you want to evaluate, instead call `EvaluableExpression.Eval` and pass your parameter structure. - -# Functions - -During expression parsing (_not_ evaluation), a map of functions can be given to `govaluate.NewEvaluableExpressionWithFunctions` (the lengthiest and finest of function names). The resultant expression will be able to invoke those functions during evaluation. Once parsed, an expression cannot have functions added or removed - a new expression will need to be created if you want to change the functions, or behavior of said functions. - -Functions always take the form `()`, including parens. Functions can have an empty list of parameters, like `()`, but still must have parens. - -If the expression contains something that looks like it ought to be a function (such as `foo()`), but no such function was given to it, it will error on parsing. - -Functions must be of type `map[string]govaluate.ExpressionFunction`. `ExpressionFunction`, for brevity, has the following signature: - -`func(args ...interface{}) (interface{}, error)` - -Where `args` is whatever is passed to the function when called. If a non-nil error is returned from a function during evaluation, the evaluation stops and ultimately returns that error to the caller of `Evaluate()` or `Eval()`. - -## Built-in functions - -There aren't any builtin functions. The author is opposed to maintaining a standard library of functions to be used. - -Every use case of this library is different, and even in simple use cases (such as parameters, see above) different users need different behavior, naming, or even functionality. 
The author prefers that users make their own decisions about what functions they need, and how they operate. - -# Equality - -The `==` and `!=` operators involve a moderately complex workflow. They use [`reflect.DeepEqual`](https://golang.org/pkg/reflect/#DeepEqual). This is for complicated reasons, but there are some types in Go that cannot be compared with the native `==` operator. Arrays, in particular, cannot be compared - Go will panic if you try. One might assume this could be handled with the type checking system in `govaluate`, but unfortunately without reflection there is no way to know if a variable is a slice/array. Worse, structs can be incomparable if they _contain incomparable types_. - -It's all very complicated. Fortunately, Go includes the `reflect.DeepEqual` function to handle all the edge cases. Currently, `govaluate` uses that for all equality/inequality. diff --git a/vendor/github.com/casbin/govaluate/OperatorSymbol.go b/vendor/github.com/casbin/govaluate/OperatorSymbol.go deleted file mode 100644 index 4b810658..00000000 --- a/vendor/github.com/casbin/govaluate/OperatorSymbol.go +++ /dev/null @@ -1,309 +0,0 @@ -package govaluate - -/* - Represents the valid symbols for operators. 
- -*/ -type OperatorSymbol int - -const ( - VALUE OperatorSymbol = iota - LITERAL - NOOP - EQ - NEQ - GT - LT - GTE - LTE - REQ - NREQ - IN - - AND - OR - - PLUS - MINUS - BITWISE_AND - BITWISE_OR - BITWISE_XOR - BITWISE_LSHIFT - BITWISE_RSHIFT - MULTIPLY - DIVIDE - MODULUS - EXPONENT - - NEGATE - INVERT - BITWISE_NOT - - TERNARY_TRUE - TERNARY_FALSE - COALESCE - - FUNCTIONAL - ACCESS - SEPARATE -) - -type operatorPrecedence int - -const ( - noopPrecedence operatorPrecedence = iota - valuePrecedence - functionalPrecedence - prefixPrecedence - exponentialPrecedence - additivePrecedence - bitwisePrecedence - bitwiseShiftPrecedence - multiplicativePrecedence - comparatorPrecedence - ternaryPrecedence - logicalAndPrecedence - logicalOrPrecedence - separatePrecedence -) - -func findOperatorPrecedenceForSymbol(symbol OperatorSymbol) operatorPrecedence { - - switch symbol { - case NOOP: - return noopPrecedence - case VALUE: - return valuePrecedence - case EQ: - fallthrough - case NEQ: - fallthrough - case GT: - fallthrough - case LT: - fallthrough - case GTE: - fallthrough - case LTE: - fallthrough - case REQ: - fallthrough - case NREQ: - fallthrough - case IN: - return comparatorPrecedence - case AND: - return logicalAndPrecedence - case OR: - return logicalOrPrecedence - case BITWISE_AND: - fallthrough - case BITWISE_OR: - fallthrough - case BITWISE_XOR: - return bitwisePrecedence - case BITWISE_LSHIFT: - fallthrough - case BITWISE_RSHIFT: - return bitwiseShiftPrecedence - case PLUS: - fallthrough - case MINUS: - return additivePrecedence - case MULTIPLY: - fallthrough - case DIVIDE: - fallthrough - case MODULUS: - return multiplicativePrecedence - case EXPONENT: - return exponentialPrecedence - case BITWISE_NOT: - fallthrough - case NEGATE: - fallthrough - case INVERT: - return prefixPrecedence - case COALESCE: - fallthrough - case TERNARY_TRUE: - fallthrough - case TERNARY_FALSE: - return ternaryPrecedence - case ACCESS: - fallthrough - case FUNCTIONAL: - return 
functionalPrecedence - case SEPARATE: - return separatePrecedence - } - - return valuePrecedence -} - -/* - Map of all valid comparators, and their string equivalents. - Used during parsing of expressions to determine if a symbol is, in fact, a comparator. - Also used during evaluation to determine exactly which comparator is being used. -*/ -var comparatorSymbols = map[string]OperatorSymbol{ - "==": EQ, - "!=": NEQ, - ">": GT, - ">=": GTE, - "<": LT, - "<=": LTE, - "=~": REQ, - "!~": NREQ, - "in": IN, -} - -var logicalSymbols = map[string]OperatorSymbol{ - "&&": AND, - "||": OR, -} - -var bitwiseSymbols = map[string]OperatorSymbol{ - "^": BITWISE_XOR, - "&": BITWISE_AND, - "|": BITWISE_OR, -} - -var bitwiseShiftSymbols = map[string]OperatorSymbol{ - ">>": BITWISE_RSHIFT, - "<<": BITWISE_LSHIFT, -} - -var additiveSymbols = map[string]OperatorSymbol{ - "+": PLUS, - "-": MINUS, -} - -var multiplicativeSymbols = map[string]OperatorSymbol{ - "*": MULTIPLY, - "/": DIVIDE, - "%": MODULUS, -} - -var exponentialSymbolsS = map[string]OperatorSymbol{ - "**": EXPONENT, -} - -var prefixSymbols = map[string]OperatorSymbol{ - "-": NEGATE, - "!": INVERT, - "~": BITWISE_NOT, -} - -var ternarySymbols = map[string]OperatorSymbol{ - "?": TERNARY_TRUE, - ":": TERNARY_FALSE, - "??": COALESCE, -} - -// this is defined separately from additiveSymbols et al because it's needed for parsing, not stage planning. -var modifierSymbols = map[string]OperatorSymbol{ - "+": PLUS, - "-": MINUS, - "*": MULTIPLY, - "/": DIVIDE, - "%": MODULUS, - "**": EXPONENT, - "&": BITWISE_AND, - "|": BITWISE_OR, - "^": BITWISE_XOR, - ">>": BITWISE_RSHIFT, - "<<": BITWISE_LSHIFT, -} - -var separatorSymbols = map[string]OperatorSymbol{ - ",": SEPARATE, -} - -/* - Returns true if this operator is contained by the given array of candidate symbols. - False otherwise. 
-*/ -func (this OperatorSymbol) IsModifierType(candidate []OperatorSymbol) bool { - - for _, symbolType := range candidate { - if this == symbolType { - return true - } - } - - return false -} - -/* - Generally used when formatting type check errors. - We could store the stringified symbol somewhere else and not require a duplicated codeblock to translate - OperatorSymbol to string, but that would require more memory, and another field somewhere. - Adding operators is rare enough that we just stringify it here instead. -*/ -func (this OperatorSymbol) String() string { - - switch this { - case NOOP: - return "NOOP" - case VALUE: - return "VALUE" - case EQ: - return "=" - case NEQ: - return "!=" - case GT: - return ">" - case LT: - return "<" - case GTE: - return ">=" - case LTE: - return "<=" - case REQ: - return "=~" - case NREQ: - return "!~" - case AND: - return "&&" - case OR: - return "||" - case IN: - return "in" - case BITWISE_AND: - return "&" - case BITWISE_OR: - return "|" - case BITWISE_XOR: - return "^" - case BITWISE_LSHIFT: - return "<<" - case BITWISE_RSHIFT: - return ">>" - case PLUS: - return "+" - case MINUS: - return "-" - case MULTIPLY: - return "*" - case DIVIDE: - return "/" - case MODULUS: - return "%" - case EXPONENT: - return "**" - case NEGATE: - return "-" - case INVERT: - return "!" - case BITWISE_NOT: - return "~" - case TERNARY_TRUE: - return "?" - case TERNARY_FALSE: - return ":" - case COALESCE: - return "??" 
- } - return "" -} diff --git a/vendor/github.com/casbin/govaluate/README.md b/vendor/github.com/casbin/govaluate/README.md deleted file mode 100644 index 576a9df1..00000000 --- a/vendor/github.com/casbin/govaluate/README.md +++ /dev/null @@ -1,232 +0,0 @@ -govaluate -==== - -[![Build Status](https://github.com/casbin/govaluate/actions/workflows/build.yml/badge.svg)](https://github.com/casbin/govaluate/actions/workflows/build.yml) -[![Godoc](https://godoc.org/github.com/casbin/govaluate?status.svg)](https://pkg.go.dev/github.com/casbin/govaluate) -[![Go Report Card](https://goreportcard.com/badge/github.com/casbin/govaluate)](https://goreportcard.com/report/github.com/casbin/govaluate) - -Provides support for evaluating arbitrary C-like artithmetic/string expressions. - -Why can't you just write these expressions in code? --- - -Sometimes, you can't know ahead-of-time what an expression will look like, or you want those expressions to be configurable. -Perhaps you've got a set of data running through your application, and you want to allow your users to specify some validations to run on it before committing it to a database. Or maybe you've written a monitoring framework which is capable of gathering a bunch of metrics, then evaluating a few expressions to see if any metrics should be alerted upon, but the conditions for alerting are different for each monitor. - -A lot of people wind up writing their own half-baked style of evaluation language that fits their needs, but isn't complete. Or they wind up baking the expression into the actual executable, even if they know it's subject to change. These strategies may work, but they take time to implement, time for users to learn, and induce technical debt as requirements change. This library is meant to cover all the normal C-like expressions, so that you don't have to reinvent one of the oldest wheels on a computer. - -How do I use it? --- - -You create a new EvaluableExpression, then call "Evaluate" on it. 
- -```go - expression, err := govaluate.NewEvaluableExpression("10 > 0"); - result, err := expression.Evaluate(nil); - // result is now set to "true", the bool value. -``` - -Cool, but how about with parameters? - -```go - expression, err := govaluate.NewEvaluableExpression("foo > 0"); - - parameters := make(map[string]interface{}, 8) - parameters["foo"] = -1; - - result, err := expression.Evaluate(parameters); - // result is now set to "false", the bool value. -``` - -That's cool, but we can almost certainly have done all that in code. What about a complex use case that involves some math? - -```go - expression, err := govaluate.NewEvaluableExpression("(requests_made * requests_succeeded / 100) >= 90"); - - parameters := make(map[string]interface{}, 8) - parameters["requests_made"] = 100; - parameters["requests_succeeded"] = 80; - - result, err := expression.Evaluate(parameters); - // result is now set to "false", the bool value. -``` - -Or maybe you want to check the status of an alive check ("smoketest") page, which will be a string? - -```go - expression, err := govaluate.NewEvaluableExpression("http_response_body == 'service is ok'"); - - parameters := make(map[string]interface{}, 8) - parameters["http_response_body"] = "service is ok"; - - result, err := expression.Evaluate(parameters); - // result is now set to "true", the bool value. -``` - -These examples have all returned boolean values, but it's equally possible to return numeric ones. - -```go - expression, err := govaluate.NewEvaluableExpression("(mem_used / total_mem) * 100"); - - parameters := make(map[string]interface{}, 8) - parameters["total_mem"] = 1024; - parameters["mem_used"] = 512; - - result, err := expression.Evaluate(parameters); - // result is now set to "50.0", the float64 value. -``` - -You can also do date parsing, though the formats are somewhat limited. Stick to RF3339, ISO8061, unix date, or ruby date formats. 
If you're having trouble getting a date string to parse, check the list of formats actually used: [parsing.go:248](https://github.com/casbin/govaluate/blob/0580e9b47a69125afa0e4ebd1cf93c49eb5a43ec/parsing.go#L258). - -```go - expression, err := govaluate.NewEvaluableExpression("'2014-01-02' > '2014-01-01 23:59:59'"); - result, err := expression.Evaluate(nil); - - // result is now set to true -``` - -Expressions are parsed once, and can be re-used multiple times. Parsing is the compute-intensive phase of the process, so if you intend to use the same expression with different parameters, just parse it once. Like so; - -```go - expression, err := govaluate.NewEvaluableExpression("response_time <= 100"); - parameters := make(map[string]interface{}, 8) - - for { - parameters["response_time"] = pingSomething(); - result, err := expression.Evaluate(parameters) - } -``` - -The normal C-standard order of operators is respected. When writing an expression, be sure that you either order the operators correctly, or use parenthesis to clarify which portions of an expression should be run first. - -Escaping characters --- - -Sometimes you'll have parameters that have spaces, slashes, pluses, ampersands or some other character -that this library interprets as something special. For example, the following expression will not -act as one might expect: - - "response-time < 100" - -As written, the library will parse it as "[response] minus [time] is less than 100". In reality, -"response-time" is meant to be one variable that just happens to have a dash in it. - -There are two ways to work around this. First, you can escape the entire parameter name: - - "[response-time] < 100" - -Or you can use backslashes to escape only the minus sign. - - "response\\-time < 100" - -Backslashes can be used anywhere in an expression to escape the very next character. Square bracketed parameter names can be used instead of plain parameter names at any time. 
- -Functions --- - -You may have cases where you want to call a function on a parameter during execution of the expression. Perhaps you want to aggregate some set of data, but don't know the exact aggregation you want to use until you're writing the expression itself. Or maybe you have a mathematical operation you want to perform, for which there is no operator; like `log` or `tan` or `sqrt`. For cases like this, you can provide a map of functions to `NewEvaluableExpressionWithFunctions`, which will then be able to use them during execution. For instance; - -```go - functions := map[string]govaluate.ExpressionFunction { - "strlen": func(args ...interface{}) (interface{}, error) { - length := len(args[0].(string)) - return (float64)(length), nil - }, - } - - expString := "strlen('someReallyLongInputString') <= 16" - expression, _ := govaluate.NewEvaluableExpressionWithFunctions(expString, functions) - - result, _ := expression.Evaluate(nil) - // result is now "false", the boolean value -``` - -Functions can accept any number of arguments, correctly handles nested functions, and arguments can be of any type (even if none of this library's operators support evaluation of that type). For instance, each of these usages of functions in an expression are valid (assuming that the appropriate functions and parameters are given): - -```go -"sqrt(x1 ** y1, x2 ** y2)" -"max(someValue, abs(anotherValue), 10 * lastValue)" -``` - -Functions cannot be passed as parameters, they must be known at the time when the expression is parsed, and are unchangeable after parsing. - -Accessors --- - -If you have structs in your parameters, you can access their fields and methods in the usual way. For instance, given a struct that has a method "Echo", present in the parameters as `foo`, the following is valid: - - "foo.Echo('hello world')" - -Fields are accessed in a similar way. 
Assuming `foo` has a field called "Length": - - "foo.Length > 9000" - -The values of a `map` are accessed in the same way. Assuming the parameter `foo` is `map[string]int{ "bar": 1 }` - - "foo.bar == 1" - -Accessors can be nested to any depth, like the following - - "foo.Bar.Baz.SomeFunction()" - -This may be convenient, but note that using accessors involves a _lot_ of reflection. This makes the expression about four times slower than just using a parameter (consult the benchmarks for more precise measurements on your system). -If at all reasonable, the author recommends extracting the values you care about into a parameter map beforehand, or defining a struct that implements the `Parameters` interface, and which grabs fields as required. If there are functions you want to use, it's better to pass them as expression functions (see the above section). These approaches use no reflection, and are designed to be fast and clean. - -What operators and types does this support? --- - -* Modifiers: `+` `-` `/` `*` `&` `|` `^` `**` `%` `>>` `<<` -* Comparators: `>` `>=` `<` `<=` `==` `!=` `=~` `!~` -* Logical ops: `||` `&&` -* Numeric constants, as 64-bit floating point (`12345.678`) -* String constants (single quotes: `'foobar'`) -* Date constants (single quotes, using any permutation of RFC3339, ISO8601, ruby date, or unix date; date parsing is automatically tried with any string constant) -* Boolean constants: `true` `false` -* Parenthesis to control order of evaluation `(` `)` -* Arrays (anything separated by `,` within parenthesis: `(1, 2, 'foo')`) -* Prefixes: `!` `-` `~` -* Ternary conditional: `?` `:` -* Null coalescence: `??` - -See [MANUAL.md](https://github.com/casbin/govaluate/blob/master/MANUAL.md) for exacting details on what types each operator supports. - -Types --- - -Some operators don't make sense when used with some types. For instance, what does it mean to get the modulo of a string? 
What happens if you check to see if two numbers are logically AND'ed together? - -Everyone has a different intuition about the answers to these questions. To prevent confusion, this library will _refuse to operate_ upon types for which there is not an unambiguous meaning for the operation. See [MANUAL.md](https://github.com/casbin/govaluate/blob/master/MANUAL.md) for details about what operators are valid for which types. - -Benchmarks --- - -If you're concerned about the overhead of this library, a good range of benchmarks are built into this repo. You can run them with `go test -bench=.`. The library is built with an eye towards being quick, but has not been aggressively profiled and optimized. For most applications, though, it is completely fine. - -For a very rough idea of performance, here are the results output from a benchmark run on a 3rd-gen Macbook Pro (Linux Mint 17.1). - -``` -BenchmarkSingleParse-12 1000000 1382 ns/op -BenchmarkSimpleParse-12 200000 10771 ns/op -BenchmarkFullParse-12 30000 49383 ns/op -BenchmarkEvaluationSingle-12 50000000 30.1 ns/op -BenchmarkEvaluationNumericLiteral-12 10000000 119 ns/op -BenchmarkEvaluationLiteralModifiers-12 10000000 236 ns/op -BenchmarkEvaluationParameters-12 5000000 260 ns/op -BenchmarkEvaluationParametersModifiers-12 3000000 547 ns/op -BenchmarkComplexExpression-12 2000000 963 ns/op -BenchmarkRegexExpression-12 100000 20357 ns/op -BenchmarkConstantRegexExpression-12 1000000 1392 ns/op -ok -``` - -API Breaks --- - -While this library has very few cases which will ever result in an API break, it can happen. If you are using this in production, vendor the commit you've tested against, or use gopkg.in to redirect your import (e.g., `import "gopkg.in/casbin/govaluate.v1"`). Master branch (while infrequent) _may_ at some point contain API breaking changes, and the author will have no way to communicate these to downstreams, other than creating a new major release. 
- -Releases will explicitly state when an API break happens, and if they do not specify an API break it should be safe to upgrade. - -License --- - -This project is licensed under the MIT general use license. You're free to integrate, fork, and play with this code as you feel fit without consulting the author, as long as you provide proper credit to the author in your works. diff --git a/vendor/github.com/casbin/govaluate/TokenKind.go b/vendor/github.com/casbin/govaluate/TokenKind.go deleted file mode 100644 index 7c9516d2..00000000 --- a/vendor/github.com/casbin/govaluate/TokenKind.go +++ /dev/null @@ -1,75 +0,0 @@ -package govaluate - -/* - Represents all valid types of tokens that a token can be. -*/ -type TokenKind int - -const ( - UNKNOWN TokenKind = iota - - PREFIX - NUMERIC - BOOLEAN - STRING - PATTERN - TIME - VARIABLE - FUNCTION - SEPARATOR - ACCESSOR - - COMPARATOR - LOGICALOP - MODIFIER - - CLAUSE - CLAUSE_CLOSE - - TERNARY -) - -/* - GetTokenKindString returns a string that describes the given TokenKind. - e.g., when passed the NUMERIC TokenKind, this returns the string "NUMERIC". 
-*/ -func (kind TokenKind) String() string { - - switch kind { - - case PREFIX: - return "PREFIX" - case NUMERIC: - return "NUMERIC" - case BOOLEAN: - return "BOOLEAN" - case STRING: - return "STRING" - case PATTERN: - return "PATTERN" - case TIME: - return "TIME" - case VARIABLE: - return "VARIABLE" - case FUNCTION: - return "FUNCTION" - case SEPARATOR: - return "SEPARATOR" - case COMPARATOR: - return "COMPARATOR" - case LOGICALOP: - return "LOGICALOP" - case MODIFIER: - return "MODIFIER" - case CLAUSE: - return "CLAUSE" - case CLAUSE_CLOSE: - return "CLAUSE_CLOSE" - case TERNARY: - return "TERNARY" - case ACCESSOR: - return "ACCESSOR" - } - - return "UNKNOWN" -} diff --git a/vendor/github.com/casbin/govaluate/evaluationStage.go b/vendor/github.com/casbin/govaluate/evaluationStage.go deleted file mode 100644 index 27add4a7..00000000 --- a/vendor/github.com/casbin/govaluate/evaluationStage.go +++ /dev/null @@ -1,542 +0,0 @@ -package govaluate - -import ( - "errors" - "fmt" - "math" - "reflect" - "regexp" - "strings" - "unicode" -) - -const ( - logicalErrorFormat string = "Value '%v' cannot be used with the logical operator '%v', it is not a bool" - modifierErrorFormat string = "Value '%v' cannot be used with the modifier '%v', it is not a number" - comparatorErrorFormat string = "Value '%v' cannot be used with the comparator '%v', it is not a number" - ternaryErrorFormat string = "Value '%v' cannot be used with the ternary operator '%v', it is not a bool" - prefixErrorFormat string = "Value '%v' cannot be used with the prefix '%v'" -) - -type evaluationOperator func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) -type stageTypeCheck func(value interface{}) bool -type stageCombinedTypeCheck func(left interface{}, right interface{}) bool - -type evaluationStage struct { - symbol OperatorSymbol - - leftStage, rightStage *evaluationStage - - // the operation that will be used to evaluate this stage (such as adding [left] to [right] and 
return the result) - operator evaluationOperator - - // ensures that both left and right values are appropriate for this stage. Returns an error if they aren't operable. - leftTypeCheck stageTypeCheck - rightTypeCheck stageTypeCheck - - // if specified, will override whatever is used in "leftTypeCheck" and "rightTypeCheck". - // primarily used for specific operators that don't care which side a given type is on, but still requires one side to be of a given type - // (like string concat) - typeCheck stageCombinedTypeCheck - - // regardless of which type check is used, this string format will be used as the error message for type errors - typeErrorFormat string -} - -var ( - _true = interface{}(true) - _false = interface{}(false) -) - -func (this *evaluationStage) swapWith(other *evaluationStage) { - - temp := *other - other.setToNonStage(*this) - this.setToNonStage(temp) -} - -func (this *evaluationStage) setToNonStage(other evaluationStage) { - - this.symbol = other.symbol - this.operator = other.operator - this.leftTypeCheck = other.leftTypeCheck - this.rightTypeCheck = other.rightTypeCheck - this.typeCheck = other.typeCheck - this.typeErrorFormat = other.typeErrorFormat -} - -func (this *evaluationStage) isShortCircuitable() bool { - - switch this.symbol { - case AND: - fallthrough - case OR: - fallthrough - case TERNARY_TRUE: - fallthrough - case TERNARY_FALSE: - fallthrough - case COALESCE: - return true - } - - return false -} - -func noopStageRight(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return right, nil -} - -func addStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - - // string concat if either are strings - if isString(left) || isString(right) { - return fmt.Sprintf("%v%v", left, right), nil - } - - return left.(float64) + right.(float64), nil -} -func subtractStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return left.(float64) 
- right.(float64), nil -} -func multiplyStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return left.(float64) * right.(float64), nil -} -func divideStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return left.(float64) / right.(float64), nil -} -func exponentStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return math.Pow(left.(float64), right.(float64)), nil -} -func modulusStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return math.Mod(left.(float64), right.(float64)), nil -} -func gteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - if isString(left) && isString(right) { - return boolIface(left.(string) >= right.(string)), nil - } - return boolIface(left.(float64) >= right.(float64)), nil -} -func gtStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - if isString(left) && isString(right) { - return boolIface(left.(string) > right.(string)), nil - } - return boolIface(left.(float64) > right.(float64)), nil -} -func lteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - if isString(left) && isString(right) { - return boolIface(left.(string) <= right.(string)), nil - } - return boolIface(left.(float64) <= right.(float64)), nil -} -func ltStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - if isString(left) && isString(right) { - return boolIface(left.(string) < right.(string)), nil - } - return boolIface(left.(float64) < right.(float64)), nil -} -func equalStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return boolIface(reflect.DeepEqual(left, right)), nil -} -func notEqualStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return 
boolIface(!reflect.DeepEqual(left, right)), nil -} -func andStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return boolIface(left.(bool) && right.(bool)), nil -} -func orStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return boolIface(left.(bool) || right.(bool)), nil -} -func negateStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return -right.(float64), nil -} -func invertStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return boolIface(!right.(bool)), nil -} -func bitwiseNotStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return float64(^int64(right.(float64))), nil -} -func ternaryIfStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - if left.(bool) { - return right, nil - } - return nil, nil -} -func ternaryElseStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - if left != nil { - return left, nil - } - return right, nil -} - -func regexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - - var pattern *regexp.Regexp - var err error - - switch right := right.(type) { - case string: - pattern, err = regexp.Compile(right) - if err != nil { - return nil, fmt.Errorf("Unable to compile regexp pattern '%v': %v", right, err) - } - case *regexp.Regexp: - pattern = right - } - - return pattern.Match([]byte(left.(string))), nil -} - -func notRegexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - - ret, err := regexStage(left, right, parameters) - if err != nil { - return nil, err - } - - return !(ret.(bool)), nil -} - -func bitwiseOrStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return float64(int64(left.(float64)) | int64(right.(float64))), nil -} -func 
bitwiseAndStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return float64(int64(left.(float64)) & int64(right.(float64))), nil -} -func bitwiseXORStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return float64(int64(left.(float64)) ^ int64(right.(float64))), nil -} -func leftShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return float64(uint64(left.(float64)) << uint64(right.(float64))), nil -} -func rightShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return float64(uint64(left.(float64)) >> uint64(right.(float64))), nil -} - -func makeParameterStage(parameterName string) evaluationOperator { - - return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - value, err := parameters.Get(parameterName) - if err != nil { - return nil, err - } - - return value, nil - } -} - -func makeLiteralStage(literal interface{}) evaluationOperator { - return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - return literal, nil - } -} - -func makeFunctionStage(function ExpressionFunction) evaluationOperator { - - return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - - if right == nil { - return function() - } - - switch right := right.(type) { - case []interface{}: - return function(right...) 
- default: - return function(right) - } - } -} - -func typeConvertParam(p reflect.Value, t reflect.Type) (ret reflect.Value, err error) { - defer func() { - if r := recover(); r != nil { - errorMsg := fmt.Sprintf("Argument type conversion failed: failed to convert '%s' to '%s'", p.Kind().String(), t.Kind().String()) - err = errors.New(errorMsg) - ret = p - } - }() - - return p.Convert(t), nil -} - -func typeConvertParams(method reflect.Value, params []reflect.Value) ([]reflect.Value, error) { - - methodType := method.Type() - numIn := methodType.NumIn() - numParams := len(params) - - if numIn != numParams { - if numIn > numParams { - return nil, fmt.Errorf("Too few arguments to parameter call: got %d arguments, expected %d", len(params), numIn) - } - return nil, fmt.Errorf("Too many arguments to parameter call: got %d arguments, expected %d", len(params), numIn) - } - - for i := 0; i < numIn; i++ { - t := methodType.In(i) - p := params[i] - pt := p.Type() - - if t.Kind() != pt.Kind() { - np, err := typeConvertParam(p, t) - if err != nil { - return nil, err - } - params[i] = np - } - } - - return params, nil -} - -func makeAccessorStage(pair []string) evaluationOperator { - - reconstructed := strings.Join(pair, ".") - - return func(left interface{}, right interface{}, parameters Parameters) (ret interface{}, err error) { - - var params []reflect.Value - - value, err := parameters.Get(pair[0]) - if err != nil { - return nil, err - } - - // while this library generally tries to handle panic-inducing cases on its own, - // accessors are a sticky case which have a lot of possible ways to fail. - // therefore every call to an accessor sets up a defer that tries to recover from panics, converting them to errors. 
- defer func() { - if r := recover(); r != nil { - errorMsg := fmt.Sprintf("Failed to access '%s': %v", reconstructed, r.(string)) - err = errors.New(errorMsg) - ret = nil - } - }() - - LOOP: - for i := 1; i < len(pair); i++ { - - coreValue := reflect.ValueOf(value) - - var corePtrVal reflect.Value - - // if this is a pointer, resolve it. - if coreValue.Kind() == reflect.Ptr { - corePtrVal = coreValue - coreValue = coreValue.Elem() - } - - var field reflect.Value - var method reflect.Value - - switch coreValue.Kind() { - case reflect.Struct: - // check if field is exported - firstCharacter := getFirstRune(pair[i]) - if unicode.ToUpper(firstCharacter) != firstCharacter { - errorMsg := fmt.Sprintf("Unable to access unexported field '%s' in '%s'", pair[i], pair[i-1]) - return nil, errors.New(errorMsg) - } - - field = coreValue.FieldByName(pair[i]) - if field != (reflect.Value{}) { - value = field.Interface() - continue LOOP - } - - method = coreValue.MethodByName(pair[i]) - if method == (reflect.Value{}) { - if corePtrVal.IsValid() { - method = corePtrVal.MethodByName(pair[i]) - } - } - case reflect.Map: - field = coreValue.MapIndex(reflect.ValueOf(pair[i])) - if field != (reflect.Value{}) { - inter := field.Interface() - if reflect.TypeOf(inter).Kind() == reflect.Func { - method = reflect.ValueOf(inter) - } else { - value = inter - continue LOOP - } - } - default: - return nil, errors.New("Unable to access '" + pair[i] + "', '" + pair[i-1] + "' is not a struct or map") - } - - if method == (reflect.Value{}) { - return nil, errors.New("No method or field '" + pair[i] + "' present on parameter '" + pair[i-1] + "'") - } - - switch right := right.(type) { - case []interface{}: - - givenParams := right - params = make([]reflect.Value, len(givenParams)) - for idx := range givenParams { - params[idx] = reflect.ValueOf(givenParams[idx]) - } - - default: - - if right == nil { - params = []reflect.Value{} - break - } - - params = []reflect.Value{reflect.ValueOf(right)} - } - - 
params, err = typeConvertParams(method, params) - - if err != nil { - return nil, errors.New("Method call failed - '" + pair[0] + "." + pair[1] + "': " + err.Error()) - } - - returned := method.Call(params) - retLength := len(returned) - - if retLength == 0 { - return nil, errors.New("Method call '" + pair[i-1] + "." + pair[i] + "' did not return any values.") - } - - if retLength == 1 { - - value = returned[0].Interface() - continue - } - - if retLength == 2 { - - errIface := returned[1].Interface() - err, validType := errIface.(error) - - if validType && errIface != nil { - return returned[0].Interface(), err - } - - value = returned[0].Interface() - continue - } - - return nil, errors.New("Method call '" + pair[0] + "." + pair[1] + "' did not return either one value, or a value and an error. Cannot interpret meaning.") - } - - value = castToFloat64(value) - return value, nil - } -} - -func separatorStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - - var ret []interface{} - - switch left := left.(type) { - case []interface{}: - ret = append(left, right) - default: - ret = []interface{}{left, right} - } - - return ret, nil -} - -func inStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { - - for _, value := range right.([]interface{}) { - value = castToFloat64(value) - if left == value { - return true, nil - } - } - return false, nil -} - -// - -func isString(value interface{}) bool { - - switch value.(type) { - case string: - return true - } - return false -} - -func isRegexOrString(value interface{}) bool { - - switch value.(type) { - case string: - return true - case *regexp.Regexp: - return true - } - return false -} - -func isBool(value interface{}) bool { - switch value.(type) { - case bool: - return true - } - return false -} - -func isFloat64(value interface{}) bool { - switch value.(type) { - case float64: - return true - } - return false -} - -/* -Addition usually means between 
numbers, but can also mean string concat. -String concat needs one (or both) of the sides to be a string. -*/ -func additionTypeCheck(left interface{}, right interface{}) bool { - - if isFloat64(left) && isFloat64(right) { - return true - } - if !isString(left) && !isString(right) { - return false - } - return true -} - -/* -Comparison can either be between numbers, or lexicographic between two strings, -but never between the two. -*/ -func comparatorTypeCheck(left interface{}, right interface{}) bool { - - if isFloat64(left) && isFloat64(right) { - return true - } - if isString(left) && isString(right) { - return true - } - return false -} - -func isArray(value interface{}) bool { - switch value.(type) { - case []interface{}: - return true - } - return false -} - -/* -Converting a boolean to an interface{} requires an allocation. -We can use interned bools to avoid this cost. -*/ -func boolIface(b bool) interface{} { - if b { - return _true - } - return _false -} diff --git a/vendor/github.com/casbin/govaluate/expressionFunctions.go b/vendor/github.com/casbin/govaluate/expressionFunctions.go deleted file mode 100644 index ac6592b3..00000000 --- a/vendor/github.com/casbin/govaluate/expressionFunctions.go +++ /dev/null @@ -1,8 +0,0 @@ -package govaluate - -/* - Represents a function that can be called from within an expression. - This method must return an error if, for any reason, it is unable to produce exactly one unambiguous result. - An error returned will halt execution of the expression. 
-*/ -type ExpressionFunction func(arguments ...interface{}) (interface{}, error) diff --git a/vendor/github.com/casbin/govaluate/expressionOutputStream.go b/vendor/github.com/casbin/govaluate/expressionOutputStream.go deleted file mode 100644 index 88a84163..00000000 --- a/vendor/github.com/casbin/govaluate/expressionOutputStream.go +++ /dev/null @@ -1,46 +0,0 @@ -package govaluate - -import ( - "bytes" -) - -/* - Holds a series of "transactions" which represent each token as it is output by an outputter (such as ToSQLQuery()). - Some outputs (such as SQL) require a function call or non-c-like syntax to represent an expression. - To accomplish this, this struct keeps track of each translated token as it is output, and can return and rollback those transactions. -*/ -type expressionOutputStream struct { - transactions []string -} - -func (this *expressionOutputStream) add(transaction string) { - this.transactions = append(this.transactions, transaction) -} - -func (this *expressionOutputStream) rollback() string { - - index := len(this.transactions) - 1 - ret := this.transactions[index] - - this.transactions = this.transactions[:index] - return ret -} - -func (this *expressionOutputStream) createString(delimiter string) string { - - var retBuffer bytes.Buffer - var transaction string - - penultimate := len(this.transactions) - 1 - - for i := 0; i < penultimate; i++ { - - transaction = this.transactions[i] - - retBuffer.WriteString(transaction) - retBuffer.WriteString(delimiter) - } - retBuffer.WriteString(this.transactions[penultimate]) - - return retBuffer.String() -} diff --git a/vendor/github.com/casbin/govaluate/lexerState.go b/vendor/github.com/casbin/govaluate/lexerState.go deleted file mode 100644 index 6726e909..00000000 --- a/vendor/github.com/casbin/govaluate/lexerState.go +++ /dev/null @@ -1,373 +0,0 @@ -package govaluate - -import ( - "errors" - "fmt" -) - -type lexerState struct { - isEOF bool - isNullable bool - kind TokenKind - validNextKinds 
[]TokenKind -} - -// lexer states. -// Constant for all purposes except compiler. -var validLexerStates = []lexerState{ - - lexerState{ - kind: UNKNOWN, - isEOF: false, - isNullable: true, - validNextKinds: []TokenKind{ - - PREFIX, - NUMERIC, - BOOLEAN, - VARIABLE, - PATTERN, - FUNCTION, - ACCESSOR, - STRING, - TIME, - CLAUSE, - }, - }, - - lexerState{ - - kind: CLAUSE, - isEOF: false, - isNullable: true, - validNextKinds: []TokenKind{ - - PREFIX, - NUMERIC, - BOOLEAN, - VARIABLE, - PATTERN, - FUNCTION, - ACCESSOR, - STRING, - TIME, - CLAUSE, - CLAUSE_CLOSE, - }, - }, - - lexerState{ - - kind: CLAUSE_CLOSE, - isEOF: true, - isNullable: true, - validNextKinds: []TokenKind{ - - COMPARATOR, - MODIFIER, - NUMERIC, - BOOLEAN, - VARIABLE, - STRING, - PATTERN, - TIME, - CLAUSE, - CLAUSE_CLOSE, - LOGICALOP, - TERNARY, - SEPARATOR, - }, - }, - - lexerState{ - - kind: NUMERIC, - isEOF: true, - isNullable: false, - validNextKinds: []TokenKind{ - - MODIFIER, - COMPARATOR, - LOGICALOP, - CLAUSE_CLOSE, - TERNARY, - SEPARATOR, - }, - }, - lexerState{ - - kind: BOOLEAN, - isEOF: true, - isNullable: false, - validNextKinds: []TokenKind{ - - MODIFIER, - COMPARATOR, - LOGICALOP, - CLAUSE_CLOSE, - TERNARY, - SEPARATOR, - }, - }, - lexerState{ - - kind: STRING, - isEOF: true, - isNullable: false, - validNextKinds: []TokenKind{ - - MODIFIER, - COMPARATOR, - LOGICALOP, - CLAUSE_CLOSE, - TERNARY, - SEPARATOR, - }, - }, - lexerState{ - - kind: TIME, - isEOF: true, - isNullable: false, - validNextKinds: []TokenKind{ - - MODIFIER, - COMPARATOR, - LOGICALOP, - CLAUSE_CLOSE, - SEPARATOR, - }, - }, - lexerState{ - - kind: PATTERN, - isEOF: true, - isNullable: false, - validNextKinds: []TokenKind{ - - MODIFIER, - COMPARATOR, - LOGICALOP, - CLAUSE_CLOSE, - SEPARATOR, - }, - }, - lexerState{ - - kind: VARIABLE, - isEOF: true, - isNullable: false, - validNextKinds: []TokenKind{ - - MODIFIER, - COMPARATOR, - LOGICALOP, - CLAUSE_CLOSE, - TERNARY, - SEPARATOR, - }, - }, - lexerState{ - - kind: 
MODIFIER, - isEOF: false, - isNullable: false, - validNextKinds: []TokenKind{ - - PREFIX, - NUMERIC, - VARIABLE, - FUNCTION, - ACCESSOR, - STRING, - BOOLEAN, - CLAUSE, - CLAUSE_CLOSE, - }, - }, - lexerState{ - - kind: COMPARATOR, - isEOF: false, - isNullable: false, - validNextKinds: []TokenKind{ - - PREFIX, - NUMERIC, - BOOLEAN, - VARIABLE, - FUNCTION, - ACCESSOR, - STRING, - TIME, - CLAUSE, - CLAUSE_CLOSE, - PATTERN, - }, - }, - lexerState{ - - kind: LOGICALOP, - isEOF: false, - isNullable: false, - validNextKinds: []TokenKind{ - - PREFIX, - NUMERIC, - BOOLEAN, - VARIABLE, - FUNCTION, - ACCESSOR, - STRING, - TIME, - CLAUSE, - CLAUSE_CLOSE, - }, - }, - lexerState{ - - kind: PREFIX, - isEOF: false, - isNullable: false, - validNextKinds: []TokenKind{ - - NUMERIC, - BOOLEAN, - VARIABLE, - FUNCTION, - ACCESSOR, - CLAUSE, - CLAUSE_CLOSE, - }, - }, - - lexerState{ - - kind: TERNARY, - isEOF: false, - isNullable: false, - validNextKinds: []TokenKind{ - - PREFIX, - NUMERIC, - BOOLEAN, - STRING, - TIME, - VARIABLE, - FUNCTION, - ACCESSOR, - CLAUSE, - SEPARATOR, - }, - }, - lexerState{ - - kind: FUNCTION, - isEOF: false, - isNullable: false, - validNextKinds: []TokenKind{ - CLAUSE, - }, - }, - lexerState{ - - kind: ACCESSOR, - isEOF: true, - isNullable: false, - validNextKinds: []TokenKind{ - CLAUSE, - MODIFIER, - COMPARATOR, - LOGICALOP, - CLAUSE_CLOSE, - TERNARY, - SEPARATOR, - }, - }, - lexerState{ - - kind: SEPARATOR, - isEOF: false, - isNullable: true, - validNextKinds: []TokenKind{ - - PREFIX, - NUMERIC, - BOOLEAN, - STRING, - TIME, - VARIABLE, - FUNCTION, - ACCESSOR, - CLAUSE, - }, - }, -} - -func (this lexerState) canTransitionTo(kind TokenKind) bool { - - for _, validKind := range this.validNextKinds { - - if validKind == kind { - return true - } - } - - return false -} - -func checkExpressionSyntax(tokens []ExpressionToken) error { - - var state lexerState - var lastToken ExpressionToken - var err error - - state = validLexerStates[0] - - for _, token := range 
tokens { - - if !state.canTransitionTo(token.Kind) { - - // call out a specific error for tokens looking like they want to be functions. - if lastToken.Kind == VARIABLE && token.Kind == CLAUSE { - return errors.New("Undefined function " + lastToken.Value.(string)) - } - - firstStateName := fmt.Sprintf("%s [%v]", state.kind.String(), lastToken.Value) - nextStateName := fmt.Sprintf("%s [%v]", token.Kind.String(), token.Value) - - return errors.New("Cannot transition token types from " + firstStateName + " to " + nextStateName) - } - - state, err = getLexerStateForToken(token.Kind) - if err != nil { - return err - } - - if !state.isNullable && token.Value == nil { - - errorMsg := fmt.Sprintf("Token kind '%v' cannot have a nil value", token.Kind.String()) - return errors.New(errorMsg) - } - - lastToken = token - } - - if !state.isEOF { - return errors.New("Unexpected end of expression") - } - return nil -} - -func getLexerStateForToken(kind TokenKind) (lexerState, error) { - - for _, possibleState := range validLexerStates { - - if possibleState.kind == kind { - return possibleState, nil - } - } - - errorMsg := fmt.Sprintf("No lexer state found for token kind '%v'\n", kind.String()) - return validLexerStates[0], errors.New(errorMsg) -} diff --git a/vendor/github.com/casbin/govaluate/lexerStream.go b/vendor/github.com/casbin/govaluate/lexerStream.go deleted file mode 100644 index c6ed76ec..00000000 --- a/vendor/github.com/casbin/govaluate/lexerStream.go +++ /dev/null @@ -1,37 +0,0 @@ -package govaluate - -type lexerStream struct { - source []rune - position int - length int -} - -func newLexerStream(source string) *lexerStream { - - var ret *lexerStream - var runes []rune - - for _, character := range source { - runes = append(runes, character) - } - - ret = new(lexerStream) - ret.source = runes - ret.length = len(runes) - return ret -} - -func (this *lexerStream) readCharacter() rune { - - character := this.source[this.position] - this.position += 1 - return character 
-} - -func (this *lexerStream) rewind(amount int) { - this.position -= amount -} - -func (this lexerStream) canRead() bool { - return this.position < this.length -} diff --git a/vendor/github.com/casbin/govaluate/parameters.go b/vendor/github.com/casbin/govaluate/parameters.go deleted file mode 100644 index 6c5b9ecb..00000000 --- a/vendor/github.com/casbin/govaluate/parameters.go +++ /dev/null @@ -1,32 +0,0 @@ -package govaluate - -import ( - "errors" -) - -/* - Parameters is a collection of named parameters that can be used by an EvaluableExpression to retrieve parameters - when an expression tries to use them. -*/ -type Parameters interface { - - /* - Get gets the parameter of the given name, or an error if the parameter is unavailable. - Failure to find the given parameter should be indicated by returning an error. - */ - Get(name string) (interface{}, error) -} - -type MapParameters map[string]interface{} - -func (p MapParameters) Get(name string) (interface{}, error) { - - value, found := p[name] - - if !found { - errorMessage := "No parameter '" + name + "' found." 
- return nil, errors.New(errorMessage) - } - - return value, nil -} diff --git a/vendor/github.com/casbin/govaluate/parsing.go b/vendor/github.com/casbin/govaluate/parsing.go deleted file mode 100644 index dae78f7d..00000000 --- a/vendor/github.com/casbin/govaluate/parsing.go +++ /dev/null @@ -1,509 +0,0 @@ -package govaluate - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - "time" - "unicode" -) - -func parseTokens(expression string, functions map[string]ExpressionFunction) ([]ExpressionToken, error) { - - var ret []ExpressionToken - var token ExpressionToken - var stream *lexerStream - var state lexerState - var err error - var found bool - - stream = newLexerStream(expression) - state = validLexerStates[0] - - for stream.canRead() { - - token, err, found = readToken(stream, state, functions) - - if err != nil { - return ret, err - } - - if !found { - break - } - - state, err = getLexerStateForToken(token.Kind) - if err != nil { - return ret, err - } - - // append this valid token - ret = append(ret, token) - } - - err = checkBalance(ret) - if err != nil { - return nil, err - } - - return ret, nil -} - -func readToken(stream *lexerStream, state lexerState, functions map[string]ExpressionFunction) (ExpressionToken, error, bool) { - - var function ExpressionFunction - var ret ExpressionToken - var tokenValue interface{} - var tokenTime time.Time - var tokenString string - var kind TokenKind - var character rune - var found bool - var completed bool - var err error - - // numeric is 0-9, or . 
or 0x followed by digits - // string starts with ' - // variable is alphanumeric, always starts with a letter - // bracket always means variable - // symbols are anything non-alphanumeric - // all others read into a buffer until they reach the end of the stream - for stream.canRead() { - - character = stream.readCharacter() - - if unicode.IsSpace(character) { - continue - } - - // numeric constant - if isNumeric(character) { - - if stream.canRead() && character == '0' { - character = stream.readCharacter() - - if stream.canRead() && character == 'x' { - tokenString, _ = readUntilFalse(stream, false, true, true, isHexDigit) - tokenValueInt, err := strconv.ParseUint(tokenString, 16, 64) - - if err != nil { - errorMsg := fmt.Sprintf("Unable to parse hex value '%v' to uint64\n", tokenString) - return ExpressionToken{}, errors.New(errorMsg), false - } - - kind = NUMERIC - tokenValue = float64(tokenValueInt) - break - } else { - stream.rewind(1) - } - } - - tokenString = readTokenUntilFalse(stream, isNumeric) - tokenValue, err = strconv.ParseFloat(tokenString, 64) - - if err != nil { - errorMsg := fmt.Sprintf("Unable to parse numeric value '%v' to float64\n", tokenString) - return ExpressionToken{}, errors.New(errorMsg), false - } - kind = NUMERIC - break - } - - // comma, separator - if character == ',' { - - tokenValue = "," - kind = SEPARATOR - break - } - - // escaped variable - if character == '[' { - - tokenValue, completed = readUntilFalse(stream, true, false, true, isNotClosingBracket) - kind = VARIABLE - - if !completed { - return ExpressionToken{}, errors.New("Unclosed parameter bracket"), false - } - - // above method normally rewinds us to the closing bracket, which we want to skip. - stream.rewind(-1) - break - } - - // regular variable - or function? - if unicode.IsLetter(character) { - - tokenString = readTokenUntilFalse(stream, isVariableName) - - tokenValue = tokenString - kind = VARIABLE - - // boolean? 
- if tokenValue == "true" { - - kind = BOOLEAN - tokenValue = true - } else { - - if tokenValue == "false" { - - kind = BOOLEAN - tokenValue = false - } - } - - // textual operator? - if tokenValue == "in" || tokenValue == "IN" { - - // force lower case for consistency - tokenValue = "in" - kind = COMPARATOR - } - - // function? - function, found = functions[tokenString] - if found { - kind = FUNCTION - tokenValue = function - } - - // accessor? - accessorIndex := strings.Index(tokenString, ".") - if accessorIndex > 0 { - - // check that it doesn't end with a hanging period - if tokenString[len(tokenString)-1] == '.' { - errorMsg := fmt.Sprintf("Hanging accessor on token '%s'", tokenString) - return ExpressionToken{}, errors.New(errorMsg), false - } - - kind = ACCESSOR - splits := strings.Split(tokenString, ".") - tokenValue = splits - } - break - } - - if !isNotQuote(character) { - tokenValue, completed = readUntilFalse(stream, true, false, true, isNotQuote) - - if !completed { - return ExpressionToken{}, errors.New("Unclosed string literal"), false - } - - // advance the stream one position, since reading until false assumes the terminator is a real token - stream.rewind(-1) - - // check to see if this can be parsed as a time. - tokenTime, found = tryParseTime(tokenValue.(string)) - if found { - kind = TIME - tokenValue = tokenTime - } else { - kind = STRING - } - break - } - - if character == '(' { - tokenValue = character - kind = CLAUSE - break - } - - if character == ')' { - tokenValue = character - kind = CLAUSE_CLOSE - break - } - - // must be a known symbol - tokenString = readTokenUntilFalse(stream, isNotAlphanumeric) - tokenValue = tokenString - - // quick hack for the case where "-" can mean "prefixed negation" or "minus", which are used - // very differently. 
- if state.canTransitionTo(PREFIX) { - _, found = prefixSymbols[tokenString] - if found { - - kind = PREFIX - break - } - } - _, found = modifierSymbols[tokenString] - if found { - - kind = MODIFIER - break - } - - _, found = logicalSymbols[tokenString] - if found { - - kind = LOGICALOP - break - } - - _, found = comparatorSymbols[tokenString] - if found { - - kind = COMPARATOR - break - } - - _, found = ternarySymbols[tokenString] - if found { - - kind = TERNARY - break - } - - errorMessage := fmt.Sprintf("Invalid token: '%s'", tokenString) - return ret, errors.New(errorMessage), false - } - - ret.Kind = kind - ret.Value = tokenValue - - return ret, nil, (kind != UNKNOWN) -} - -func readTokenUntilFalse(stream *lexerStream, condition func(rune) bool) string { - - var ret string - - stream.rewind(1) - ret, _ = readUntilFalse(stream, false, true, true, condition) - return ret -} - -/* -Returns the string that was read until the given [condition] was false, or whitespace was broken. -Returns false if the stream ended before whitespace was broken or condition was met. 
-*/ -func readUntilFalse(stream *lexerStream, includeWhitespace bool, breakWhitespace bool, allowEscaping bool, condition func(rune) bool) (string, bool) { - - var tokenBuffer bytes.Buffer - var character rune - var conditioned bool - - conditioned = false - - for stream.canRead() { - - character = stream.readCharacter() - - // Use backslashes to escape anything - if allowEscaping && character == '\\' { - - character = stream.readCharacter() - tokenBuffer.WriteString(string(character)) - continue - } - - if unicode.IsSpace(character) { - - if breakWhitespace && tokenBuffer.Len() > 0 { - conditioned = true - break - } - if !includeWhitespace { - continue - } - } - - if condition(character) { - tokenBuffer.WriteString(string(character)) - } else { - conditioned = true - stream.rewind(1) - break - } - } - - return tokenBuffer.String(), conditioned -} - -/* -Checks to see if any optimizations can be performed on the given [tokens], which form a complete, valid expression. -The returns slice will represent the optimized (or unmodified) list of tokens to use. -*/ -func optimizeTokens(tokens []ExpressionToken) ([]ExpressionToken, error) { - - var token ExpressionToken - var symbol OperatorSymbol - var err error - var index int - - for index, token = range tokens { - - // if we find a regex operator, and the right-hand value is a constant, precompile and replace with a pattern. - if token.Kind != COMPARATOR { - continue - } - - symbol = comparatorSymbols[token.Value.(string)] - if symbol != REQ && symbol != NREQ { - continue - } - - index++ - token = tokens[index] - if token.Kind == STRING { - - token.Kind = PATTERN - token.Value, err = regexp.Compile(token.Value.(string)) - - if err != nil { - return tokens, err - } - - tokens[index] = token - } - } - return tokens, nil -} - -/* -Checks the balance of tokens which have multiple parts, such as parenthesis. 
-*/ -func checkBalance(tokens []ExpressionToken) error { - - var stream *tokenStream - var token ExpressionToken - var parens int - - stream = newTokenStream(tokens) - - for stream.hasNext() { - - token = stream.next() - if token.Kind == CLAUSE { - parens++ - continue - } - if token.Kind == CLAUSE_CLOSE { - parens-- - continue - } - } - - if parens != 0 { - return errors.New("Unbalanced parenthesis") - } - return nil -} - -func isHexDigit(character rune) bool { - - character = unicode.ToLower(character) - - return unicode.IsDigit(character) || - character == 'a' || - character == 'b' || - character == 'c' || - character == 'd' || - character == 'e' || - character == 'f' -} - -func isNumeric(character rune) bool { - - return unicode.IsDigit(character) || character == '.' -} - -func isNotQuote(character rune) bool { - - return character != '\'' && character != '"' -} - -func isNotAlphanumeric(character rune) bool { - - return !(unicode.IsDigit(character) || - unicode.IsLetter(character) || - character == '(' || - character == ')' || - character == '[' || - character == ']' || // starting to feel like there needs to be an `isOperation` func (#59) - !isNotQuote(character)) -} - -func isVariableName(character rune) bool { - - return unicode.IsLetter(character) || - unicode.IsDigit(character) || - character == '_' || - character == '.' -} - -func isNotClosingBracket(character rune) bool { - - return character != ']' -} - -/* -Attempts to parse the [candidate] as a Time. -Tries a series of standardized date formats, returns the Time if one applies, -otherwise returns false through the second return. 
-*/ -func tryParseTime(candidate string) (time.Time, bool) { - - var ret time.Time - var found bool - - timeFormats := [...]string{ - time.ANSIC, - time.UnixDate, - time.RubyDate, - time.Kitchen, - time.RFC3339, - time.RFC3339Nano, - "2006-01-02", // RFC 3339 - "2006-01-02 15:04", // RFC 3339 with minutes - "2006-01-02 15:04:05", // RFC 3339 with seconds - "2006-01-02 15:04:05-07:00", // RFC 3339 with seconds and timezone - "2006-01-02T15Z0700", // ISO8601 with hour - "2006-01-02T15:04Z0700", // ISO8601 with minutes - "2006-01-02T15:04:05Z0700", // ISO8601 with seconds - "2006-01-02T15:04:05.999999999Z0700", // ISO8601 with nanoseconds - } - - for _, format := range timeFormats { - - ret, found = tryParseExactTime(candidate, format) - if found { - return ret, true - } - } - - return time.Now(), false -} - -func tryParseExactTime(candidate string, format string) (time.Time, bool) { - - var ret time.Time - var err error - - ret, err = time.ParseInLocation(format, candidate, time.Local) - if err != nil { - return time.Now(), false - } - - return ret, true -} - -func getFirstRune(candidate string) rune { - - for _, character := range candidate { - return character - } - - return 0 -} diff --git a/vendor/github.com/casbin/govaluate/sanitizedParameters.go b/vendor/github.com/casbin/govaluate/sanitizedParameters.go deleted file mode 100644 index b254bff6..00000000 --- a/vendor/github.com/casbin/govaluate/sanitizedParameters.go +++ /dev/null @@ -1,43 +0,0 @@ -package govaluate - -// sanitizedParameters is a wrapper for Parameters that does sanitization as -// parameters are accessed. 
-type sanitizedParameters struct { - orig Parameters -} - -func (p sanitizedParameters) Get(key string) (interface{}, error) { - value, err := p.orig.Get(key) - if err != nil { - return nil, err - } - - return castToFloat64(value), nil -} - -func castToFloat64(value interface{}) interface{} { - switch value := value.(type) { - case uint8: - return float64(value) - case uint16: - return float64(value) - case uint32: - return float64(value) - case uint64: - return float64(value) - case int8: - return float64(value) - case int16: - return float64(value) - case int32: - return float64(value) - case int64: - return float64(value) - case int: - return float64(value) - case float32: - return float64(value) - } - - return value -} diff --git a/vendor/github.com/casbin/govaluate/stagePlanner.go b/vendor/github.com/casbin/govaluate/stagePlanner.go deleted file mode 100644 index 400a2879..00000000 --- a/vendor/github.com/casbin/govaluate/stagePlanner.go +++ /dev/null @@ -1,728 +0,0 @@ -package govaluate - -import ( - "errors" - "fmt" - "time" -) - -var stageSymbolMap = map[OperatorSymbol]evaluationOperator{ - EQ: equalStage, - NEQ: notEqualStage, - GT: gtStage, - LT: ltStage, - GTE: gteStage, - LTE: lteStage, - REQ: regexStage, - NREQ: notRegexStage, - AND: andStage, - OR: orStage, - IN: inStage, - BITWISE_OR: bitwiseOrStage, - BITWISE_AND: bitwiseAndStage, - BITWISE_XOR: bitwiseXORStage, - BITWISE_LSHIFT: leftShiftStage, - BITWISE_RSHIFT: rightShiftStage, - PLUS: addStage, - MINUS: subtractStage, - MULTIPLY: multiplyStage, - DIVIDE: divideStage, - MODULUS: modulusStage, - EXPONENT: exponentStage, - NEGATE: negateStage, - INVERT: invertStage, - BITWISE_NOT: bitwiseNotStage, - TERNARY_TRUE: ternaryIfStage, - TERNARY_FALSE: ternaryElseStage, - COALESCE: ternaryElseStage, - SEPARATE: separatorStage, -} - -/* -A "precedent" is a function which will recursively parse new evaluateionStages from a given stream of tokens. 
-It's called a `precedent` because it is expected to handle exactly what precedence of operator, -and defer to other `precedent`s for other operators. -*/ -type precedent func(stream *tokenStream) (*evaluationStage, error) - -/* -A convenience function for specifying the behavior of a `precedent`. -Most `precedent` functions can be described by the same function, just with different type checks, symbols, and error formats. -This struct is passed to `makePrecedentFromPlanner` to create a `precedent` function. -*/ -type precedencePlanner struct { - validSymbols map[string]OperatorSymbol - validKinds []TokenKind - - typeErrorFormat string - - next precedent - nextRight precedent -} - -var planPrefix precedent -var planExponential precedent -var planMultiplicative precedent -var planAdditive precedent -var planBitwise precedent -var planShift precedent -var planComparator precedent -var planLogicalAnd precedent -var planLogicalOr precedent -var planTernary precedent -var planSeparator precedent - -func init() { - - // all these stages can use the same code (in `planPrecedenceLevel`) to execute, - // they simply need different type checks, symbols, and recursive precedents. - // While not all precedent phases are listed here, most can be represented this way. 
- planPrefix = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: prefixSymbols, - validKinds: []TokenKind{PREFIX}, - typeErrorFormat: prefixErrorFormat, - nextRight: planFunction, - }) - planExponential = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: exponentialSymbolsS, - validKinds: []TokenKind{MODIFIER}, - typeErrorFormat: modifierErrorFormat, - next: planFunction, - }) - planMultiplicative = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: multiplicativeSymbols, - validKinds: []TokenKind{MODIFIER}, - typeErrorFormat: modifierErrorFormat, - next: planExponential, - }) - planAdditive = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: additiveSymbols, - validKinds: []TokenKind{MODIFIER}, - typeErrorFormat: modifierErrorFormat, - next: planMultiplicative, - }) - planShift = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: bitwiseShiftSymbols, - validKinds: []TokenKind{MODIFIER}, - typeErrorFormat: modifierErrorFormat, - next: planAdditive, - }) - planBitwise = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: bitwiseSymbols, - validKinds: []TokenKind{MODIFIER}, - typeErrorFormat: modifierErrorFormat, - next: planShift, - }) - planComparator = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: comparatorSymbols, - validKinds: []TokenKind{COMPARATOR}, - typeErrorFormat: comparatorErrorFormat, - next: planBitwise, - }) - planLogicalAnd = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: map[string]OperatorSymbol{"&&": AND}, - validKinds: []TokenKind{LOGICALOP}, - typeErrorFormat: logicalErrorFormat, - next: planComparator, - }) - planLogicalOr = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: map[string]OperatorSymbol{"||": OR}, - validKinds: []TokenKind{LOGICALOP}, - typeErrorFormat: logicalErrorFormat, - next: planLogicalAnd, - }) - planTernary = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: ternarySymbols, - validKinds: []TokenKind{TERNARY}, - 
typeErrorFormat: ternaryErrorFormat, - next: planLogicalOr, - }) - planSeparator = makePrecedentFromPlanner(&precedencePlanner{ - validSymbols: separatorSymbols, - validKinds: []TokenKind{SEPARATOR}, - next: planTernary, - }) -} - -/* -Given a planner, creates a function which will evaluate a specific precedence level of operators, -and link it to other `precedent`s which recurse to parse other precedence levels. -*/ -func makePrecedentFromPlanner(planner *precedencePlanner) precedent { - - var generated precedent - var nextRight precedent - - generated = func(stream *tokenStream) (*evaluationStage, error) { - return planPrecedenceLevel( - stream, - planner.typeErrorFormat, - planner.validSymbols, - planner.validKinds, - nextRight, - planner.next, - ) - } - - if planner.nextRight != nil { - nextRight = planner.nextRight - } else { - nextRight = generated - } - - return generated -} - -/* -Creates a `evaluationStageList` object which represents an execution plan (or tree) -which is used to completely evaluate a set of tokens at evaluation-time. -The three stages of evaluation can be thought of as parsing strings to tokens, then tokens to a stage list, then evaluation with parameters. -*/ -func planStages(tokens []ExpressionToken) (*evaluationStage, error) { - - stream := newTokenStream(tokens) - - stage, err := planTokens(stream) - if err != nil { - return nil, err - } - - // while we're now fully-planned, we now need to re-order same-precedence operators. - // this could probably be avoided with a different planning method - reorderStages(stage) - - stage = elideLiterals(stage) - return stage, nil -} - -func planTokens(stream *tokenStream) (*evaluationStage, error) { - - if !stream.hasNext() { - return nil, nil - } - - return planSeparator(stream) -} - -/* -The most usual method of parsing an evaluation stage for a given precedence. 
-Most stages use the same logic -*/ -func planPrecedenceLevel( - stream *tokenStream, - typeErrorFormat string, - validSymbols map[string]OperatorSymbol, - validKinds []TokenKind, - rightPrecedent precedent, - leftPrecedent precedent) (*evaluationStage, error) { - - var token ExpressionToken - var symbol OperatorSymbol - var leftStage, rightStage *evaluationStage - var checks typeChecks - var err error - var keyFound bool - - if leftPrecedent != nil { - - leftStage, err = leftPrecedent(stream) - if err != nil { - return nil, err - } - } - - rewind := func() (*evaluationStage, error) { - stream.rewind() - return leftStage, nil - } - - if stream.hasNext() { - - token = stream.next() - - if len(validKinds) > 0 { - - keyFound = false - for _, kind := range validKinds { - if kind == token.Kind { - keyFound = true - break - } - } - - if !keyFound { - return rewind() - } - } - - if validSymbols != nil { - - if !isString(token.Value) { - return rewind() - } - - symbol, keyFound = validSymbols[token.Value.(string)] - if !keyFound { - return rewind() - } - } - - if rightPrecedent != nil { - rightStage, err = rightPrecedent(stream) - if err != nil { - return nil, err - } - } - - checks = findTypeChecks(symbol) - - return &evaluationStage{ - - symbol: symbol, - leftStage: leftStage, - rightStage: rightStage, - operator: stageSymbolMap[symbol], - - leftTypeCheck: checks.left, - rightTypeCheck: checks.right, - typeCheck: checks.combined, - typeErrorFormat: typeErrorFormat, - }, nil - } - - return rewind() -} - -/* -A special case where functions need to be of higher precedence than values, and need a special wrapped execution stage operator. 
-*/ -func planFunction(stream *tokenStream) (*evaluationStage, error) { - - var token ExpressionToken - var rightStage *evaluationStage - var err error - - token = stream.next() - - if token.Kind != FUNCTION { - stream.rewind() - return planAccessor(stream) - } - - rightStage, err = planAccessor(stream) - if err != nil { - return nil, err - } - - return &evaluationStage{ - - symbol: FUNCTIONAL, - rightStage: rightStage, - operator: makeFunctionStage(token.Value.(ExpressionFunction)), - typeErrorFormat: "Unable to run function '%v': %v", - }, nil -} - -func planAccessor(stream *tokenStream) (*evaluationStage, error) { - - var token, otherToken ExpressionToken - var rightStage *evaluationStage - var err error - - if !stream.hasNext() { - return nil, nil - } - - token = stream.next() - - if token.Kind != ACCESSOR { - stream.rewind() - return planValue(stream) - } - - // check if this is meant to be a function or a field. - // fields have a clause next to them, functions do not. - // if it's a function, parse the arguments. Otherwise leave the right stage null. - if stream.hasNext() { - - otherToken = stream.next() - if otherToken.Kind == CLAUSE { - - stream.rewind() - - rightStage, err = planTokens(stream) - if err != nil { - return nil, err - } - } else { - stream.rewind() - } - } - - return &evaluationStage{ - - symbol: ACCESS, - rightStage: rightStage, - operator: makeAccessorStage(token.Value.([]string)), - typeErrorFormat: "Unable to access parameter field or method '%v': %v", - }, nil -} - -/* -A truly special precedence function, this handles all the "lowest-case" errata of the process, including literals, parmeters, -clauses, and prefixes. 
-*/ -func planValue(stream *tokenStream) (*evaluationStage, error) { - - var token ExpressionToken - var symbol OperatorSymbol - var ret *evaluationStage - var operator evaluationOperator - var err error - - if !stream.hasNext() { - return nil, nil - } - - token = stream.next() - - switch token.Kind { - - case CLAUSE: - - ret, err = planTokens(stream) - if err != nil { - return nil, err - } - - // advance past the CLAUSE_CLOSE token. We know that it's a CLAUSE_CLOSE, because at parse-time we check for unbalanced parens. - stream.next() - - // the stage we got represents all of the logic contained within the parens - // but for technical reasons, we need to wrap this stage in a "noop" stage which breaks long chains of precedence. - // see github #33. - ret = &evaluationStage{ - rightStage: ret, - operator: noopStageRight, - symbol: NOOP, - } - - return ret, nil - - case CLAUSE_CLOSE: - - // when functions have empty params, this will be hit. In this case, we don't have any evaluation stage to do, - // so we just return nil so that the stage planner continues on its way. - stream.rewind() - return nil, nil - - case VARIABLE: - operator = makeParameterStage(token.Value.(string)) - - case NUMERIC: - fallthrough - case STRING: - fallthrough - case PATTERN: - fallthrough - case BOOLEAN: - symbol = LITERAL - operator = makeLiteralStage(token.Value) - case TIME: - symbol = LITERAL - operator = makeLiteralStage(float64(token.Value.(time.Time).Unix())) - - case PREFIX: - stream.rewind() - return planPrefix(stream) - } - - if operator == nil { - errorMsg := fmt.Sprintf("Unable to plan token kind: '%s', value: '%v'", token.Kind.String(), token.Value) - return nil, errors.New(errorMsg) - } - - return &evaluationStage{ - symbol: symbol, - operator: operator, - }, nil -} - -/* -Convenience function to pass a triplet of typechecks between `findTypeChecks` and `planPrecedenceLevel`. -Each of these members may be nil, which indicates that type does not matter for that value. 
-*/ -type typeChecks struct { - left stageTypeCheck - right stageTypeCheck - combined stageCombinedTypeCheck -} - -/* -Maps a given [symbol] to a set of typechecks to be used during runtime. -*/ -func findTypeChecks(symbol OperatorSymbol) typeChecks { - - switch symbol { - case GT: - fallthrough - case LT: - fallthrough - case GTE: - fallthrough - case LTE: - return typeChecks{ - combined: comparatorTypeCheck, - } - case REQ: - fallthrough - case NREQ: - return typeChecks{ - left: isString, - right: isRegexOrString, - } - case AND: - fallthrough - case OR: - return typeChecks{ - left: isBool, - right: isBool, - } - case IN: - return typeChecks{ - right: isArray, - } - case BITWISE_LSHIFT: - fallthrough - case BITWISE_RSHIFT: - fallthrough - case BITWISE_OR: - fallthrough - case BITWISE_AND: - fallthrough - case BITWISE_XOR: - return typeChecks{ - left: isFloat64, - right: isFloat64, - } - case PLUS: - return typeChecks{ - combined: additionTypeCheck, - } - case MINUS: - fallthrough - case MULTIPLY: - fallthrough - case DIVIDE: - fallthrough - case MODULUS: - fallthrough - case EXPONENT: - return typeChecks{ - left: isFloat64, - right: isFloat64, - } - case NEGATE: - return typeChecks{ - right: isFloat64, - } - case INVERT: - return typeChecks{ - right: isBool, - } - case BITWISE_NOT: - return typeChecks{ - right: isFloat64, - } - case TERNARY_TRUE: - return typeChecks{ - left: isBool, - } - - // unchecked cases - case EQ: - fallthrough - case NEQ: - return typeChecks{} - case TERNARY_FALSE: - fallthrough - case COALESCE: - fallthrough - default: - return typeChecks{} - } -} - -/* -During stage planning, stages of equal precedence are parsed such that they'll be evaluated in reverse order. -For commutative operators like "+" or "-", it's no big deal. But for order-specific operators, it ruins the expected result. -*/ -func reorderStages(rootStage *evaluationStage) { - - // traverse every rightStage until we find multiples in a row of the same precedence. 
- var identicalPrecedences []*evaluationStage - var currentStage, nextStage *evaluationStage - var precedence, currentPrecedence operatorPrecedence - - nextStage = rootStage - precedence = findOperatorPrecedenceForSymbol(rootStage.symbol) - - for nextStage != nil { - - currentStage = nextStage - nextStage = currentStage.rightStage - - // left depth first, since this entire method only looks for precedences down the right side of the tree - if currentStage.leftStage != nil { - reorderStages(currentStage.leftStage) - } - - currentPrecedence = findOperatorPrecedenceForSymbol(currentStage.symbol) - - if currentPrecedence == precedence { - identicalPrecedences = append(identicalPrecedences, currentStage) - continue - } - - // precedence break. - // See how many in a row we had, and reorder if there's more than one. - if len(identicalPrecedences) > 1 { - mirrorStageSubtree(identicalPrecedences) - } - - identicalPrecedences = []*evaluationStage{currentStage} - precedence = currentPrecedence - } - - if len(identicalPrecedences) > 1 { - mirrorStageSubtree(identicalPrecedences) - } -} - -/* -Performs a "mirror" on a subtree of stages. -This mirror functionally inverts the order of execution for all members of the [stages] list. -That list is assumed to be a root-to-leaf (ordered) list of evaluation stages, where each is a right-hand stage of the last. 
-*/ -func mirrorStageSubtree(stages []*evaluationStage) { - - var rootStage, inverseStage, carryStage, frontStage *evaluationStage - - stagesLength := len(stages) - - // reverse all right/left - for _, frontStage = range stages { - - carryStage = frontStage.rightStage - frontStage.rightStage = frontStage.leftStage - frontStage.leftStage = carryStage - } - - // end left swaps with root right - rootStage = stages[0] - frontStage = stages[stagesLength-1] - - carryStage = frontStage.leftStage - frontStage.leftStage = rootStage.rightStage - rootStage.rightStage = carryStage - - // for all non-root non-end stages, right is swapped with inverse stage right in list - for i := 0; i < (stagesLength-2)/2+1; i++ { - - frontStage = stages[i+1] - inverseStage = stages[stagesLength-i-1] - - carryStage = frontStage.rightStage - frontStage.rightStage = inverseStage.rightStage - inverseStage.rightStage = carryStage - } - - // swap all other information with inverse stages - for i := 0; i < stagesLength/2; i++ { - - frontStage = stages[i] - inverseStage = stages[stagesLength-i-1] - frontStage.swapWith(inverseStage) - } -} - -/* -Recurses through all operators in the entire tree, eliding operators where both sides are literals. -*/ -func elideLiterals(root *evaluationStage) *evaluationStage { - - if root.leftStage != nil { - root.leftStage = elideLiterals(root.leftStage) - } - - if root.rightStage != nil { - root.rightStage = elideLiterals(root.rightStage) - } - - return elideStage(root) -} - -/* -Elides a specific stage, if possible. -Returns the unmodified [root] stage if it cannot or should not be elided. -Otherwise, returns a new stage representing the condensed value from the elided stages. -*/ -func elideStage(root *evaluationStage) *evaluationStage { - - var leftValue, rightValue, result interface{} - var err error - - // right side must be a non-nil value. Left side must be nil or a value. 
- if root.rightStage == nil || - root.rightStage.symbol != LITERAL || - root.leftStage == nil || - root.leftStage.symbol != LITERAL { - return root - } - - // don't elide some operators - switch root.symbol { - case SEPARATE: - fallthrough - case IN: - return root - } - - // both sides are values, get their actual values. - // errors should be near-impossible here. If we encounter them, just abort this optimization. - leftValue, err = root.leftStage.operator(nil, nil, nil) - if err != nil { - return root - } - - rightValue, err = root.rightStage.operator(nil, nil, nil) - if err != nil { - return root - } - - // typcheck, since the grammar checker is a bit loose with which operator symbols go together. - err = typeCheck(root.leftTypeCheck, leftValue, root.symbol, root.typeErrorFormat) - if err != nil { - return root - } - - err = typeCheck(root.rightTypeCheck, rightValue, root.symbol, root.typeErrorFormat) - if err != nil { - return root - } - - if root.typeCheck != nil && !root.typeCheck(leftValue, rightValue) { - return root - } - - // pre-calculate, and return a new stage representing the result. - result, err = root.operator(leftValue, rightValue, nil) - if err != nil { - return root - } - - return &evaluationStage{ - symbol: LITERAL, - operator: makeLiteralStage(result), - } -} diff --git a/vendor/github.com/casbin/govaluate/test.sh b/vendor/github.com/casbin/govaluate/test.sh deleted file mode 100644 index 11aa8b33..00000000 --- a/vendor/github.com/casbin/govaluate/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Script that runs tests, code coverage, and benchmarks all at once. -# Builds a symlink in /tmp, mostly to avoid messing with GOPATH at the user's shell level. 
- -TEMPORARY_PATH="/tmp/govaluate_test" -SRC_PATH="${TEMPORARY_PATH}/src" -FULL_PATH="${TEMPORARY_PATH}/src/govaluate" - -# set up temporary directory -rm -rf "${FULL_PATH}" -mkdir -p "${SRC_PATH}" - -ln -s $(pwd) "${FULL_PATH}" -export GOPATH="${TEMPORARY_PATH}" - -pushd "${TEMPORARY_PATH}/src/govaluate" - -# run the actual tests. -export GOVALUATE_TORTURE_TEST="true" -go test -bench=. -benchmem #-coverprofile coverage.out -status=$? - -if [ "${status}" != 0 ]; -then - exit $status -fi - -# coverage -# disabled because travis go1.4 seems not to support it suddenly? -#go tool cover -func=coverage.out - -popd diff --git a/vendor/github.com/casbin/govaluate/tokenStream.go b/vendor/github.com/casbin/govaluate/tokenStream.go deleted file mode 100644 index 7c7c40ab..00000000 --- a/vendor/github.com/casbin/govaluate/tokenStream.go +++ /dev/null @@ -1,30 +0,0 @@ -package govaluate - -type tokenStream struct { - tokens []ExpressionToken - index int - tokenLength int -} - -func newTokenStream(tokens []ExpressionToken) *tokenStream { - ret := new(tokenStream) - ret.tokens = tokens - ret.tokenLength = len(tokens) - return ret -} - -func (this *tokenStream) rewind() { - this.index -= 1 -} - -func (this *tokenStream) next() ExpressionToken { - token := this.tokens[this.index] - - this.index += 1 - return token -} - -func (this tokenStream) hasNext() bool { - - return this.index < this.tokenLength -} diff --git a/vendor/gopkg.in/ini.v1/.editorconfig b/vendor/github.com/go-ini/ini/.editorconfig similarity index 100% rename from vendor/gopkg.in/ini.v1/.editorconfig rename to vendor/github.com/go-ini/ini/.editorconfig diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/github.com/go-ini/ini/.gitignore similarity index 100% rename from vendor/gopkg.in/ini.v1/.gitignore rename to vendor/github.com/go-ini/ini/.gitignore diff --git a/vendor/gopkg.in/ini.v1/.golangci.yml b/vendor/github.com/go-ini/ini/.golangci.yml similarity index 100% rename from 
vendor/gopkg.in/ini.v1/.golangci.yml rename to vendor/github.com/go-ini/ini/.golangci.yml diff --git a/vendor/gopkg.in/ini.v1/LICENSE b/vendor/github.com/go-ini/ini/LICENSE similarity index 100% rename from vendor/gopkg.in/ini.v1/LICENSE rename to vendor/github.com/go-ini/ini/LICENSE diff --git a/vendor/gopkg.in/ini.v1/Makefile b/vendor/github.com/go-ini/ini/Makefile similarity index 100% rename from vendor/gopkg.in/ini.v1/Makefile rename to vendor/github.com/go-ini/ini/Makefile diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/github.com/go-ini/ini/README.md similarity index 100% rename from vendor/gopkg.in/ini.v1/README.md rename to vendor/github.com/go-ini/ini/README.md diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/github.com/go-ini/ini/codecov.yml similarity index 100% rename from vendor/gopkg.in/ini.v1/codecov.yml rename to vendor/github.com/go-ini/ini/codecov.yml diff --git a/vendor/gopkg.in/ini.v1/data_source.go b/vendor/github.com/go-ini/ini/data_source.go similarity index 100% rename from vendor/gopkg.in/ini.v1/data_source.go rename to vendor/github.com/go-ini/ini/data_source.go diff --git a/vendor/gopkg.in/ini.v1/deprecated.go b/vendor/github.com/go-ini/ini/deprecated.go similarity index 100% rename from vendor/gopkg.in/ini.v1/deprecated.go rename to vendor/github.com/go-ini/ini/deprecated.go diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/github.com/go-ini/ini/error.go similarity index 100% rename from vendor/gopkg.in/ini.v1/error.go rename to vendor/github.com/go-ini/ini/error.go diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/github.com/go-ini/ini/file.go similarity index 100% rename from vendor/gopkg.in/ini.v1/file.go rename to vendor/github.com/go-ini/ini/file.go diff --git a/vendor/gopkg.in/ini.v1/helper.go b/vendor/github.com/go-ini/ini/helper.go similarity index 100% rename from vendor/gopkg.in/ini.v1/helper.go rename to vendor/github.com/go-ini/ini/helper.go diff --git a/vendor/gopkg.in/ini.v1/ini.go 
b/vendor/github.com/go-ini/ini/ini.go similarity index 100% rename from vendor/gopkg.in/ini.v1/ini.go rename to vendor/github.com/go-ini/ini/ini.go diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/github.com/go-ini/ini/key.go similarity index 100% rename from vendor/gopkg.in/ini.v1/key.go rename to vendor/github.com/go-ini/ini/key.go diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/github.com/go-ini/ini/parser.go similarity index 100% rename from vendor/gopkg.in/ini.v1/parser.go rename to vendor/github.com/go-ini/ini/parser.go diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/github.com/go-ini/ini/section.go similarity index 100% rename from vendor/gopkg.in/ini.v1/section.go rename to vendor/github.com/go-ini/ini/section.go diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/github.com/go-ini/ini/struct.go similarity index 100% rename from vendor/gopkg.in/ini.v1/struct.go rename to vendor/github.com/go-ini/ini/struct.go diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index 1bbaf2f2..9ab0705a 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.21.0-green.svg) +![Project status](https://img.shields.io/badge/version-10.22.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report 
Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index 58a1470b..b6fbaafa 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -253,7 +253,7 @@ func parseOneOfParam2(s string) []string { oneofValsCacheRWLock.RUnlock() if !ok { oneofValsCacheRWLock.Lock() - vals = splitParamsRegex.FindAllString(s, -1) + vals = splitParamsRegex().FindAllString(s, -1) for i := 0; i < len(vals); i++ { vals[i] = strings.Replace(vals[i], "'", "", -1) } @@ -264,15 +264,15 @@ func parseOneOfParam2(s string) []string { } func isURLEncoded(fl FieldLevel) bool { - return uRLEncodedRegex.MatchString(fl.Field().String()) + return uRLEncodedRegex().MatchString(fl.Field().String()) } func isHTMLEncoded(fl FieldLevel) bool { - return hTMLEncodedRegex.MatchString(fl.Field().String()) + return hTMLEncodedRegex().MatchString(fl.Field().String()) } func isHTML(fl FieldLevel) bool { - return hTMLRegex.MatchString(fl.Field().String()) + return hTMLRegex().MatchString(fl.Field().String()) } func isOneOf(fl FieldLevel) bool { @@ -429,7 +429,7 @@ func isSSN(fl FieldLevel) bool { return false } - return sSNRegex.MatchString(field.String()) + return sSNRegex().MatchString(field.String()) } // isLongitude is the validation function for validating if the field's value is a valid longitude coordinate. @@ -452,7 +452,7 @@ func isLongitude(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } - return longitudeRegex.MatchString(v) + return longitudeRegex().MatchString(v) } // isLatitude is the validation function for validating if the field's value is a valid latitude coordinate. 
@@ -475,7 +475,7 @@ func isLatitude(fl FieldLevel) bool { panic(fmt.Sprintf("Bad field type %T", field.Interface())) } - return latitudeRegex.MatchString(v) + return latitudeRegex().MatchString(v) } // isDataURI is the validation function for validating if the field's value is a valid data URI. @@ -486,11 +486,11 @@ func isDataURI(fl FieldLevel) bool { return false } - if !dataURIRegex.MatchString(uri[0]) { + if !dataURIRegex().MatchString(uri[0]) { return false } - return base64Regex.MatchString(uri[1]) + return base64Regex().MatchString(uri[1]) } // hasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. @@ -501,17 +501,17 @@ func hasMultiByteCharacter(fl FieldLevel) bool { return true } - return multibyteRegex.MatchString(field.String()) + return multibyteRegex().MatchString(field.String()) } // isPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. func isPrintableASCII(fl FieldLevel) bool { - return printableASCIIRegex.MatchString(fl.Field().String()) + return printableASCIIRegex().MatchString(fl.Field().String()) } // isASCII is the validation function for validating if the field's value is a valid ASCII character. func isASCII(fl FieldLevel) bool { - return aSCIIRegex.MatchString(fl.Field().String()) + return aSCIIRegex().MatchString(fl.Field().String()) } // isUUID5 is the validation function for validating if the field's value is a valid v5 UUID. @@ -561,52 +561,52 @@ func isULID(fl FieldLevel) bool { // isMD4 is the validation function for validating if the field's value is a valid MD4. func isMD4(fl FieldLevel) bool { - return md4Regex.MatchString(fl.Field().String()) + return md4Regex().MatchString(fl.Field().String()) } // isMD5 is the validation function for validating if the field's value is a valid MD5. 
func isMD5(fl FieldLevel) bool { - return md5Regex.MatchString(fl.Field().String()) + return md5Regex().MatchString(fl.Field().String()) } // isSHA256 is the validation function for validating if the field's value is a valid SHA256. func isSHA256(fl FieldLevel) bool { - return sha256Regex.MatchString(fl.Field().String()) + return sha256Regex().MatchString(fl.Field().String()) } // isSHA384 is the validation function for validating if the field's value is a valid SHA384. func isSHA384(fl FieldLevel) bool { - return sha384Regex.MatchString(fl.Field().String()) + return sha384Regex().MatchString(fl.Field().String()) } // isSHA512 is the validation function for validating if the field's value is a valid SHA512. func isSHA512(fl FieldLevel) bool { - return sha512Regex.MatchString(fl.Field().String()) + return sha512Regex().MatchString(fl.Field().String()) } // isRIPEMD128 is the validation function for validating if the field's value is a valid PIPEMD128. func isRIPEMD128(fl FieldLevel) bool { - return ripemd128Regex.MatchString(fl.Field().String()) + return ripemd128Regex().MatchString(fl.Field().String()) } // isRIPEMD160 is the validation function for validating if the field's value is a valid PIPEMD160. func isRIPEMD160(fl FieldLevel) bool { - return ripemd160Regex.MatchString(fl.Field().String()) + return ripemd160Regex().MatchString(fl.Field().String()) } // isTIGER128 is the validation function for validating if the field's value is a valid TIGER128. func isTIGER128(fl FieldLevel) bool { - return tiger128Regex.MatchString(fl.Field().String()) + return tiger128Regex().MatchString(fl.Field().String()) } // isTIGER160 is the validation function for validating if the field's value is a valid TIGER160. func isTIGER160(fl FieldLevel) bool { - return tiger160Regex.MatchString(fl.Field().String()) + return tiger160Regex().MatchString(fl.Field().String()) } // isTIGER192 is the validation function for validating if the field's value is a valid isTIGER192. 
func isTIGER192(fl FieldLevel) bool { - return tiger192Regex.MatchString(fl.Field().String()) + return tiger192Regex().MatchString(fl.Field().String()) } // isISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. @@ -618,7 +618,7 @@ func isISBN(fl FieldLevel) bool { func isISBN13(fl FieldLevel) bool { s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4) - if !iSBN13Regex.MatchString(s) { + if !iSBN13Regex().MatchString(s) { return false } @@ -638,7 +638,7 @@ func isISBN13(fl FieldLevel) bool { func isISBN10(fl FieldLevel) bool { s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3) - if !iSBN10Regex.MatchString(s) { + if !iSBN10Regex().MatchString(s) { return false } @@ -662,7 +662,7 @@ func isISBN10(fl FieldLevel) bool { func isISSN(fl FieldLevel) bool { s := fl.Field().String() - if !iSSNRegex.MatchString(s) { + if !iSSNRegex().MatchString(s) { return false } s = strings.ReplaceAll(s, "-", "") @@ -688,14 +688,14 @@ func isISSN(fl FieldLevel) bool { func isEthereumAddress(fl FieldLevel) bool { address := fl.Field().String() - return ethAddressRegex.MatchString(address) + return ethAddressRegex().MatchString(address) } // isEthereumAddressChecksum is the validation function for validating if the field's value is a valid checksummed Ethereum address. func isEthereumAddressChecksum(fl FieldLevel) bool { address := fl.Field().String() - if !ethAddressRegex.MatchString(address) { + if !ethAddressRegex().MatchString(address) { return false } // Checksum validation. 
Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md @@ -721,7 +721,7 @@ func isEthereumAddressChecksum(fl FieldLevel) bool { func isBitcoinAddress(fl FieldLevel) bool { address := fl.Field().String() - if !btcAddressRegex.MatchString(address) { + if !btcAddressRegex().MatchString(address) { return false } @@ -758,7 +758,7 @@ func isBitcoinAddress(fl FieldLevel) bool { func isBitcoinBech32Address(fl FieldLevel) bool { address := fl.Field().String() - if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) { + if !btcLowerAddressRegexBech32().MatchString(address) && !btcUpperAddressRegexBech32().MatchString(address) { return false } @@ -1370,6 +1370,7 @@ func isPostcodeByIso3166Alpha2(fl FieldLevel) bool { field := fl.Field() param := fl.Param() + postcodeRegexInit.Do(initPostcodes) reg, found := postCodeRegexDict[param] if !found { return false @@ -1407,22 +1408,22 @@ func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool { // isBase32 is the validation function for validating if the current field's value is a valid base 32. func isBase32(fl FieldLevel) bool { - return base32Regex.MatchString(fl.Field().String()) + return base32Regex().MatchString(fl.Field().String()) } // isBase64 is the validation function for validating if the current field's value is a valid base 64. func isBase64(fl FieldLevel) bool { - return base64Regex.MatchString(fl.Field().String()) + return base64Regex().MatchString(fl.Field().String()) } // isBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string. func isBase64URL(fl FieldLevel) bool { - return base64URLRegex.MatchString(fl.Field().String()) + return base64URLRegex().MatchString(fl.Field().String()) } // isBase64RawURL is the validation function for validating if the current field's value is a valid base64 URL safe string without '=' padding. 
func isBase64RawURL(fl FieldLevel) bool { - return base64RawURLRegex.MatchString(fl.Field().String()) + return base64RawURLRegex().MatchString(fl.Field().String()) } // isURI is the validation function for validating if the current field's value is a valid URI. @@ -1668,42 +1669,42 @@ func isFilePath(fl FieldLevel) bool { // isE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number. func isE164(fl FieldLevel) bool { - return e164Regex.MatchString(fl.Field().String()) + return e164Regex().MatchString(fl.Field().String()) } // isEmail is the validation function for validating if the current field's value is a valid email address. func isEmail(fl FieldLevel) bool { - return emailRegex.MatchString(fl.Field().String()) + return emailRegex().MatchString(fl.Field().String()) } // isHSLA is the validation function for validating if the current field's value is a valid HSLA color. func isHSLA(fl FieldLevel) bool { - return hslaRegex.MatchString(fl.Field().String()) + return hslaRegex().MatchString(fl.Field().String()) } // isHSL is the validation function for validating if the current field's value is a valid HSL color. func isHSL(fl FieldLevel) bool { - return hslRegex.MatchString(fl.Field().String()) + return hslRegex().MatchString(fl.Field().String()) } // isRGBA is the validation function for validating if the current field's value is a valid RGBA color. func isRGBA(fl FieldLevel) bool { - return rgbaRegex.MatchString(fl.Field().String()) + return rgbaRegex().MatchString(fl.Field().String()) } // isRGB is the validation function for validating if the current field's value is a valid RGB color. func isRGB(fl FieldLevel) bool { - return rgbRegex.MatchString(fl.Field().String()) + return rgbRegex().MatchString(fl.Field().String()) } // isHEXColor is the validation function for validating if the current field's value is a valid HEX color. 
func isHEXColor(fl FieldLevel) bool { - return hexColorRegex.MatchString(fl.Field().String()) + return hexColorRegex().MatchString(fl.Field().String()) } // isHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. func isHexadecimal(fl FieldLevel) bool { - return hexadecimalRegex.MatchString(fl.Field().String()) + return hexadecimalRegex().MatchString(fl.Field().String()) } // isNumber is the validation function for validating if the current field's value is a valid number. @@ -1712,7 +1713,7 @@ func isNumber(fl FieldLevel) bool { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: return true default: - return numberRegex.MatchString(fl.Field().String()) + return numberRegex().MatchString(fl.Field().String()) } } @@ -1722,28 +1723,28 @@ func isNumeric(fl FieldLevel) bool { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: return true default: - return numericRegex.MatchString(fl.Field().String()) + return numericRegex().MatchString(fl.Field().String()) } } // isAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. func isAlphanum(fl FieldLevel) bool { - return alphaNumericRegex.MatchString(fl.Field().String()) + return alphaNumericRegex().MatchString(fl.Field().String()) } // isAlpha is the validation function for validating if the current field's value is a valid alpha value. func isAlpha(fl FieldLevel) bool { - return alphaRegex.MatchString(fl.Field().String()) + return alphaRegex().MatchString(fl.Field().String()) } // isAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. 
func isAlphanumUnicode(fl FieldLevel) bool { - return alphaUnicodeNumericRegex.MatchString(fl.Field().String()) + return alphaUnicodeNumericRegex().MatchString(fl.Field().String()) } // isAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. func isAlphaUnicode(fl FieldLevel) bool { - return alphaUnicodeRegex.MatchString(fl.Field().String()) + return alphaUnicodeRegex().MatchString(fl.Field().String()) } // isBoolean is the validation function for validating if the current field's value is a valid boolean value or can be safely converted to a boolean value. @@ -2566,11 +2567,11 @@ func isIP6Addr(fl FieldLevel) bool { } func isHostnameRFC952(fl FieldLevel) bool { - return hostnameRegexRFC952.MatchString(fl.Field().String()) + return hostnameRegexRFC952().MatchString(fl.Field().String()) } func isHostnameRFC1123(fl FieldLevel) bool { - return hostnameRegexRFC1123.MatchString(fl.Field().String()) + return hostnameRegexRFC1123().MatchString(fl.Field().String()) } func isFQDN(fl FieldLevel) bool { @@ -2580,7 +2581,7 @@ func isFQDN(fl FieldLevel) bool { return false } - return fqdnRegexRFC1123.MatchString(val) + return fqdnRegexRFC1123().MatchString(val) } // isDir is the validation function for validating if the current field's value is a valid existing directory. @@ -2679,7 +2680,7 @@ func isJSON(fl FieldLevel) bool { // isJWT is the validation function for validating if the current field's value is a valid JWT string. func isJWT(fl FieldLevel) bool { - return jWTRegex.MatchString(fl.Field().String()) + return jWTRegex().MatchString(fl.Field().String()) } // isHostnamePort validates a : combination for fields typically used for socket address. 
@@ -2698,7 +2699,7 @@ func isHostnamePort(fl FieldLevel) bool { // If host is specified, it should match a DNS name if host != "" { - return hostnameRegexRFC1123.MatchString(host) + return hostnameRegexRFC1123().MatchString(host) } return true } @@ -2885,21 +2886,21 @@ func isBCP47LanguageTag(fl FieldLevel) bool { func isIsoBicFormat(fl FieldLevel) bool { bicString := fl.Field().String() - return bicRegex.MatchString(bicString) + return bicRegex().MatchString(bicString) } // isSemverFormat is the validation function for validating if the current field's value is a valid semver version, defined in Semantic Versioning 2.0.0 func isSemverFormat(fl FieldLevel) bool { semverString := fl.Field().String() - return semverRegex.MatchString(semverString) + return semverRegex().MatchString(semverString) } // isCveFormat is the validation function for validating if the current field's value is a valid cve id, defined in CVE mitre org func isCveFormat(fl FieldLevel) bool { cveString := fl.Field().String() - return cveRegex.MatchString(cveString) + return cveRegex().MatchString(cveString) } // isDnsRFC1035LabelFormat is the validation function @@ -2907,7 +2908,7 @@ func isCveFormat(fl FieldLevel) bool { // a valid dns RFC 1035 label, defined in RFC 1035. 
func isDnsRFC1035LabelFormat(fl FieldLevel) bool { val := fl.Field().String() - return dnsRegexRFC1035Label.MatchString(val) + return dnsRegexRFC1035Label().MatchString(val) } // digitsHaveLuhnChecksum returns true if and only if the last element of the given digits slice is the Luhn checksum of the previous elements @@ -2936,13 +2937,13 @@ func digitsHaveLuhnChecksum(digits []string) bool { // isMongoDBObjectId is the validation function for validating if the current field's value is valid MongoDB ObjectID func isMongoDBObjectId(fl FieldLevel) bool { val := fl.Field().String() - return mongodbIdRegex.MatchString(val) + return mongodbIdRegex().MatchString(val) } // isMongoDBConnectionString is the validation function for validating if the current field's value is valid MongoDB Connection String func isMongoDBConnectionString(fl FieldLevel) bool { val := fl.Field().String() - return mongodbConnectionRegex.MatchString(val) + return mongodbConnectionRegex().MatchString(val) } // isSpiceDB is the validation function for validating if the current field's value is valid for use with Authzed SpiceDB in the indicated way @@ -2952,11 +2953,11 @@ func isSpiceDB(fl FieldLevel) bool { switch param { case "permission": - return spicedbPermissionRegex.MatchString(val) + return spicedbPermissionRegex().MatchString(val) case "type": - return spicedbTypeRegex.MatchString(val) + return spicedbTypeRegex().MatchString(val) case "id", "": - return spicedbIDRegex.MatchString(val) + return spicedbIDRegex().MatchString(val) } panic("Unrecognized parameter: " + param) @@ -3008,5 +3009,5 @@ func hasLuhnChecksum(fl FieldLevel) bool { // isCron is the validation function for validating if the current field's value is a valid cron expression func isCron(fl FieldLevel) bool { cronString := fl.Field().String() - return cronRegex.MatchString(cronString) + return cronRegex().MatchString(cronString) } diff --git a/vendor/github.com/go-playground/validator/v10/postcode_regexes.go 
b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go index e7e7b687..326b8f75 100644 --- a/vendor/github.com/go-playground/validator/v10/postcode_regexes.go +++ b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go @@ -1,6 +1,9 @@ package validator -import "regexp" +import ( + "regexp" + "sync" +) var postCodePatternDict = map[string]string{ "GB": `^GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\d{1,4}$`, @@ -164,9 +167,12 @@ var postCodePatternDict = map[string]string{ "YT": `^976\d{2}$`, } -var postCodeRegexDict = map[string]*regexp.Regexp{} +var ( + postcodeRegexInit sync.Once + postCodeRegexDict = map[string]*regexp.Regexp{} +) -func init() { +func initPostcodes() { for countryCode, pattern := range postCodePatternDict { postCodeRegexDict[countryCode] = regexp.MustCompile(pattern) } diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go index 5620fd78..7e1dd5a0 100644 --- a/vendor/github.com/go-playground/validator/v10/regexes.go +++ b/vendor/github.com/go-playground/validator/v10/regexes.go @@ -1,6 +1,9 @@ package validator -import "regexp" +import ( + "regexp" + "sync" +) const ( alphaRegexString = "^[a-zA-Z]+$" @@ -76,74 +79,85 @@ const ( spicedbTypeRegexString = "^([a-z][a-z0-9_]{1,61}[a-z0-9]/)?[a-z][a-z0-9_]{1,62}[a-z0-9]$" ) +func lazyRegexCompile(str string) func() *regexp.Regexp { + var regex *regexp.Regexp + var once sync.Once + return func() *regexp.Regexp { + once.Do(func() { + regex = regexp.MustCompile(str) + }) + return regex + } +} + var ( - alphaRegex = 
regexp.MustCompile(alphaRegexString) - alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) - alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString) - alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString) - numericRegex = regexp.MustCompile(numericRegexString) - numberRegex = regexp.MustCompile(numberRegexString) - hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) - hexColorRegex = regexp.MustCompile(hexColorRegexString) - rgbRegex = regexp.MustCompile(rgbRegexString) - rgbaRegex = regexp.MustCompile(rgbaRegexString) - hslRegex = regexp.MustCompile(hslRegexString) - hslaRegex = regexp.MustCompile(hslaRegexString) - e164Regex = regexp.MustCompile(e164RegexString) - emailRegex = regexp.MustCompile(emailRegexString) - base32Regex = regexp.MustCompile(base32RegexString) - base64Regex = regexp.MustCompile(base64RegexString) - base64URLRegex = regexp.MustCompile(base64URLRegexString) - base64RawURLRegex = regexp.MustCompile(base64RawURLRegexString) - iSBN10Regex = regexp.MustCompile(iSBN10RegexString) - iSBN13Regex = regexp.MustCompile(iSBN13RegexString) - iSSNRegex = regexp.MustCompile(iSSNRegexString) - uUID3Regex = regexp.MustCompile(uUID3RegexString) - uUID4Regex = regexp.MustCompile(uUID4RegexString) - uUID5Regex = regexp.MustCompile(uUID5RegexString) - uUIDRegex = regexp.MustCompile(uUIDRegexString) - uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString) - uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString) - uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString) - uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString) - uLIDRegex = regexp.MustCompile(uLIDRegexString) - md4Regex = regexp.MustCompile(md4RegexString) - md5Regex = regexp.MustCompile(md5RegexString) - sha256Regex = regexp.MustCompile(sha256RegexString) - sha384Regex = regexp.MustCompile(sha384RegexString) - sha512Regex = regexp.MustCompile(sha512RegexString) - ripemd128Regex = 
regexp.MustCompile(ripemd128RegexString) - ripemd160Regex = regexp.MustCompile(ripemd160RegexString) - tiger128Regex = regexp.MustCompile(tiger128RegexString) - tiger160Regex = regexp.MustCompile(tiger160RegexString) - tiger192Regex = regexp.MustCompile(tiger192RegexString) - aSCIIRegex = regexp.MustCompile(aSCIIRegexString) - printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) - multibyteRegex = regexp.MustCompile(multibyteRegexString) - dataURIRegex = regexp.MustCompile(dataURIRegexString) - latitudeRegex = regexp.MustCompile(latitudeRegexString) - longitudeRegex = regexp.MustCompile(longitudeRegexString) - sSNRegex = regexp.MustCompile(sSNRegexString) - hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952) - hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123) - fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123) - btcAddressRegex = regexp.MustCompile(btcAddressRegexString) - btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32) - btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32) - ethAddressRegex = regexp.MustCompile(ethAddressRegexString) - uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString) - hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString) - hTMLRegex = regexp.MustCompile(hTMLRegexString) - jWTRegex = regexp.MustCompile(jWTRegexString) - splitParamsRegex = regexp.MustCompile(splitParamsRegexString) - bicRegex = regexp.MustCompile(bicRegexString) - semverRegex = regexp.MustCompile(semverRegexString) - dnsRegexRFC1035Label = regexp.MustCompile(dnsRegexStringRFC1035Label) - cveRegex = regexp.MustCompile(cveRegexString) - mongodbIdRegex = regexp.MustCompile(mongodbIdRegexString) - mongodbConnectionRegex = regexp.MustCompile(mongodbConnStringRegexString) - cronRegex = regexp.MustCompile(cronRegexString) - spicedbIDRegex = regexp.MustCompile(spicedbIDRegexString) - spicedbPermissionRegex = 
regexp.MustCompile(spicedbPermissionRegexString) - spicedbTypeRegex = regexp.MustCompile(spicedbTypeRegexString) + alphaRegex = lazyRegexCompile(alphaRegexString) + alphaNumericRegex = lazyRegexCompile(alphaNumericRegexString) + alphaUnicodeRegex = lazyRegexCompile(alphaUnicodeRegexString) + alphaUnicodeNumericRegex = lazyRegexCompile(alphaUnicodeNumericRegexString) + numericRegex = lazyRegexCompile(numericRegexString) + numberRegex = lazyRegexCompile(numberRegexString) + hexadecimalRegex = lazyRegexCompile(hexadecimalRegexString) + hexColorRegex = lazyRegexCompile(hexColorRegexString) + rgbRegex = lazyRegexCompile(rgbRegexString) + rgbaRegex = lazyRegexCompile(rgbaRegexString) + hslRegex = lazyRegexCompile(hslRegexString) + hslaRegex = lazyRegexCompile(hslaRegexString) + e164Regex = lazyRegexCompile(e164RegexString) + emailRegex = lazyRegexCompile(emailRegexString) + base32Regex = lazyRegexCompile(base32RegexString) + base64Regex = lazyRegexCompile(base64RegexString) + base64URLRegex = lazyRegexCompile(base64URLRegexString) + base64RawURLRegex = lazyRegexCompile(base64RawURLRegexString) + iSBN10Regex = lazyRegexCompile(iSBN10RegexString) + iSBN13Regex = lazyRegexCompile(iSBN13RegexString) + iSSNRegex = lazyRegexCompile(iSSNRegexString) + uUID3Regex = lazyRegexCompile(uUID3RegexString) + uUID4Regex = lazyRegexCompile(uUID4RegexString) + uUID5Regex = lazyRegexCompile(uUID5RegexString) + uUIDRegex = lazyRegexCompile(uUIDRegexString) + uUID3RFC4122Regex = lazyRegexCompile(uUID3RFC4122RegexString) + uUID4RFC4122Regex = lazyRegexCompile(uUID4RFC4122RegexString) + uUID5RFC4122Regex = lazyRegexCompile(uUID5RFC4122RegexString) + uUIDRFC4122Regex = lazyRegexCompile(uUIDRFC4122RegexString) + uLIDRegex = lazyRegexCompile(uLIDRegexString) + md4Regex = lazyRegexCompile(md4RegexString) + md5Regex = lazyRegexCompile(md5RegexString) + sha256Regex = lazyRegexCompile(sha256RegexString) + sha384Regex = lazyRegexCompile(sha384RegexString) + sha512Regex = 
lazyRegexCompile(sha512RegexString) + ripemd128Regex = lazyRegexCompile(ripemd128RegexString) + ripemd160Regex = lazyRegexCompile(ripemd160RegexString) + tiger128Regex = lazyRegexCompile(tiger128RegexString) + tiger160Regex = lazyRegexCompile(tiger160RegexString) + tiger192Regex = lazyRegexCompile(tiger192RegexString) + aSCIIRegex = lazyRegexCompile(aSCIIRegexString) + printableASCIIRegex = lazyRegexCompile(printableASCIIRegexString) + multibyteRegex = lazyRegexCompile(multibyteRegexString) + dataURIRegex = lazyRegexCompile(dataURIRegexString) + latitudeRegex = lazyRegexCompile(latitudeRegexString) + longitudeRegex = lazyRegexCompile(longitudeRegexString) + sSNRegex = lazyRegexCompile(sSNRegexString) + hostnameRegexRFC952 = lazyRegexCompile(hostnameRegexStringRFC952) + hostnameRegexRFC1123 = lazyRegexCompile(hostnameRegexStringRFC1123) + fqdnRegexRFC1123 = lazyRegexCompile(fqdnRegexStringRFC1123) + btcAddressRegex = lazyRegexCompile(btcAddressRegexString) + btcUpperAddressRegexBech32 = lazyRegexCompile(btcAddressUpperRegexStringBech32) + btcLowerAddressRegexBech32 = lazyRegexCompile(btcAddressLowerRegexStringBech32) + ethAddressRegex = lazyRegexCompile(ethAddressRegexString) + uRLEncodedRegex = lazyRegexCompile(uRLEncodedRegexString) + hTMLEncodedRegex = lazyRegexCompile(hTMLEncodedRegexString) + hTMLRegex = lazyRegexCompile(hTMLRegexString) + jWTRegex = lazyRegexCompile(jWTRegexString) + splitParamsRegex = lazyRegexCompile(splitParamsRegexString) + bicRegex = lazyRegexCompile(bicRegexString) + semverRegex = lazyRegexCompile(semverRegexString) + dnsRegexRFC1035Label = lazyRegexCompile(dnsRegexStringRFC1035Label) + cveRegex = lazyRegexCompile(cveRegexString) + mongodbIdRegex = lazyRegexCompile(mongodbIdRegexString) + mongodbConnectionRegex = lazyRegexCompile(mongodbConnStringRegexString) + cronRegex = lazyRegexCompile(cronRegexString) + spicedbIDRegex = lazyRegexCompile(spicedbIDRegexString) + spicedbPermissionRegex = lazyRegexCompile(spicedbPermissionRegexString) + 
spicedbTypeRegex = lazyRegexCompile(spicedbTypeRegexString) ) diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go index fc8f8b13..9285223a 100644 --- a/vendor/github.com/go-playground/validator/v10/util.go +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -297,7 +297,8 @@ func panicIf(err error) { // Checks if field value matches regex. If fl.Field can be cast to Stringer, it uses the Stringer interfaces // String() return value. Otherwise, it uses fl.Field's String() value. -func fieldMatchesRegexByStringerValOrString(regex *regexp.Regexp, fl FieldLevel) bool { +func fieldMatchesRegexByStringerValOrString(regexFn func() *regexp.Regexp, fl FieldLevel) bool { + regex := regexFn() switch fl.Field().Kind() { case reflect.String: return regex.MatchString(fl.Field().String()) diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig deleted file mode 100644 index 2940ec92..00000000 --- a/vendor/github.com/gorilla/websocket/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] -indent_style = tab -indent_size = 4 - -[*.md] -indent_size = 4 -trim_trailing_whitespace = false - -eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index 84039fec..cd3fcd1e 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -1 +1,25 @@ -coverage.coverprofile +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + 
+_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml deleted file mode 100644 index 34882139..00000000 --- a/vendor/github.com/gorilla/websocket/.golangci.yml +++ /dev/null @@ -1,3 +0,0 @@ -run: - skip-dirs: - - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 00000000..1931f400 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE index bb9d80bc..9171c972 100644 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -1,27 +1,22 @@ -Copyright (c) 2023 The Gorilla Authors. All rights reserved. +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
+ Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile deleted file mode 100644 index 603a63f5..00000000 --- a/vendor/github.com/gorilla/websocket/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') -GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -GO_SEC=$(shell which gosec 2> /dev/null || echo '') -GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest - -GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') -GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest - -.PHONY: golangci-lint -golangci-lint: - $(if $(GO_LINT), ,go install $(GO_LINT_URI)) - @echo "##### Running golangci-lint" - golangci-lint run -v - -.PHONY: gosec -gosec: - $(if $(GO_SEC), ,go install $(GO_SEC_URI)) - @echo "##### Running gosec" - gosec -exclude-dir examples ./... - -.PHONY: govulncheck -govulncheck: - $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) - @echo "##### Running govulncheck" - govulncheck ./... - -.PHONY: verify -verify: golangci-lint gosec govulncheck - -.PHONY: test -test: - @echo "##### Running tests" - go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 1fd5e9c4..d33ed7fd 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -1,13 +1,10 @@ -# gorilla/websocket +# Gorilla WebSocket -![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) -[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) -[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. - -![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. ### Documentation @@ -17,7 +14,6 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) -* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) ### Status @@ -34,3 +30,4 @@ package API is stable. 
The Gorilla WebSocket package passes the server tests in the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 815b0ca5..04fdafee 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -11,16 +11,13 @@ import ( "errors" "fmt" "io" - "log" - + "io/ioutil" "net" "net/http" "net/http/httptrace" "net/url" "strings" "time" - - "golang.org/x/net/proxy" ) // ErrBadHandshake is returned when the server response to opening handshake is @@ -228,7 +225,6 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || - //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) @@ -294,9 +290,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } err = c.SetDeadline(deadline) if err != nil { - if err := c.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + c.Close() return nil, err } return c, nil @@ -310,7 +304,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h return nil, nil, err } if proxyURL != nil { - dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) if err != nil { return nil, nil, err } @@ -336,9 +330,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h defer func() { if netConn != nil { - if err := netConn.Close(); err != nil { - 
log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() } }() @@ -408,7 +400,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h // debugging. buf := make([]byte, 1024) n, _ := io.ReadFull(resp.Body, buf) - resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) return nil, resp, ErrBadHandshake } @@ -426,19 +418,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h break } - resp.Body = io.NopCloser(bytes.NewReader([]byte{})) + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - if err := netConn.SetDeadline(time.Time{}); err != nil { - return nil, nil, err - } + netConn.SetDeadline(time.Time{}) netConn = nil // to avoid close in defer. return conn, resp, nil } func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { - return &tls.Config{MinVersion: tls.VersionTLS12} + return &tls.Config{} } return cfg.Clone() } diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go index 9fed0ef5..813ffb1e 100644 --- a/vendor/github.com/gorilla/websocket/compression.go +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -8,7 +8,6 @@ import ( "compress/flate" "errors" "io" - "log" "strings" "sync" ) @@ -34,9 +33,7 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser { "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) - if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { - panic(err) - } + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) return &flateReadWrapper{fr} } @@ -135,9 +132,7 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) { // Preemptively place the reader back in the pool. 
This helps with // scenarios where the application does not call NextReader() soon after // this final read. - if err := r.Close(); err != nil { - log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) - } + r.Close() } return n, err } diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 221e6cf7..5161ef81 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -6,11 +6,11 @@ package websocket import ( "bufio" - "crypto/rand" "encoding/binary" "errors" "io" - "log" + "io/ioutil" + "math/rand" "net" "strconv" "strings" @@ -181,20 +181,13 @@ var ( errInvalidControlFrame = errors.New("websocket: invalid control frame") ) -// maskRand is an io.Reader for generating mask bytes. The reader is initialized -// to crypto/rand Reader. Tests swap the reader to a math/rand reader for -// reproducible results. -var maskRand = rand.Reader - -// newMaskKey returns a new 32 bit value for masking client frames. 
func newMaskKey() [4]byte { - var k [4]byte - _, _ = io.ReadFull(maskRand, k[:]) - return k + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} } func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok { + if e, ok := err.(net.Error); ok && e.Temporary() { err = &netError{msg: e.Error(), timeout: e.Timeout()} } return err @@ -379,9 +372,7 @@ func (c *Conn) read(n int) ([]byte, error) { if err == io.EOF { err = errUnexpectedEOF } - if _, err := c.br.Discard(len(p)); err != nil { - return p, err - } + c.br.Discard(len(p)) return p, err } @@ -396,9 +387,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return err } - if err := c.conn.SetWriteDeadline(deadline); err != nil { - return c.writeFatal(err) - } + c.conn.SetWriteDeadline(deadline) if len(buf1) == 0 { _, err = c.conn.Write(buf0) } else { @@ -408,7 +397,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return c.writeFatal(err) } if frameType == CloseMessage { - _ = c.writeFatal(ErrCloseSent) + c.writeFatal(ErrCloseSent) } return nil } @@ -449,7 +438,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er d := 1000 * time.Hour if !deadline.IsZero() { - d = time.Until(deadline) + d = deadline.Sub(time.Now()) if d < 0 { return errWriteTimeout } @@ -471,15 +460,13 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er return err } - if err := c.conn.SetWriteDeadline(deadline); err != nil { - return c.writeFatal(err) - } + c.conn.SetWriteDeadline(deadline) _, err = c.conn.Write(buf) if err != nil { return c.writeFatal(err) } if messageType == CloseMessage { - _ = c.writeFatal(ErrCloseSent) + c.writeFatal(ErrCloseSent) } return err } @@ -490,9 +477,7 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // probably better to return an error in this situation, but we cannot // change this without breaking existing 
applications. if c.writer != nil { - if err := c.writer.Close(); err != nil { - log.Printf("websocket: discarding writer close error: %v", err) - } + c.writer.Close() c.writer = nil } @@ -645,7 +630,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { } if final { - _ = w.endMessage(errWriteClosed) + w.endMessage(errWriteClosed) return nil } @@ -810,7 +795,7 @@ func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. if c.readRemaining > 0 { - if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { return noFrame, err } } @@ -832,9 +817,7 @@ func (c *Conn) advanceFrame() (int, error) { rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 - if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { - return noFrame, err - } + c.setReadRemaining(int64(p[1] & 0x7f)) c.readDecompress = false if rsv1 { @@ -939,9 +922,7 @@ func (c *Conn) advanceFrame() (int, error) { } if c.readLimit > 0 && c.readLength > c.readLimit { - if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { - return noFrame, err - } + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) return noFrame, ErrReadLimit } @@ -953,9 +934,7 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - if err := c.setReadRemaining(0); err != nil { - return noFrame, err - } + c.setReadRemaining(0) if err != nil { return noFrame, err } @@ -1002,9 +981,7 @@ func (c *Conn) handleProtocolError(message string) error { if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } - if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { - return err - } + c.WriteControl(CloseMessage, data, 
time.Now().Add(writeWait)) return errors.New("websocket: " + message) } @@ -1021,9 +998,7 @@ func (c *Conn) handleProtocolError(message string) error { func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. if c.reader != nil { - if err := c.reader.Close(); err != nil { - log.Printf("websocket: discarding reader close error: %v", err) - } + c.reader.Close() c.reader = nil } @@ -1079,9 +1054,7 @@ func (r *messageReader) Read(b []byte) (int, error) { } rem := c.readRemaining rem -= int64(n) - if err := c.setReadRemaining(rem); err != nil { - return 0, err - } + c.setReadRemaining(rem) if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1121,7 +1094,7 @@ func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { if err != nil { return messageType, nil, err } - p, err = io.ReadAll(r) + p, err = ioutil.ReadAll(r) return messageType, p, err } @@ -1163,9 +1136,7 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") - if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { - return err - } + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) return nil } } @@ -1190,7 +1161,7 @@ func (c *Conn) SetPingHandler(h func(appData string) error) { err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) if err == ErrCloseSent { return nil - } else if _, ok := err.(net.Error); ok { + } else if e, ok := err.(net.Error); ok && e.Temporary() { return nil } return err diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index 67d0968b..d0742bf2 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -9,7 +9,6 @@ package websocket import "unsafe" -// #nosec G103 -- (CWE-242) Has been audited 
const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { @@ -23,7 +22,6 @@ func maskBytes(key [4]byte, pos int, b []byte) int { } // Mask one byte at a time to word boundary. - //#nosec G103 -- (CWE-242) Has been audited if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { n = wordSize - n for i := range b[:n] { @@ -38,13 +36,11 @@ func maskBytes(key [4]byte, pos int, b []byte) int { for i := range k { k[i] = key[(pos+i)&3] } - //#nosec G103 -- (CWE-242) Has been audited kw := *(*uintptr)(unsafe.Pointer(&k)) // Mask one word at a time. n := (len(b) / wordSize) * wordSize for i := 0; i < n; i += wordSize { - //#nosec G103 -- (CWE-242) Has been audited *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index 80f55d1e..e0f466b7 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -8,13 +8,10 @@ import ( "bufio" "encoding/base64" "errors" - "log" "net" "net/http" "net/url" "strings" - - "golang.org/x/net/proxy" ) type netDialerFunc func(network, addr string) (net.Conn, error) @@ -24,7 +21,7 @@ func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { } func init() { - proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } @@ -58,9 +55,7 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } if err := connectReq.Write(conn); err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() return nil, err } @@ -69,16 +64,12 @@ func (hpd *httpProxyDialer) 
Dial(network string, addr string) (net.Conn, error) br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() return nil, err } if resp.StatusCode != 200 { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } + conn.Close() f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 1e720e1d..bb335974 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -8,7 +8,6 @@ import ( "bufio" "errors" "io" - "log" "net/http" "net/url" "strings" @@ -184,9 +183,7 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } if brw.Reader.Buffered() > 0 { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() return nil, errors.New("websocket: client sent data before handshake is complete") } @@ -251,34 +248,17 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, "\r\n"...) // Clear deadlines set by HTTP server. 
- if err := netConn.SetDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetDeadline(time.Time{}) if u.HandshakeTimeout > 0 { - if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) } if _, err = netConn.Write(p); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } + netConn.Close() return nil, err } if u.HandshakeTimeout > 0 { - if err := netConn.SetWriteDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } + netConn.SetWriteDeadline(time.Time{}) } return c, nil @@ -376,12 +356,8 @@ func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { // bufio.Writer's underlying writer. 
var wh writeHook bw.Reset(&wh) - if err := bw.WriteByte(0); err != nil { - panic(err) - } - if err := bw.Flush(); err != nil { - log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) - } + bw.WriteByte(0) + bw.Flush() bw.Reset(originalWriter) diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go index 7f386453..a62b68cc 100644 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -1,3 +1,6 @@ +//go:build go1.17 +// +build go1.17 + package websocket import ( diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 00000000..e1b2b44f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 9b1a629b..31a5dee6 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -6,7 +6,7 @@ package websocket import ( "crypto/rand" - "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + "crypto/sha1" "encoding/base64" "io" "net/http" @@ -17,7 +17,7 @@ import ( var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { - h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + h := sha1.New() h.Write([]byte(challengeKey)) h.Write(keyGUID) 
return base64.StdEncoding.EncodeToString(h.Sum(nil)) diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 00000000..2e668f6b --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. 
+func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. 
+ if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. +func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. 
+func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == 
proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/raft/.gitmodules b/vendor/github.com/hashicorp/raft/.gitmodules new file mode 100644 index 00000000..cbcd5cc9 --- /dev/null +++ b/vendor/github.com/hashicorp/raft/.gitmodules @@ -0,0 +1,3 @@ +[submodule "raft-compat/raft-latest"] + path = raft-compat/raft-previous-version + url = 
https://github.com/hashicorp/raft.git diff --git a/vendor/github.com/hashicorp/raft/CHANGELOG.md b/vendor/github.com/hashicorp/raft/CHANGELOG.md index d77609f7..b0fef7eb 100644 --- a/vendor/github.com/hashicorp/raft/CHANGELOG.md +++ b/vendor/github.com/hashicorp/raft/CHANGELOG.md @@ -1,5 +1,34 @@ # UNRELEASED +# 1.7.0 (June 5th, 2024) + +CHANGES + +* Raft multi version testing [GH-559](https://github.com/hashicorp/raft/pull/559) + +IMPROVEMENTS + +* Raft pre-vote extension implementation, activated by default. [GH-530](https://github.com/hashicorp/raft/pull/530) + +BUG FIXES + +* Fix serialize NetworkTransport data race on ServerAddr(). [GH-591](https://github.com/hashicorp/raft/pull/591) + +# 1.6.1 (January 8th, 2024) + +CHANGES + +* Add reference use of Hashicorp Raft. [GH-584](https://github.com/hashicorp/raft/pull/584) +* [COMPLIANCE] Add Copyright and License Headers. [GH-580](https://github.com/hashicorp/raft/pull/580) + +IMPROVEMENTS + +* Bump github.com/hashicorp/go-hclog from 1.5.0 to 1.6.2. [GH-583](https://github.com/hashicorp/raft/pull/583) + +BUG FIXES + +* Fix rare leadership transfer failures when writes happen during transfer. [GH-581](https://github.com/hashicorp/raft/pull/581) + # 1.6.0 (November 15th, 2023) CHANGES diff --git a/vendor/github.com/hashicorp/raft/api.go b/vendor/github.com/hashicorp/raft/api.go index 01f573ce..cff2eaac 100644 --- a/vendor/github.com/hashicorp/raft/api.go +++ b/vendor/github.com/hashicorp/raft/api.go @@ -213,6 +213,10 @@ type Raft struct { // mainThreadSaturation measures the saturation of the main raft goroutine. mainThreadSaturation *saturationMetric + + // preVoteDisabled control if the pre-vote feature is activated, + // prevote feature is disabled if set to true. 
+ preVoteDisabled bool } // BootstrapCluster initializes a server's storage with the given cluster @@ -531,6 +535,7 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna applyCh = make(chan *logFuture, conf.MaxAppendEntries) } + _, transportSupportPreVote := trans.(WithPreVote) // Create Raft struct. r := &Raft{ protocolVersion: protocolVersion, @@ -560,6 +565,10 @@ func NewRaft(conf *Config, fsm FSM, logs LogStore, stable StableStore, snaps Sna leaderNotifyCh: make(chan struct{}, 1), followerNotifyCh: make(chan struct{}, 1), mainThreadSaturation: newSaturationMetric([]string{"raft", "thread", "main", "saturation"}, 1*time.Second), + preVoteDisabled: conf.PreVoteDisabled || !transportSupportPreVote, + } + if !transportSupportPreVote && !conf.PreVoteDisabled { + r.logger.Warn("pre-vote is disabled because it is not supported by the Transport") } r.conf.Store(*conf) diff --git a/vendor/github.com/hashicorp/raft/commands.go b/vendor/github.com/hashicorp/raft/commands.go index 1a74e052..1ec76cb2 100644 --- a/vendor/github.com/hashicorp/raft/commands.go +++ b/vendor/github.com/hashicorp/raft/commands.go @@ -120,6 +120,40 @@ func (r *RequestVoteResponse) GetRPCHeader() RPCHeader { return r.RPCHeader } +// RequestPreVoteRequest is the command used by a candidate to ask a Raft peer +// for a vote in an election. +type RequestPreVoteRequest struct { + RPCHeader + + // Provide the term and our id + Term uint64 + + // Used to ensure safety + LastLogIndex uint64 + LastLogTerm uint64 +} + +// GetRPCHeader - See WithRPCHeader. +func (r *RequestPreVoteRequest) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + +// RequestPreVoteResponse is the response returned from a RequestPreVoteRequest. +type RequestPreVoteResponse struct { + RPCHeader + + // Newer term if leader is out of date. + Term uint64 + + // Is the vote granted. + Granted bool +} + +// GetRPCHeader - See WithRPCHeader. 
+func (r *RequestPreVoteResponse) GetRPCHeader() RPCHeader { + return r.RPCHeader +} + // InstallSnapshotRequest is the command sent to a Raft peer to bootstrap its // log (and state machine) from a snapshot on another peer. type InstallSnapshotRequest struct { diff --git a/vendor/github.com/hashicorp/raft/config.go b/vendor/github.com/hashicorp/raft/config.go index b97b4338..d14392fc 100644 --- a/vendor/github.com/hashicorp/raft/config.go +++ b/vendor/github.com/hashicorp/raft/config.go @@ -232,6 +232,9 @@ type Config struct { // raft's configuration and index values. NoSnapshotRestoreOnStart bool + // PreVoteDisabled deactivate the pre-vote feature when set to true + PreVoteDisabled bool + // skipStartup allows NewRaft() to bypass all background work goroutines skipStartup bool } diff --git a/vendor/github.com/hashicorp/raft/inmem_transport.go b/vendor/github.com/hashicorp/raft/inmem_transport.go index 5d9365b7..561ba73d 100644 --- a/vendor/github.com/hashicorp/raft/inmem_transport.go +++ b/vendor/github.com/hashicorp/raft/inmem_transport.go @@ -125,6 +125,18 @@ func (i *InmemTransport) RequestVote(id ServerID, target ServerAddress, args *Re return nil } +func (i *InmemTransport) RequestPreVote(id ServerID, target ServerAddress, args *RequestPreVoteRequest, resp *RequestPreVoteResponse) error { + rpcResp, err := i.makeRPC(target, args, nil, i.timeout) + if err != nil { + return err + } + + // Copy the result back + out := rpcResp.Response.(*RequestPreVoteResponse) + *resp = *out + return nil +} + // InstallSnapshot implements the Transport interface. 
func (i *InmemTransport) InstallSnapshot(id ServerID, target ServerAddress, args *InstallSnapshotRequest, resp *InstallSnapshotResponse, data io.Reader) error { rpcResp, err := i.makeRPC(target, args, data, 10*i.timeout) diff --git a/vendor/github.com/hashicorp/raft/net_transport.go b/vendor/github.com/hashicorp/raft/net_transport.go index bc34285e..1bac17d6 100644 --- a/vendor/github.com/hashicorp/raft/net_transport.go +++ b/vendor/github.com/hashicorp/raft/net_transport.go @@ -24,6 +24,7 @@ const ( rpcRequestVote rpcInstallSnapshot rpcTimeoutNow + rpcRequestPreVote // DefaultTimeoutScale is the default TimeoutScale in a NetworkTransport. DefaultTimeoutScale = 256 * 1024 // 256KB @@ -91,6 +92,7 @@ type NetworkTransport struct { maxPool int maxInFlight int + serverAddressLock sync.RWMutex serverAddressProvider ServerAddressProvider shutdown bool @@ -384,6 +386,8 @@ func (n *NetworkTransport) getConnFromAddressProvider(id ServerID, target Server } func (n *NetworkTransport) getProviderAddressOrFallback(id ServerID, target ServerAddress) ServerAddress { + n.serverAddressLock.RLock() + defer n.serverAddressLock.RUnlock() if n.serverAddressProvider != nil { serverAddressOverride, err := n.serverAddressProvider.ServerAddr(id) if err != nil { @@ -470,6 +474,11 @@ func (n *NetworkTransport) RequestVote(id ServerID, target ServerAddress, args * return n.genericRPC(id, target, rpcRequestVote, args, resp) } +// RequestPreVote implements the Transport interface. +func (n *NetworkTransport) RequestPreVote(id ServerID, target ServerAddress, args *RequestPreVoteRequest, resp *RequestPreVoteResponse) error { + return n.genericRPC(id, target, rpcRequestPreVote, args, resp) +} + // genericRPC handles a simple request/response RPC. 
func (n *NetworkTransport) genericRPC(id ServerID, target ServerAddress, rpcType uint8, args interface{}, resp interface{}) error { // Get a conn @@ -682,6 +691,13 @@ func (n *NetworkTransport) handleCommand(r *bufio.Reader, dec *codec.Decoder, en } rpc.Command = &req labels = []metrics.Label{{Name: "rpcType", Value: "RequestVote"}} + case rpcRequestPreVote: + var req RequestPreVoteRequest + if err := dec.Decode(&req); err != nil { + return err + } + rpc.Command = &req + labels = []metrics.Label{{Name: "rpcType", Value: "RequestPreVote"}} case rpcInstallSnapshot: var req InstallSnapshotRequest if err := dec.Decode(&req); err != nil { diff --git a/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/raft/raft.go index 28c11283..183f041a 100644 --- a/vendor/github.com/hashicorp/raft/raft.go +++ b/vendor/github.com/hashicorp/raft/raft.go @@ -8,6 +8,7 @@ import ( "container/list" "fmt" "io" + "strings" "sync/atomic" "time" @@ -17,8 +18,9 @@ import ( ) const ( - minCheckInterval = 10 * time.Millisecond - oldestLogGaugeInterval = 10 * time.Second + minCheckInterval = 10 * time.Millisecond + oldestLogGaugeInterval = 10 * time.Second + rpcUnexpectedCommandError = "unexpected command" ) var ( @@ -286,7 +288,16 @@ func (r *Raft) runCandidate() { metrics.IncrCounter([]string{"raft", "state", "candidate"}, 1) // Start vote for us, and set a timeout - voteCh := r.electSelf() + var voteCh <-chan *voteResult + var prevoteCh <-chan *preVoteResult + + // check if pre-vote is active and that this is not a leader transfer. + // Leader transfer do not perform prevote by design + if !r.preVoteDisabled && !r.candidateFromLeadershipTransfer.Load() { + prevoteCh = r.preElectSelf() + } else { + voteCh = r.electSelf() + } // Make sure the leadership transfer flag is reset after each run. 
Having this // flag will set the field LeadershipTransfer in a RequestVoteRequst to true, @@ -299,6 +310,8 @@ func (r *Raft) runCandidate() { electionTimer := randomTimeout(electionTimeout) // Tally the votes, need a simple majority + preVoteGrantedVotes := 0 + preVoteRefusedVotes := 0 grantedVotes := 0 votesNeeded := r.quorumSize() r.logger.Debug("calculated votes needed", "needed", votesNeeded, "term", term) @@ -310,7 +323,43 @@ func (r *Raft) runCandidate() { case rpc := <-r.rpcCh: r.mainThreadSaturation.working() r.processRPC(rpc) + case preVote := <-prevoteCh: + // This a pre-vote case it should trigger a "real" election if the pre-vote is won. + r.mainThreadSaturation.working() + r.logger.Debug("pre-vote received", "from", preVote.voterID, "term", preVote.Term, "tally", preVoteGrantedVotes) + // Check if the term is greater than ours, bail + if preVote.Term > term { + r.logger.Debug("pre-vote denied: found newer term, falling back to follower", "term", preVote.Term) + r.setState(Follower) + r.setCurrentTerm(preVote.Term) + return + } + // Check if the preVote is granted + if preVote.Granted { + preVoteGrantedVotes++ + r.logger.Debug("pre-vote granted", "from", preVote.voterID, "term", preVote.Term, "tally", preVoteGrantedVotes) + } else { + preVoteRefusedVotes++ + r.logger.Debug("pre-vote denied", "from", preVote.voterID, "term", preVote.Term, "tally", preVoteGrantedVotes) + } + + // Check if we've won the pre-vote and proceed to election if so + if preVoteGrantedVotes >= votesNeeded { + r.logger.Info("pre-vote successful, starting election", "term", preVote.Term, + "tally", preVoteGrantedVotes, "refused", preVoteRefusedVotes, "votesNeeded", votesNeeded) + preVoteGrantedVotes = 0 + preVoteRefusedVotes = 0 + electionTimer = randomTimeout(electionTimeout) + prevoteCh = nil + voteCh = r.electSelf() + } + // Check if we've lost the pre-vote and wait for the election to timeout so we can do another time of + // prevote. 
+ if preVoteRefusedVotes >= votesNeeded { + r.logger.Info("pre-vote campaign failed, waiting for election timeout", "term", preVote.Term, + "tally", preVoteGrantedVotes, "refused", preVoteRefusedVotes, "votesNeeded", votesNeeded) + } case vote := <-voteCh: r.mainThreadSaturation.working() // Check if the term is greater than ours, bail @@ -334,7 +383,6 @@ func (r *Raft) runCandidate() { r.setLeader(r.localAddr, r.localID) return } - case c := <-r.configurationChangeCh: r.mainThreadSaturation.working() // Reject any operations since we are not the leader @@ -1350,6 +1398,8 @@ func (r *Raft) processRPC(rpc RPC) { r.appendEntries(rpc, cmd) case *RequestVoteRequest: r.requestVote(rpc, cmd) + case *RequestPreVoteRequest: + r.requestPreVote(rpc, cmd) case *InstallSnapshotRequest: r.installSnapshot(rpc, cmd) case *TimeoutNowRequest: @@ -1357,7 +1407,8 @@ func (r *Raft) processRPC(rpc RPC) { default: r.logger.Error("got unexpected command", "command", hclog.Fmt("%#v", rpc.Command)) - rpc.Respond(nil, fmt.Errorf("unexpected command")) + + rpc.Respond(nil, fmt.Errorf(rpcUnexpectedCommandError)) } } @@ -1615,6 +1666,7 @@ func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { r.logger.Debug("lost leadership because received a requestVote with a newer term") r.setState(Follower) r.setCurrentTerm(req.Term) + resp.Term = req.Term } @@ -1680,6 +1732,82 @@ func (r *Raft) requestVote(rpc RPC, req *RequestVoteRequest) { r.setLastContact() } +// requestPreVote is invoked when we get a request Pre-Vote RPC call. 
+func (r *Raft) requestPreVote(rpc RPC, req *RequestPreVoteRequest) { + defer metrics.MeasureSince([]string{"raft", "rpc", "requestVote"}, time.Now()) + r.observe(*req) + + // Setup a response + resp := &RequestPreVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: r.getCurrentTerm(), + Granted: false, + } + var rpcErr error + defer func() { + rpc.Respond(resp, rpcErr) + }() + + // Check if we have an existing leader [who's not the candidate] and also + var candidate ServerAddress + candidateID := ServerID(req.ID) + + // if the Servers list is empty that mean the cluster is very likely trying to bootstrap, + // Grant the vote + if len(r.configurations.latest.Servers) > 0 && !inConfiguration(r.configurations.latest, candidateID) { + r.logger.Warn("rejecting pre-vote request since node is not in configuration", + "from", candidate) + return + } + + if leaderAddr, leaderID := r.LeaderWithID(); leaderAddr != "" && leaderAddr != candidate { + r.logger.Warn("rejecting pre-vote request since we have a leader", + "from", candidate, + "leader", leaderAddr, + "leader-id", string(leaderID)) + return + } + + // Ignore an older term + if req.Term < r.getCurrentTerm() { + return + } + + if req.Term > r.getCurrentTerm() { + // continue processing here to possibly grant the pre-vote as in a "real" vote this will transition us to follower + r.logger.Debug("received a requestPreVote with a newer term, grant the pre-vote") + resp.Term = req.Term + } + + // if we get a request for a pre-vote from a nonVoter and the request term is higher, do not grant the Pre-Vote + // This could happen when a node, previously voter, is converted to non-voter + if len(r.configurations.latest.Servers) > 0 && !hasVote(r.configurations.latest, candidateID) { + r.logger.Warn("rejecting pre-vote request since node is not a voter", "from", candidate) + return + } + + // Reject if their term is older + lastIdx, lastTerm := r.getLastEntry() + if lastTerm > req.LastLogTerm { + r.logger.Warn("rejecting 
pre-vote request since our last term is greater", + "candidate", candidate, + "last-term", lastTerm, + "last-candidate-term", req.LastLogTerm) + return + } + + if lastTerm == req.LastLogTerm && lastIdx > req.LastLogIndex { + r.logger.Warn("rejecting pre-vote request since our last index is greater", + "candidate", candidate, + "last-index", lastIdx, + "last-candidate-index", req.LastLogIndex) + return + } + + resp.Granted = true + r.setLastContact() +} + // installSnapshot is invoked when we get a InstallSnapshot RPC call. // We must be in the follower state for this, since it means we are // too far behind a leader for log replay. This must only be called @@ -1837,6 +1965,11 @@ type voteResult struct { voterID ServerID } +type preVoteResult struct { + RequestPreVoteResponse + voterID ServerID +} + // electSelf is used to send a RequestVote RPC to all peers, and vote for // ourself. This has the side affecting of incrementing the current term. The // response channel returned is used to wait for all the responses (including a @@ -1846,13 +1979,14 @@ func (r *Raft) electSelf() <-chan *voteResult { respCh := make(chan *voteResult, len(r.configurations.latest.Servers)) // Increment the term - r.setCurrentTerm(r.getCurrentTerm() + 1) + newTerm := r.getCurrentTerm() + 1 + r.setCurrentTerm(newTerm) // Construct the request lastIdx, lastTerm := r.getLastEntry() req := &RequestVoteRequest{ RPCHeader: r.getRPCHeader(), - Term: r.getCurrentTerm(), + Term: newTerm, // this is needed for retro compatibility, before RPCHeader.Addr was added Candidate: r.trans.EncodePeer(r.localID, r.localAddr), LastLogIndex: lastIdx, @@ -1883,10 +2017,12 @@ func (r *Raft) electSelf() <-chan *voteResult { if server.Suffrage == Voter { if server.ID == r.localID { r.logger.Debug("voting for self", "term", req.Term, "id", r.localID) + // Persist a vote for ourselves if err := r.persistVote(req.Term, req.RPCHeader.Addr); err != nil { r.logger.Error("failed to persist vote", "error", err) return nil 
+ } // Include our own vote respCh <- &voteResult{ @@ -1907,6 +2043,90 @@ func (r *Raft) electSelf() <-chan *voteResult { return respCh } +// preElectSelf is used to send a RequestPreVote RPC to all peers, and vote for +// ourself. This will not increment the current term. The +// response channel returned is used to wait for all the responses (including a +// vote for ourself). +// This must only be called from the main thread. +func (r *Raft) preElectSelf() <-chan *preVoteResult { + + // At this point transport should support pre-vote + // but check just in case + prevoteTrans, prevoteTransSupported := r.trans.(WithPreVote) + if !prevoteTransSupported { + panic("preElection is not possible if the transport don't support pre-vote") + } + + // Create a response channel + respCh := make(chan *preVoteResult, len(r.configurations.latest.Servers)) + + // Propose the next term without actually changing our state + newTerm := r.getCurrentTerm() + 1 + + // Construct the request + lastIdx, lastTerm := r.getLastEntry() + req := &RequestPreVoteRequest{ + RPCHeader: r.getRPCHeader(), + Term: newTerm, + LastLogIndex: lastIdx, + LastLogTerm: lastTerm, + } + + // Construct a function to ask for a vote + askPeer := func(peer Server) { + r.goFunc(func() { + defer metrics.MeasureSince([]string{"raft", "candidate", "preElectSelf"}, time.Now()) + resp := &preVoteResult{voterID: peer.ID} + + err := prevoteTrans.RequestPreVote(peer.ID, peer.Address, req, &resp.RequestPreVoteResponse) + + // If the target server do not support Pre-vote RPC we count this as a granted vote to allow + // the cluster to progress. 
+ if err != nil && strings.Contains(err.Error(), rpcUnexpectedCommandError) { + r.logger.Error("target does not support pre-vote RPC, treating as granted", + "target", peer, + "error", err, + "term", req.Term) + resp.Term = req.Term + resp.Granted = true + } else if err != nil { + r.logger.Error("failed to make requestVote RPC", + "target", peer, + "error", err, + "term", req.Term) + resp.Term = req.Term + resp.Granted = false + } + respCh <- resp + + }) + } + + // For each peer, request a vote + for _, server := range r.configurations.latest.Servers { + if server.Suffrage == Voter { + if server.ID == r.localID { + r.logger.Debug("pre-voting for self", "term", req.Term, "id", r.localID) + + // cast a pre-vote for our self + respCh <- &preVoteResult{ + RequestPreVoteResponse: RequestPreVoteResponse{ + RPCHeader: r.getRPCHeader(), + Term: req.Term, + Granted: true, + }, + voterID: r.localID, + } + } else { + r.logger.Debug("asking for pre-vote", "term", req.Term, "from", server.ID, "address", server.Address) + askPeer(server) + } + } + } + + return respCh +} + // persistVote is used to persist our vote for safety. 
func (r *Raft) persistVote(term uint64, candidate []byte) error { if err := r.stable.SetUint64(keyLastVoteTerm, term); err != nil { diff --git a/vendor/github.com/hashicorp/raft/testing.go b/vendor/github.com/hashicorp/raft/testing.go index e0885714..351a9aba 100644 --- a/vendor/github.com/hashicorp/raft/testing.go +++ b/vendor/github.com/hashicorp/raft/testing.go @@ -21,13 +21,13 @@ import ( var userSnapshotErrorsOnNoData = true // Return configurations optimized for in-memory -func inmemConfig(tb testing.TB) *Config { +func inmemConfig(t testing.TB) *Config { conf := DefaultConfig() conf.HeartbeatTimeout = 50 * time.Millisecond conf.ElectionTimeout = 50 * time.Millisecond conf.LeaderLeaseTimeout = 50 * time.Millisecond conf.CommitTimeout = 5 * time.Millisecond - conf.Logger = newTestLogger(tb) + conf.Logger = newTestLogger(t) return conf } @@ -211,7 +211,7 @@ func newTestLogger(tb testing.TB) hclog.Logger { // is logged after the test is complete. func newTestLoggerWithPrefix(tb testing.TB, prefix string) hclog.Logger { if testing.Verbose() { - return hclog.New(&hclog.LoggerOptions{Name: prefix}) + return hclog.New(&hclog.LoggerOptions{Name: prefix, Level: hclog.Trace}) } return hclog.New(&hclog.LoggerOptions{ @@ -501,6 +501,12 @@ func (c *cluster) Leader() *Raft { // state. func (c *cluster) Followers() []*Raft { expFollowers := len(c.rafts) - 1 + return c.WaitForFollowers(expFollowers) +} + +// WaitForFollowers waits for the cluster to have a given number of followers and stay in a stable +// state. 
+func (c *cluster) WaitForFollowers(expFollowers int) []*Raft { followers := c.GetInState(Follower) if len(followers) != expFollowers { c.t.Fatalf("timeout waiting for %d followers (followers are %v)", expFollowers, followers) diff --git a/vendor/github.com/hashicorp/raft/transport.go b/vendor/github.com/hashicorp/raft/transport.go index 054fa624..c64fff6e 100644 --- a/vendor/github.com/hashicorp/raft/transport.go +++ b/vendor/github.com/hashicorp/raft/transport.go @@ -66,6 +66,16 @@ type Transport interface { TimeoutNow(id ServerID, target ServerAddress, args *TimeoutNowRequest, resp *TimeoutNowResponse) error } +// WithPreVote is an interface that a transport may provide which +// allows a transport to support a PreVote request. +// +// It is defined separately from Transport as unfortunately it wasn't in the +// original interface specification. +type WithPreVote interface { + // RequestPreVote sends the appropriate RPC to the target node. + RequestPreVote(id ServerID, target ServerAddress, args *RequestPreVoteRequest, resp *RequestPreVoteResponse) error +} + // WithClose is an interface that a transport may provide which // allows a transport to be shut down cleanly when a Raft instance // shuts down. @@ -81,9 +91,10 @@ type WithClose interface { // LoopbackTransport is an interface that provides a loopback transport suitable for testing // e.g. InmemTransport. It's there so we don't have to rewrite tests. 
type LoopbackTransport interface { - Transport // Embedded transport reference - WithPeers // Embedded peer management - WithClose // with a close routine + Transport // Embedded transport reference + WithPeers // Embedded peer management + WithClose // with a close routine + WithPreVote // with a prevote } // WithPeers is an interface that a transport may provide which allows for connection and diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s index 4b63d508..78e463f3 100644 --- a/vendor/github.com/klauspost/compress/s2/decode_arm64.s +++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s @@ -60,7 +60,7 @@ // // The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. // The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. -TEXT ·s2Decode(SB), NOSPLIT, $56-64 +TEXT ·s2Decode(SB), NOSPLIT, $56-56 // Initialize R_SRC, R_DST and R_DBASE-R_SEND. 
MOVD dst_base+0(FP), R_DBASE MOVD dst_len+8(FP), R_DLEN diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go index 18a4f7ac..4229957b 100644 --- a/vendor/github.com/klauspost/compress/s2/index.go +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -17,6 +17,8 @@ const ( S2IndexHeader = "s2idx\x00" S2IndexTrailer = "\x00xdi2s" maxIndexEntries = 1 << 16 + // If distance is less than this, we do not add the entry. + minIndexDist = 1 << 20 ) // Index represents an S2/Snappy index. @@ -72,6 +74,10 @@ func (i *Index) add(compressedOffset, uncompressedOffset int64) error { if latest.compressedOffset > compressedOffset { return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) } + if latest.uncompressedOffset+minIndexDist > uncompressedOffset { + // Only add entry if distance is large enough. + return nil + } } i.info = append(i.info, struct { compressedOffset int64 @@ -122,7 +128,7 @@ func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err er // reduce to stay below maxIndexEntries func (i *Index) reduce() { - if len(i.info) < maxIndexEntries && i.estBlockUncomp >= 1<<20 { + if len(i.info) < maxIndexEntries && i.estBlockUncomp >= minIndexDist { return } @@ -132,7 +138,7 @@ func (i *Index) reduce() { j := 0 // Each block should be at least 1MB, but don't reduce below 1000 entries. 
- for i.estBlockUncomp*(int64(removeN)+1) < 1<<20 && len(i.info)/(removeN+1) > 1000 { + for i.estBlockUncomp*(int64(removeN)+1) < minIndexDist && len(i.info)/(removeN+1) > 1000 { removeN++ } for idx := 0; idx < len(src); idx++ { diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go index 72bcb494..cbd1ed64 100644 --- a/vendor/github.com/klauspost/compress/s2/s2.go +++ b/vendor/github.com/klauspost/compress/s2/s2.go @@ -109,7 +109,11 @@ const ( chunkTypeStreamIdentifier = 0xff ) -var crcTable = crc32.MakeTable(crc32.Castagnoli) +var ( + crcTable = crc32.MakeTable(crc32.Castagnoli) + magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes. + magicChunkBytes = []byte(magicChunk) // Can be passed to functions where it escapes. +) // crc implements the checksum specified in section 3 of // https://github.com/google/snappy/blob/master/framing_format.txt diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index 637c9314..0a46f2b9 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -239,6 +239,9 @@ func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { } } if n2 == 0 { + if cap(inbuf) >= w.obufLen { + w.buffers.Put(inbuf) + } break } n += int64(n2) @@ -314,9 +317,9 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { hWriter := make(chan result) w.output <- hWriter if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} } else { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } @@ -370,9 +373,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { hWriter := make(chan result) w.output <- hWriter if w.snappy { - 
hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} } else { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } @@ -478,9 +481,9 @@ func (w *Writer) write(p []byte) (nRet int, errRet error) { hWriter := make(chan result) w.output <- hWriter if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} } else { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } @@ -560,6 +563,9 @@ func (w *Writer) writeFull(inbuf []byte) (errRet error) { if w.concurrency == 1 { _, err := w.writeSync(inbuf[obufHeaderLen:]) + if cap(inbuf) >= w.obufLen { + w.buffers.Put(inbuf) + } return err } @@ -569,9 +575,9 @@ func (w *Writer) writeFull(inbuf []byte) (errRet error) { hWriter := make(chan result) w.output <- hWriter if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} } else { - hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } @@ -637,9 +643,9 @@ func (w *Writer) writeSync(p []byte) (nRet int, errRet error) { var n int var err error if w.snappy { - n, err = w.writer.Write([]byte(magicChunkSnappy)) + n, err = w.writer.Write(magicChunkSnappyBytes) } else { - n, err = w.writer.Write([]byte(magicChunk)) + n, err = w.writer.Write(magicChunkBytes) } if err != nil { return 0, w.err(err) diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe..b7b83164 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ 
b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. + if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e08..ae7d4d32 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. 
- MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index 30f8d296..21508edb 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -310,6 +310,7 @@ Exit Code 1 | AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | | AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | | AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| AVXVNNIINT16 | AVX-VNNI-INT16 instructions | | BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 | | BMI1 | Bit Manipulation Instruction Set 1 | | BMI2 | Bit Manipulation Instruction Set 2 | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 805f5e7b..53bc18ca 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -104,6 +104,7 @@ const ( AVXSLOW // 
Indicates the CPU performs 2 128 bit operations instead of one AVXVNNI // AVX (VEX encoded) VNNI neural network instructions AVXVNNIINT8 // AVX-VNNI-INT8 instructions + AVXVNNIINT16 // AVX-VNNI-INT16 instructions BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 BMI1 // Bit Manipulation Instruction Set 1 BMI2 // Bit Manipulation Instruction Set 2 @@ -1242,6 +1243,7 @@ func support() flagSet { // CPUID.(EAX=7, ECX=1).EDX fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16) fs.setIf(edx1&(1<<14) != 0, PREFETCHI) fs.setIf(edx1&(1<<19) != 0, AVX10) fs.setIf(edx1&(1<<21) != 0, APX_F) diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 57a085a5..3a256031 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -44,194 +44,195 @@ func _() { _ = x[AVXSLOW-34] _ = x[AVXVNNI-35] _ = x[AVXVNNIINT8-36] - _ = x[BHI_CTRL-37] - _ = x[BMI1-38] - _ = x[BMI2-39] - _ = x[CETIBT-40] - _ = x[CETSS-41] - _ = x[CLDEMOTE-42] - _ = x[CLMUL-43] - _ = x[CLZERO-44] - _ = x[CMOV-45] - _ = x[CMPCCXADD-46] - _ = x[CMPSB_SCADBS_SHORT-47] - _ = x[CMPXCHG8-48] - _ = x[CPBOOST-49] - _ = x[CPPC-50] - _ = x[CX16-51] - _ = x[EFER_LMSLE_UNS-52] - _ = x[ENQCMD-53] - _ = x[ERMS-54] - _ = x[F16C-55] - _ = x[FLUSH_L1D-56] - _ = x[FMA3-57] - _ = x[FMA4-58] - _ = x[FP128-59] - _ = x[FP256-60] - _ = x[FSRM-61] - _ = x[FXSR-62] - _ = x[FXSROPT-63] - _ = x[GFNI-64] - _ = x[HLE-65] - _ = x[HRESET-66] - _ = x[HTT-67] - _ = x[HWA-68] - _ = x[HYBRID_CPU-69] - _ = x[HYPERVISOR-70] - _ = x[IA32_ARCH_CAP-71] - _ = x[IA32_CORE_CAP-72] - _ = x[IBPB-73] - _ = x[IBPB_BRTYPE-74] - _ = x[IBRS-75] - _ = x[IBRS_PREFERRED-76] - _ = x[IBRS_PROVIDES_SMP-77] - _ = x[IBS-78] - _ = x[IBSBRNTRGT-79] - _ = x[IBSFETCHSAM-80] - _ = 
x[IBSFFV-81] - _ = x[IBSOPCNT-82] - _ = x[IBSOPCNTEXT-83] - _ = x[IBSOPSAM-84] - _ = x[IBSRDWROPCNT-85] - _ = x[IBSRIPINVALIDCHK-86] - _ = x[IBS_FETCH_CTLX-87] - _ = x[IBS_OPDATA4-88] - _ = x[IBS_OPFUSE-89] - _ = x[IBS_PREVENTHOST-90] - _ = x[IBS_ZEN4-91] - _ = x[IDPRED_CTRL-92] - _ = x[INT_WBINVD-93] - _ = x[INVLPGB-94] - _ = x[KEYLOCKER-95] - _ = x[KEYLOCKERW-96] - _ = x[LAHF-97] - _ = x[LAM-98] - _ = x[LBRVIRT-99] - _ = x[LZCNT-100] - _ = x[MCAOVERFLOW-101] - _ = x[MCDT_NO-102] - _ = x[MCOMMIT-103] - _ = x[MD_CLEAR-104] - _ = x[MMX-105] - _ = x[MMXEXT-106] - _ = x[MOVBE-107] - _ = x[MOVDIR64B-108] - _ = x[MOVDIRI-109] - _ = x[MOVSB_ZL-110] - _ = x[MOVU-111] - _ = x[MPX-112] - _ = x[MSRIRC-113] - _ = x[MSRLIST-114] - _ = x[MSR_PAGEFLUSH-115] - _ = x[NRIPS-116] - _ = x[NX-117] - _ = x[OSXSAVE-118] - _ = x[PCONFIG-119] - _ = x[POPCNT-120] - _ = x[PPIN-121] - _ = x[PREFETCHI-122] - _ = x[PSFD-123] - _ = x[RDPRU-124] - _ = x[RDRAND-125] - _ = x[RDSEED-126] - _ = x[RDTSCP-127] - _ = x[RRSBA_CTRL-128] - _ = x[RTM-129] - _ = x[RTM_ALWAYS_ABORT-130] - _ = x[SBPB-131] - _ = x[SERIALIZE-132] - _ = x[SEV-133] - _ = x[SEV_64BIT-134] - _ = x[SEV_ALTERNATIVE-135] - _ = x[SEV_DEBUGSWAP-136] - _ = x[SEV_ES-137] - _ = x[SEV_RESTRICTED-138] - _ = x[SEV_SNP-139] - _ = x[SGX-140] - _ = x[SGXLC-141] - _ = x[SHA-142] - _ = x[SME-143] - _ = x[SME_COHERENT-144] - _ = x[SPEC_CTRL_SSBD-145] - _ = x[SRBDS_CTRL-146] - _ = x[SRSO_MSR_FIX-147] - _ = x[SRSO_NO-148] - _ = x[SRSO_USER_KERNEL_NO-149] - _ = x[SSE-150] - _ = x[SSE2-151] - _ = x[SSE3-152] - _ = x[SSE4-153] - _ = x[SSE42-154] - _ = x[SSE4A-155] - _ = x[SSSE3-156] - _ = x[STIBP-157] - _ = x[STIBP_ALWAYSON-158] - _ = x[STOSB_SHORT-159] - _ = x[SUCCOR-160] - _ = x[SVM-161] - _ = x[SVMDA-162] - _ = x[SVMFBASID-163] - _ = x[SVML-164] - _ = x[SVMNP-165] - _ = x[SVMPF-166] - _ = x[SVMPFT-167] - _ = x[SYSCALL-168] - _ = x[SYSEE-169] - _ = x[TBM-170] - _ = x[TDX_GUEST-171] - _ = x[TLB_FLUSH_NESTED-172] - _ = x[TME-173] - _ = x[TOPEXT-174] - _ 
= x[TSCRATEMSR-175] - _ = x[TSXLDTRK-176] - _ = x[VAES-177] - _ = x[VMCBCLEAN-178] - _ = x[VMPL-179] - _ = x[VMSA_REGPROT-180] - _ = x[VMX-181] - _ = x[VPCLMULQDQ-182] - _ = x[VTE-183] - _ = x[WAITPKG-184] - _ = x[WBNOINVD-185] - _ = x[WRMSRNS-186] - _ = x[X87-187] - _ = x[XGETBV1-188] - _ = x[XOP-189] - _ = x[XSAVE-190] - _ = x[XSAVEC-191] - _ = x[XSAVEOPT-192] - _ = x[XSAVES-193] - _ = x[AESARM-194] - _ = x[ARMCPUID-195] - _ = x[ASIMD-196] - _ = x[ASIMDDP-197] - _ = x[ASIMDHP-198] - _ = x[ASIMDRDM-199] - _ = x[ATOMICS-200] - _ = x[CRC32-201] - _ = x[DCPOP-202] - _ = x[EVTSTRM-203] - _ = x[FCMA-204] - _ = x[FP-205] - _ = x[FPHP-206] - _ = x[GPA-207] - _ = x[JSCVT-208] - _ = x[LRCPC-209] - _ = x[PMULL-210] - _ = x[SHA1-211] - _ = x[SHA2-212] - _ = x[SHA3-213] - _ = x[SHA512-214] - _ = x[SM3-215] - _ = x[SM4-216] - _ = x[SVE-217] - _ = x[lastID-218] + _ = x[AVXVNNIINT16-37] + _ = x[BHI_CTRL-38] + _ = x[BMI1-39] + _ = x[BMI2-40] + _ = x[CETIBT-41] + _ = x[CETSS-42] + _ = x[CLDEMOTE-43] + _ = x[CLMUL-44] + _ = x[CLZERO-45] + _ = x[CMOV-46] + _ = x[CMPCCXADD-47] + _ = x[CMPSB_SCADBS_SHORT-48] + _ = x[CMPXCHG8-49] + _ = x[CPBOOST-50] + _ = x[CPPC-51] + _ = x[CX16-52] + _ = x[EFER_LMSLE_UNS-53] + _ = x[ENQCMD-54] + _ = x[ERMS-55] + _ = x[F16C-56] + _ = x[FLUSH_L1D-57] + _ = x[FMA3-58] + _ = x[FMA4-59] + _ = x[FP128-60] + _ = x[FP256-61] + _ = x[FSRM-62] + _ = x[FXSR-63] + _ = x[FXSROPT-64] + _ = x[GFNI-65] + _ = x[HLE-66] + _ = x[HRESET-67] + _ = x[HTT-68] + _ = x[HWA-69] + _ = x[HYBRID_CPU-70] + _ = x[HYPERVISOR-71] + _ = x[IA32_ARCH_CAP-72] + _ = x[IA32_CORE_CAP-73] + _ = x[IBPB-74] + _ = x[IBPB_BRTYPE-75] + _ = x[IBRS-76] + _ = x[IBRS_PREFERRED-77] + _ = x[IBRS_PROVIDES_SMP-78] + _ = x[IBS-79] + _ = x[IBSBRNTRGT-80] + _ = x[IBSFETCHSAM-81] + _ = x[IBSFFV-82] + _ = x[IBSOPCNT-83] + _ = x[IBSOPCNTEXT-84] + _ = x[IBSOPSAM-85] + _ = x[IBSRDWROPCNT-86] + _ = x[IBSRIPINVALIDCHK-87] + _ = x[IBS_FETCH_CTLX-88] + _ = x[IBS_OPDATA4-89] + _ = x[IBS_OPFUSE-90] + _ = 
x[IBS_PREVENTHOST-91] + _ = x[IBS_ZEN4-92] + _ = x[IDPRED_CTRL-93] + _ = x[INT_WBINVD-94] + _ = x[INVLPGB-95] + _ = x[KEYLOCKER-96] + _ = x[KEYLOCKERW-97] + _ = x[LAHF-98] + _ = x[LAM-99] + _ = x[LBRVIRT-100] + _ = x[LZCNT-101] + _ = x[MCAOVERFLOW-102] + _ = x[MCDT_NO-103] + _ = x[MCOMMIT-104] + _ = x[MD_CLEAR-105] + _ = x[MMX-106] + _ = x[MMXEXT-107] + _ = x[MOVBE-108] + _ = x[MOVDIR64B-109] + _ = x[MOVDIRI-110] + _ = x[MOVSB_ZL-111] + _ = x[MOVU-112] + _ = x[MPX-113] + _ = x[MSRIRC-114] + _ = x[MSRLIST-115] + _ = x[MSR_PAGEFLUSH-116] + _ = x[NRIPS-117] + _ = x[NX-118] + _ = x[OSXSAVE-119] + _ = x[PCONFIG-120] + _ = x[POPCNT-121] + _ = x[PPIN-122] + _ = x[PREFETCHI-123] + _ = x[PSFD-124] + _ = x[RDPRU-125] + _ = x[RDRAND-126] + _ = x[RDSEED-127] + _ = x[RDTSCP-128] + _ = x[RRSBA_CTRL-129] + _ = x[RTM-130] + _ = x[RTM_ALWAYS_ABORT-131] + _ = x[SBPB-132] + _ = x[SERIALIZE-133] + _ = x[SEV-134] + _ = x[SEV_64BIT-135] + _ = x[SEV_ALTERNATIVE-136] + _ = x[SEV_DEBUGSWAP-137] + _ = x[SEV_ES-138] + _ = x[SEV_RESTRICTED-139] + _ = x[SEV_SNP-140] + _ = x[SGX-141] + _ = x[SGXLC-142] + _ = x[SHA-143] + _ = x[SME-144] + _ = x[SME_COHERENT-145] + _ = x[SPEC_CTRL_SSBD-146] + _ = x[SRBDS_CTRL-147] + _ = x[SRSO_MSR_FIX-148] + _ = x[SRSO_NO-149] + _ = x[SRSO_USER_KERNEL_NO-150] + _ = x[SSE-151] + _ = x[SSE2-152] + _ = x[SSE3-153] + _ = x[SSE4-154] + _ = x[SSE42-155] + _ = x[SSE4A-156] + _ = x[SSSE3-157] + _ = x[STIBP-158] + _ = x[STIBP_ALWAYSON-159] + _ = x[STOSB_SHORT-160] + _ = x[SUCCOR-161] + _ = x[SVM-162] + _ = x[SVMDA-163] + _ = x[SVMFBASID-164] + _ = x[SVML-165] + _ = x[SVMNP-166] + _ = x[SVMPF-167] + _ = x[SVMPFT-168] + _ = x[SYSCALL-169] + _ = x[SYSEE-170] + _ = x[TBM-171] + _ = x[TDX_GUEST-172] + _ = x[TLB_FLUSH_NESTED-173] + _ = x[TME-174] + _ = x[TOPEXT-175] + _ = x[TSCRATEMSR-176] + _ = x[TSXLDTRK-177] + _ = x[VAES-178] + _ = x[VMCBCLEAN-179] + _ = x[VMPL-180] + _ = x[VMSA_REGPROT-181] + _ = x[VMX-182] + _ = x[VPCLMULQDQ-183] + _ = x[VTE-184] + _ = x[WAITPKG-185] + _ = 
x[WBNOINVD-186] + _ = x[WRMSRNS-187] + _ = x[X87-188] + _ = x[XGETBV1-189] + _ = x[XOP-190] + _ = x[XSAVE-191] + _ = x[XSAVEC-192] + _ = x[XSAVEOPT-193] + _ = x[XSAVES-194] + _ = x[AESARM-195] + _ = x[ARMCPUID-196] + _ = x[ASIMD-197] + _ = x[ASIMDDP-198] + _ = x[ASIMDHP-199] + _ = x[ASIMDRDM-200] + _ = x[ATOMICS-201] + _ = x[CRC32-202] + _ = x[DCPOP-203] + _ = x[EVTSTRM-204] + _ = x[FCMA-205] + _ = x[FP-206] + _ = x[FPHP-207] + _ = x[GPA-208] + _ = x[JSCVT-209] + _ = x[LRCPC-210] + _ = x[PMULL-211] + _ = x[SHA1-212] + _ = x[SHA2-213] + _ = x[SHA3-214] + _ = x[SHA512-215] + _ = x[SM3-216] + _ = x[SM4-217] + _ = x[SVE-218] + _ = x[lastID-219] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FL
USH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 
136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 561, 565, 579, 596, 599, 609, 620, 626, 634, 645, 653, 665, 681, 695, 706, 716, 731, 739, 750, 760, 767, 776, 786, 790, 793, 800, 805, 816, 823, 830, 838, 841, 847, 852, 861, 868, 876, 880, 883, 889, 896, 909, 914, 916, 923, 930, 936, 940, 949, 953, 958, 964, 970, 976, 986, 989, 1005, 1009, 1018, 1021, 1030, 1045, 1058, 1064, 1078, 1085, 1088, 1093, 1096, 1099, 1111, 1125, 1135, 1147, 1154, 1173, 1176, 1180, 1184, 1188, 1193, 1198, 1203, 1208, 1222, 1233, 1239, 1242, 1247, 1256, 1260, 1265, 1270, 1276, 1283, 1288, 1291, 1300, 1316, 1319, 1325, 1335, 1343, 1347, 1356, 1360, 1372, 1375, 1385, 1388, 1395, 1403, 1410, 1413, 1420, 1423, 1428, 1434, 1442, 1448, 1454, 1462, 1467, 1474, 1481, 1489, 1496, 1501, 1506, 1513, 1517, 1519, 1523, 1526, 1531, 1536, 1541, 1545, 1549, 1553, 1559, 1562, 1565, 1568, 1574} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 
1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 58275db3..10ddda14 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -188,6 +188,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 8777 - DNS Reverse IP Automatic Multicast Tunneling (AMT) Discovery * 8914 - Extended DNS Errors * 8976 - Message Digest for DNS Zones (ZONEMD RR) +* 9460 - Service Binding and Parameter Specification via the DNS +* 9461 - Service Binding Mapping for DNS Servers +* 9462 - Discovery of Designated Resolvers ## Loosely Based Upon diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 7d1ade7d..c1a76995 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -55,7 +55,10 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { sx := []string{} p := 0 for { - i := escapedStringOffset(l.token[p:], 255) + i, ok := escapedStringOffset(l.token[p:], 255) + if !ok { + return nil, &ParseError{err: errstr, lex: l} + } if i != -1 && p+i != len(l.token) { sx = append(sx, l.token[p:p+i]) } else { @@ -1919,29 +1922,36 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { // escapedStringOffset finds the offset within a string (which may contain escape // sequences) that corresponds to a certain byte offset. If the input offset is -// out of bounds, -1 is returned. 
-func escapedStringOffset(s string, byteOffset int) int { - if byteOffset == 0 { - return 0 +// out of bounds, -1 is returned (which is *not* considered an error). +func escapedStringOffset(s string, desiredByteOffset int) (int, bool) { + if desiredByteOffset == 0 { + return 0, true } - offset := 0 - for i := 0; i < len(s); i++ { - offset += 1 + currentByteOffset, i := 0, 0 + + for i < len(s) { + currentByteOffset += 1 // Skip escape sequences if s[i] != '\\' { - // Not an escape sequence; nothing to do. + // Single plain byte, not an escape sequence. + i++ } else if isDDD(s[i+1:]) { - i += 3 + // Skip backslash and DDD. + i += 4 + } else if len(s[i+1:]) < 1 { + // No character following the backslash; that's an error. + return 0, false } else { - i++ + // Skip backslash and following byte. + i += 2 } - if offset >= byteOffset { - return i + 1 + if currentByteOffset >= desiredByteOffset { + return i, true } } - return -1 + return -1, true } diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 0207d6da..81580d1e 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -188,6 +188,14 @@ type DecorateReader func(Reader) Reader // Implementations should never return a nil Writer. type DecorateWriter func(Writer) Writer +// MsgInvalidFunc is a listener hook for observing incoming messages that were discarded +// because they could not be parsed. +// Every message that is read by a Reader will eventually be provided to the Handler, +// rejected (or ignored) by the MsgAcceptFunc, or passed to this function. +type MsgInvalidFunc func(m []byte, err error) + +func DefaultMsgInvalidFunc(m []byte, err error) {} + // A Server defines parameters for running an DNS server. type Server struct { // Address to listen on, ":dns" if empty. @@ -233,6 +241,8 @@ type Server struct { // AcceptMsgFunc will check the incoming message and will reject it early in the process. 
// By default DefaultMsgAcceptFunc will be used. MsgAcceptFunc MsgAcceptFunc + // MsgInvalidFunc is optional, will be called if a message is received but cannot be parsed. + MsgInvalidFunc MsgInvalidFunc // Shutdown handling lock sync.RWMutex @@ -277,6 +287,9 @@ func (srv *Server) init() { if srv.MsgAcceptFunc == nil { srv.MsgAcceptFunc = DefaultMsgAcceptFunc } + if srv.MsgInvalidFunc == nil { + srv.MsgInvalidFunc = DefaultMsgInvalidFunc + } if srv.Handler == nil { srv.Handler = DefaultServeMux } @@ -531,6 +544,7 @@ func (srv *Server) serveUDP(l net.PacketConn) error { if cap(m) == srv.UDPSize { srv.udpPool.Put(m[:srv.UDPSize]) } + srv.MsgInvalidFunc(m, ErrShortRead) continue } wg.Add(1) @@ -611,6 +625,7 @@ func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u net.PacketConn func (srv *Server) serveDNS(m []byte, w *response) { dh, off, err := unpackMsgHdr(m, 0) if err != nil { + srv.MsgInvalidFunc(m, err) // Let client hang, they are sending crap; any reply can be used to amplify. return } @@ -620,10 +635,12 @@ func (srv *Server) serveDNS(m []byte, w *response) { switch action := srv.MsgAcceptFunc(dh); action { case MsgAccept: - if req.unpack(dh, m, off) == nil { + err := req.unpack(dh, m, off) + if err == nil { break } + srv.MsgInvalidFunc(m, err) fallthrough case MsgReject, MsgRejectNotImplemented: opcode := req.Opcode diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index c1a740b6..310c7d11 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -14,7 +14,7 @@ import ( // SVCBKey is the type of the keys used in the SVCB RR. type SVCBKey uint16 -// Keys defined in draft-ietf-dnsop-svcb-https-08 Section 14.3.2. 
+// Keys defined in rfc9460 const ( SVCB_MANDATORY SVCBKey = iota SVCB_ALPN @@ -23,7 +23,8 @@ const ( SVCB_IPV4HINT SVCB_ECHCONFIG SVCB_IPV6HINT - SVCB_DOHPATH // draft-ietf-add-svcb-dns-02 Section 9 + SVCB_DOHPATH // rfc9461 Section 5 + SVCB_OHTTP // rfc9540 Section 8 svcb_RESERVED SVCBKey = 65535 ) @@ -37,6 +38,7 @@ var svcbKeyToStringMap = map[SVCBKey]string{ SVCB_ECHCONFIG: "ech", SVCB_IPV6HINT: "ipv6hint", SVCB_DOHPATH: "dohpath", + SVCB_OHTTP: "ohttp", } var svcbStringToKeyMap = reverseSVCBKeyMap(svcbKeyToStringMap) @@ -201,6 +203,8 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue { return new(SVCBIPv6Hint) case SVCB_DOHPATH: return new(SVCBDoHPath) + case SVCB_OHTTP: + return new(SVCBOhttp) case svcb_RESERVED: return nil default: @@ -771,8 +775,8 @@ func (s *SVCBIPv6Hint) copy() SVCBKeyValue { // SVCBDoHPath pair is used to indicate the URI template that the // clients may use to construct a DNS over HTTPS URI. // -// See RFC xxxx (https://datatracker.ietf.org/doc/html/draft-ietf-add-svcb-dns-02) -// and RFC yyyy (https://datatracker.ietf.org/doc/html/draft-ietf-add-ddr-06). +// See RFC 9461 (https://datatracker.ietf.org/doc/html/rfc9461) +// and RFC 9462 (https://datatracker.ietf.org/doc/html/rfc9462). // // A basic example of using the dohpath option together with the alpn // option to indicate support for DNS over HTTPS on a certain path: @@ -816,6 +820,44 @@ func (s *SVCBDoHPath) copy() SVCBKeyValue { } } +// The "ohttp" SvcParamKey is used to indicate that a service described in a SVCB RR +// can be accessed as a target using an associated gateway. +// Both the presentation and wire-format values for the "ohttp" parameter MUST be empty. 
+// +// See RFC 9460 (https://datatracker.ietf.org/doc/html/rfc9460/) +// and RFC 9230 (https://datatracker.ietf.org/doc/html/rfc9230/) +// +// A basic example of using the dohpath option together with the alpn +// option to indicate support for DNS over HTTPS on a certain path: +// +// s := new(dns.SVCB) +// s.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET} +// e := new(dns.SVCBAlpn) +// e.Alpn = []string{"h2", "h3"} +// p := new(dns.SVCBOhttp) +// s.Value = append(s.Value, e, p) +type SVCBOhttp struct{} + +func (*SVCBOhttp) Key() SVCBKey { return SVCB_OHTTP } +func (*SVCBOhttp) copy() SVCBKeyValue { return &SVCBOhttp{} } +func (*SVCBOhttp) pack() ([]byte, error) { return []byte{}, nil } +func (*SVCBOhttp) String() string { return "" } +func (*SVCBOhttp) len() int { return 0 } + +func (*SVCBOhttp) unpack(b []byte) error { + if len(b) != 0 { + return errors.New("dns: svcbotthp: svcbotthp must have no value") + } + return nil +} + +func (*SVCBOhttp) parse(b string) error { + if b != "" { + return errors.New("dns: svcbotthp: svcbotthp must have no value") + } + return nil +} + // SVCBLocal pair is intended for experimental/private use. The key is recommended // to be in the range [SVCB_PRIVATE_LOWER, SVCB_PRIVATE_UPPER]. 
// Basic use pattern for creating a keyNNNNN option: diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 2187c456..5cfbb516 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -209,6 +209,7 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { // ch := make(chan *dns.Envelope) // tr := new(dns.Transfer) // var wg sync.WaitGroup +// wg.Add(1) // go func() { // tr.Out(w, r, ch) // wg.Done() diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go index 8c12c355..bb595626 100644 --- a/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -119,7 +119,7 @@ func (opts CopyDestOptions) Marshal(header http.Header) { if opts.ReplaceMetadata { header.Set("x-amz-metadata-directive", replaceDirective) for k, v := range filterCustomMeta(opts.UserMetadata) { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { header.Set(k, v) } else { header.Set("x-amz-meta-"+k, v) diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go index 9e6b1543..d7fd2783 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -32,10 +32,18 @@ import ( func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: err.Error(), + } } if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "XMinioInvalidObjectName", + Message: err.Error(), + } } gctx, cancel := context.WithCancel(ctx) @@ -649,10 +657,18 @@ func newObject(ctx context.Context, cancel context.CancelFunc, reqCh chan<- getR func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { // Validate input arguments. if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, ObjectInfo{}, nil, err + return nil, ObjectInfo{}, nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: err.Error(), + } } if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, ObjectInfo{}, nil, err + return nil, ObjectInfo{}, nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "XMinioInvalidObjectName", + Message: err.Error(), + } } // Execute GET on objectName. diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 9182d4ea..51226630 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -637,7 +637,9 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - opts = PutObjectOptions{} + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + } if len(crcBytes) > 0 { // Add hash of hashes. 
crc.Reset() diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 4dec6040..6ccb5815 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -106,7 +106,11 @@ func (opts *PutObjectOptions) SetMatchETag(etag string) { if opts.customHeaders == nil { opts.customHeaders = http.Header{} } - opts.customHeaders.Set("If-Match", "\""+etag+"\"") + if etag == "*" { + opts.customHeaders.Set("If-Match", "*") + } else { + opts.customHeaders.Set("If-Match", "\""+etag+"\"") + } } // SetMatchETagExcept if etag does not match while PUT MinIO returns an @@ -116,7 +120,11 @@ func (opts *PutObjectOptions) SetMatchETagExcept(etag string) { if opts.customHeaders == nil { opts.customHeaders = http.Header{} } - opts.customHeaders.Set("If-None-Match", "\""+etag+"\"") + if etag == "*" { + opts.customHeaders.Set("If-None-Match", "*") + } else { + opts.customHeaders.Set("If-None-Match", "\""+etag+"\"") + } } // getNumThreads - gets the number of threads to be used in the multipart @@ -212,7 +220,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { } for k, v := range opts.UserMetadata { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { header.Set(k, v) } else { header.Set("x-amz-meta-"+k, v) @@ -230,7 +238,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { // validate() checks if the UserMetadata map has standard headers or and raises an error if so. 
func (opts PutObjectOptions) validate() (err error) { for k, v := range opts.UserMetadata { - if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isValidReplicationEncryptionHeader(k) { + if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { return errInvalidArgument(k + " unsupported user defined metadata name") } if !httpguts.ValidHeaderFieldValue(v) { @@ -456,7 +464,9 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - opts = PutObjectOptions{} + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go index b043dc40..11455beb 100644 --- a/vendor/github.com/minio/minio-go/v7/api-stat.go +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -61,10 +61,18 @@ func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, err func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { // Input validation. 
if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err + return ObjectInfo{}, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: err.Error(), + } } if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err + return ObjectInfo{}, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "XMinioInvalidObjectName", + Message: err.Error(), + } } headers := opts.Header() if opts.Internal.ReplicationDeleteMarker { diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index be60529d..93755140 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -129,7 +129,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.70" + libraryVersion = "v7.0.74" ) // User Agent should always following the below style. diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index b1979e32..e77bf9d4 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -1216,6 +1216,130 @@ func testPutObjectWithVersioning() { logSuccess(testName, function, args, startTime) } +func testListMultipartUpload() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Instantiate new minio client object. 
+ opts := &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + } + c, err := minio.New(os.Getenv(serverEndpoint), opts) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + core, err := minio.NewCore(os.Getenv(serverEndpoint), opts) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO core client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + ctx := context.Background() + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + defer func() { + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + } + }() + objName := "prefix/objectName" + + want := minio.ListMultipartUploadsResult{ + Bucket: bucketName, + KeyMarker: "", + UploadIDMarker: "", + NextKeyMarker: "", + NextUploadIDMarker: "", + EncodingType: "url", + MaxUploads: 1000, + IsTruncated: false, + Prefix: "prefix/objectName", + Delimiter: "/", + CommonPrefixes: nil, + } + for i := 0; i < 5; i++ { + uid, err := core.NewMultipartUpload(ctx, bucketName, objName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload failed", err) + return + } + want.Uploads = append(want.Uploads, minio.ObjectMultipartInfo{ + Initiated: time.Time{}, + StorageClass: "", + 
Key: objName, + Size: 0, + UploadID: uid, + Err: nil, + }) + + for j := 0; j < 5; j++ { + cmpGot := func(call string, got minio.ListMultipartUploadsResult) bool { + for i := range got.Uploads { + got.Uploads[i].Initiated = time.Time{} + } + if !reflect.DeepEqual(want, got) { + err := fmt.Errorf("want: %#v\ngot : %#v", want, got) + logError(testName, function, args, startTime, "", call+" failed", err) + } + return true + } + got, err := core.ListMultipartUploads(ctx, bucketName, objName, "", "", "/", 1000) + if err != nil { + logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err) + return + } + if !cmpGot("ListMultipartUploads-prefix", got) { + return + } + got, err = core.ListMultipartUploads(ctx, bucketName, objName, objName, "", "/", 1000) + got.KeyMarker = "" + if err != nil { + logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err) + return + } + if !cmpGot("ListMultipartUploads-marker", got) { + return + } + } + if i > 2 { + err = core.AbortMultipartUpload(ctx, bucketName, objName, uid) + if err != nil { + logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err) + return + } + want.Uploads = want.Uploads[:len(want.Uploads)-1] + } + } + for _, up := range want.Uploads { + err = core.AbortMultipartUpload(ctx, bucketName, objName, up.UploadID) + if err != nil { + logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err) + return + } + } + logSuccess(testName, function, args, startTime) +} + func testCopyObjectWithVersioning() { // initialize logging params startTime := time.Now() @@ -13536,6 +13660,7 @@ func main() { // execute tests if isFullMode() { + testListMultipartUpload() testGetObjectAttributes() testGetObjectAttributesErrorCases() testMakeBucketErrorV2() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go index 
8c5c4eb2..541e1a72 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -26,7 +26,7 @@ import ( "strings" "time" - ini "gopkg.in/ini.v1" + "github.com/go-ini/ini" ) // A externalProcessCredentials stores the output of a credential_process diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go index f1c165b7..750e26ff 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go @@ -39,7 +39,7 @@ type FileMinioClient struct { Filename string // MinIO Alias to extract credentials from the shared credentials file. If empty - // will default to environment variable "MINIO_ALIAS" or "default" if + // will default to environment variable "MINIO_ALIAS" or "s3" if // environment variable is also not set. Alias string diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go index 10c95ffe..e706b57d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go @@ -414,11 +414,32 @@ func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) e return en.EncodeElement(expirationWrapper(e), startElement) } +// DelMarkerExpiration represents DelMarkerExpiration actions element in an ILM policy +type DelMarkerExpiration struct { + XMLName xml.Name `xml:"DelMarkerExpiration" json:"-"` + Days int `xml:"Days,omitempty" json:"Days,omitempty"` +} + +// IsNull returns true if Days isn't specified and false otherwise. 
+func (de DelMarkerExpiration) IsNull() bool { + return de.Days == 0 +} + +// MarshalXML avoids serializing an empty DelMarkerExpiration element +func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + if de.IsNull() { + return nil + } + type delMarkerExp DelMarkerExpiration + return enc.EncodeElement(delMarkerExp(de), start) +} + // MarshalJSON customizes json encoding by omitting empty values func (r Rule) MarshalJSON() ([]byte, error) { type rule struct { AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` Expiration *Expiration `json:"Expiration,omitempty"` + DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"` ID string `json:"ID"` RuleFilter *Filter `json:"Filter,omitempty"` NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` @@ -442,6 +463,9 @@ func (r Rule) MarshalJSON() ([]byte, error) { if !r.Expiration.IsNull() { newr.Expiration = &r.Expiration } + if !r.DelMarkerExpiration.IsNull() { + newr.DelMarkerExpiration = &r.DelMarkerExpiration + } if !r.Transition.IsNull() { newr.Transition = &r.Transition } @@ -460,6 +484,7 @@ type Rule struct { XMLName xml.Name `xml:"Rule,omitempty" json:"-"` AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` + DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"` ID string `xml:"ID" json:"ID"` RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go 
b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go index a44799d2..151ca21e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -50,6 +50,7 @@ const ( ObjectRemovedAll EventType = "s3:ObjectRemoved:*" ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete" ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated" + ILMDelMarkerExpirationDelete EventType = "s3:LifecycleDelMarkerExpiration:Delete" ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject" ObjectTransitionAll EventType = "s3:ObjectTransition:*" ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed" diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index 0abbf6ef..65a2f75e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -406,6 +406,9 @@ func (c *Config) EditRule(opts Options) error { return fmt.Errorf("priority must be unique. 
Replication configuration already has a rule with this priority") } if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID { + if c.Role == newRule.Destination.Bucket { + continue + } return fmt.Errorf("invalid destination bucket for this rule") } } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go index 056e78a6..0e63ce2f 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go @@ -226,7 +226,7 @@ func IsGoogleEndpoint(endpointURL url.URL) bool { if endpointURL == sentinelURL { return false } - return endpointURL.Host == "storage.googleapis.com" + return endpointURL.Hostname() == "storage.googleapis.com" } // Expects ascii encoded strings - from output of urlEncodePath diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 3f4881e8..3f023704 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -19,12 +19,14 @@ package minio import ( "encoding/base64" + "errors" "fmt" "net/http" "strings" "time" "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/tags" ) // expirationDateFormat date format for expiration key in json policy. @@ -152,6 +154,27 @@ func (p *PostPolicy) SetCondition(matchType, condition, value string) error { return errInvalidArgument("Invalid condition in policy") } +// SetTagging - Sets tagging for the object for this policy based upload. 
+func (p *PostPolicy) SetTagging(tagging string) error { + if strings.TrimSpace(tagging) == "" || tagging == "" { + return errInvalidArgument("No tagging specified.") + } + _, err := tags.ParseObjectXML(strings.NewReader(tagging)) + if err != nil { + return errors.New("The XML you provided was not well-formed or did not validate against our published schema.") //nolint + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$tagging", + value: tagging, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["tagging"] = tagging + return nil +} + // SetContentType - Sets content-type of the object for this policy // based upload. func (p *PostPolicy) SetContentType(contentType string) error { @@ -186,6 +209,23 @@ func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) erro return nil } +// SetContentDisposition - Sets content-disposition of the object for this policy +func (p *PostPolicy) SetContentDisposition(contentDisposition string) error { + if strings.TrimSpace(contentDisposition) == "" || contentDisposition == "" { + return errInvalidArgument("No content disposition specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Disposition", + value: contentDisposition, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Disposition"] = contentDisposition + return nil +} + // SetContentLengthRange - Set new min and max content length // condition for all incoming uploads. 
func (p *PostPolicy) SetContentLengthRange(min, max int64) error { diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go index 068a6bfa..01cee8a1 100644 --- a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go @@ -44,6 +44,10 @@ var awsS3EndpointMap = map[string]awsS3Endpoint{ "s3.ca-central-1.amazonaws.com", "s3.dualstack.ca-central-1.amazonaws.com", }, + "ca-west-1": { + "s3.ca-west-1.amazonaws.com", + "s3.dualstack.ca-west-1.amazonaws.com", + }, "eu-west-1": { "s3.eu-west-1.amazonaws.com", "s3.dualstack.eu-west-1.amazonaws.com", diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index 252f4522..a5beb371 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -510,19 +510,9 @@ func isAmzHeader(headerKey string) bool { return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-") } -var supportedReplicationEncryptionHeaders = map[string]bool{ - "x-minio-replication-server-side-encryption-sealed-key": true, - "x-minio-replication-server-side-encryption-seal-algorithm": true, - "x-minio-replication-server-side-encryption-iv": true, - "x-minio-replication-encrypted-multipart": true, - "x-minio-replication-actual-object-size": true, - // Add more supported headers here. - // Must be lower case. -} - -// isValidReplicationEncryptionHeader returns true if header is one of valid replication encryption headers -func isValidReplicationEncryptionHeader(headerKey string) bool { - return supportedReplicationEncryptionHeaders[strings.ToLower(headerKey)] +// isMinioHeader returns true if header is x-minio- header. 
+func isMinioHeader(headerKey string) bool { + return strings.HasPrefix(strings.ToLower(headerKey), "x-minio-") } // supportedQueryValues is a list of query strings that can be passed in when using GetObject. diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE new file mode 100644 index 00000000..bbc7b897 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/LICENSE @@ -0,0 +1,31 @@ +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile new file mode 100644 index 00000000..e33ee173 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/Makefile @@ -0,0 +1,13 @@ +include $(GOROOT)/src/Make.inc + +TARG=bitbucket.org/ww/goautoneg +GOFILES=autoneg.go + +include $(GOROOT)/src/Make.pkg + +format: + gofmt -w *.go + +docs: + gomake clean + godoc ${TARG} > README.txt diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt similarity index 100% rename from vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt rename to vendor/github.com/munnerz/goautoneg/README.txt diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go similarity index 52% rename from vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go rename to vendor/github.com/munnerz/goautoneg/autoneg.go index a21b9d15..1dd1cad6 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/munnerz/goautoneg/autoneg.go @@ -1,28 +1,28 @@ /* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -36,6 +36,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + package goautoneg import ( @@ -51,16 +52,14 @@ type Accept struct { Params map[string]string } -// For internal use, so that we can use the sort interface -type accept_slice []Accept +// acceptSlice is defined to implement sort interface. 
+type acceptSlice []Accept -func (accept accept_slice) Len() int { - slice := []Accept(accept) +func (slice acceptSlice) Len() int { return len(slice) } -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) +func (slice acceptSlice) Less(i, j int) bool { ai, aj := slice[i], slice[j] if ai.Q > aj.Q { return true @@ -74,63 +73,93 @@ func (accept accept_slice) Less(i, j int) bool { return false } -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) +func (slice acceptSlice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + // Parse an Accept Header string returning a sorted list // of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) - mrp := strings.Split(part, ";") + a := Accept{ + Q: 1.0, + } + + sp, remainingPart := nextSplitElement(part, ";") - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], 
" ") + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } default: - continue + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) } - if len(mrp) == 1 { + if len(remainingPart) == 0 { accept = append(accept, a) continue } - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { + continue + } + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { continue } - token := strings.Trim(sp[0], " ") + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) + a.Q, _ = strconv.ParseFloat(sp1, 32) } else { - a.Params[token] = strings.Trim(sp[1], " ") + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) } } accept = append(accept, a) } - slice := accept_slice(accept) - sort.Sort(slice) - - return + sort.Sort(accept) + return accept } // Negotiate the most appropriate content_type given the accept header diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f6cbe7d..ff5ef7a9 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -21,9 +21,10 @@ import ( "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "github.com/prometheus/common/model" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" ) diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md 
b/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md index af72721a..aaa72fa8 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md +++ b/vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md @@ -12,6 +12,8 @@ $ benchstat bench.txt | tee benchstat.txt The below sections contain some of the results. Refer to [this gist](https://gist.github.com/puzpuzpuz/e62e38e06feadecfdc823c0f941ece0b) for the complete output. +Please note that `MapOf` got a number of optimizations since v2.3.1, so the current result is likely to be different. + ### Counter vs. atomic int64 ``` diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE b/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE index 83769719..261eeb9e 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE +++ b/vendor/github.com/puzpuzpuz/xsync/v3/LICENSE @@ -1,21 +1,201 @@ -MIT License - -Copyright (c) 2021 Andrey Pechkurov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/README.md b/vendor/github.com/puzpuzpuz/xsync/v3/README.md index cc86342c..6fe04976 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/README.md +++ b/vendor/github.com/puzpuzpuz/xsync/v3/README.md @@ -24,7 +24,7 @@ import ( ) ``` -*Note for v1 and v2 users*: v1 and v2 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial. +*Note for pre-v3 users*: v1 and v2 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial. 
### Counter @@ -66,7 +66,9 @@ m.Store("foo", "bar") v, ok := m.Load("foo") ``` -One important difference with `Map` is that `MapOf` supports arbitrary `comparable` key types: +Apart from CLHT, `MapOf` borrows ideas from Java's `j.u.c.ConcurrentHashMap` (immutable K/V pair structs instead of atomic snapshots) and C++'s `absl::flat_hash_map` (meta memory and SWAR-based lookups). It also has more dense memory layout when compared with `Map`. Long story short, `MapOf` should be preferred over `Map` when possible. + +An important difference with `Map` is that `MapOf` supports arbitrary `comparable` key types: ```go type Point struct { @@ -78,6 +80,19 @@ m.Store(Point{42, 42}, 42) v, ok := m.Load(point{42, 42}) ``` +Both maps use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DDOS protection, you may provide a custom hash function when creating a `MapOf`. For instance, Murmur3 finalizer does a decent job when it comes to integers: + +```go +m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 { + h := uint64(i) + h = (h ^ (h >> 33)) * 0xff51afd7ed558ccd + h = (h ^ (h >> 33)) * 0xc4ceb9fe1a85ec53 + return h ^ (h >> 33) +}) +``` + +When benchmarking concurrent maps, make sure to configure all of the competitors with the same hash function or, at least, take hash function performance into the consideration. + ### MPMCQueue A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue. @@ -133,6 +148,19 @@ Hence, by the design `RBMutex` is a specialized mutex for scenarios, such as cac `RBMutex` extends `sync.RWMutex` internally and uses it as the "reader bias disabled" fallback, so the same semantics apply. The only noticeable difference is in the reader tokens returned from the `RLock`/`RUnlock` methods. 
+Apart from blocking methods, `RBMutex` also has methods for optimistic locking: +```go +mu := xsync.NewRBMutex() +if locked, t := mu.TryRLock(); locked { + // critical reader section... + mu.RUnlock(t) +} +if mu.TryLock() { + // critical writer section... + mu.Unlock() +} +``` + ## License Licensed under MIT. diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/map.go b/vendor/github.com/puzpuzpuz/xsync/v3/map.go index c5cd5e2b..6c5b6ebd 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/map.go +++ b/vendor/github.com/puzpuzpuz/xsync/v3/map.go @@ -19,7 +19,7 @@ const ( ) const ( - // number of entries per bucket; 3 entries lead to size of 64B + // number of Map entries per bucket; 3 entries lead to size of 64B // (one cache line) on 64-bit machines entriesPerMapBucket = 3 // threshold fraction of table occupation to start a table shrinking @@ -75,6 +75,7 @@ type Map struct { resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications) table unsafe.Pointer // *mapTable minTableLen int + growOnly bool } type mapTable struct { @@ -118,28 +119,70 @@ type rangeEntry struct { value unsafe.Pointer } -// NewMap creates a new Map instance. -func NewMap() *Map { - return NewMapPresized(defaultMinMapTableLen * entriesPerMapBucket) +// MapConfig defines configurable Map/MapOf options. +type MapConfig struct { + sizeHint int + growOnly bool } -// NewMapPresized creates a new Map instance with capacity enough to hold -// sizeHint entries. If sizeHint is zero or negative, the value is ignored. -func NewMapPresized(sizeHint int) *Map { +// WithPresize configures new Map/MapOf instance with capacity enough +// to hold sizeHint entries. The capacity is treated as the minimal +// capacity meaning that the underlying hash table will never shrink +// to a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. 
+func WithPresize(sizeHint int) func(*MapConfig) { + return func(c *MapConfig) { + c.sizeHint = sizeHint + } +} + +// WithGrowOnly configures new Map/MapOf instance to be grow-only. +// This means that the underlying hash table grows in capacity when +// new keys are added, but does not shrink when keys are deleted. +// The only exception to this rule is the Clear method which +// shrinks the hash table back to the initial capacity. +func WithGrowOnly() func(*MapConfig) { + return func(c *MapConfig) { + c.growOnly = true + } +} + +// NewMap creates a new Map instance configured with the given +// options. +func NewMap(options ...func(*MapConfig)) *Map { + c := &MapConfig{ + sizeHint: defaultMinMapTableLen * entriesPerMapBucket, + } + for _, o := range options { + o(c) + } + m := &Map{} m.resizeCond = *sync.NewCond(&m.resizeMu) var table *mapTable - if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { + if c.sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { table = newMapTable(defaultMinMapTableLen) } else { - tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket)) + tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapBucket) / mapLoadFactor)) table = newMapTable(int(tableLen)) } m.minTableLen = len(table.buckets) + m.growOnly = c.growOnly atomic.StorePointer(&m.table, unsafe.Pointer(table)) return m } +// NewMapPresized creates a new Map instance with capacity enough to hold +// sizeHint entries. The capacity is treated as the minimal capacity +// meaning that the underlying hash table will never shrink to +// a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. +// +// Deprecated: use NewMap in combination with WithPresize. 
+func NewMapPresized(sizeHint int) *Map { + return NewMap(WithPresize(sizeHint)) +} + func newMapTable(minTableLen int) *mapTable { buckets := make([]bucketPadded, minTableLen) counterLen := minTableLen >> 10 @@ -434,7 +477,7 @@ func (m *Map) doCompute( unlockBucket(&rootb.topHashMutex) return newValue, false } - // Create and append the bucket. + // Create and append a bucket. newb := new(bucketPadded) newb.keys[0] = unsafe.Pointer(&key) newb.values[0] = unsafe.Pointer(&newValue) @@ -470,8 +513,9 @@ func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) { knownTableLen := len(knownTable.buckets) // Fast path for shrink attempts. if hint == mapShrinkHint { - shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction) - if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold { + if m.growOnly || + m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*entriesPerMapBucket)/mapShrinkFraction) { return } } @@ -722,23 +766,51 @@ func (table *mapTable) sumSize() int64 { return sum } -type mapStats struct { - RootBuckets int +// MapStats is Map/MapOf statistics. +// +// Warning: map statistics are intented to be used for diagnostic +// purposes, not for production code. This means that breaking changes +// may be introduced into this struct even between minor releases. +type MapStats struct { + // RootBuckets is the number of root buckets in the hash table. + // Each bucket holds a few entries. + RootBuckets int + // TotalBuckets is the total number of buckets in the hash table, + // including root and their chained buckets. Each bucket holds + // a few entries. TotalBuckets int + // EmptyBuckets is the number of buckets that hold no entries. 
EmptyBuckets int - Capacity int - Size int // calculated number of entries - Counter int // number of entries according to table counter - CounterLen int // number of counter stripes - MinEntries int // min entries per chain of buckets - MaxEntries int // max entries per chain of buckets + // Capacity is the Map/MapOf capacity, i.e. the total number of + // entries that all buckets can physically hold. This number + // does not consider the load factor. + Capacity int + // Size is the exact number of entries stored in the map. + Size int + // Counter is the number of entries stored in the map according + // to the internal atomic counter. In case of concurrent map + // modifications this number may be different from Size. + Counter int + // CounterLen is the number of internal atomic counter stripes. + // This number may grow with the map capacity to improve + // multithreaded scalability. + CounterLen int + // MinEntries is the minimum number of entries per a chain of + // buckets, i.e. a root bucket and its chained buckets. + MinEntries int + // MinEntries is the maximum number of entries per a chain of + // buckets, i.e. a root bucket and its chained buckets. + MaxEntries int + // TotalGrowths is the number of times the hash table grew. TotalGrowths int64 + // TotalGrowths is the number of times the hash table shrinked. TotalShrinks int64 } -func (s *mapStats) ToString() string { +// ToString returns string representation of map stats. 
+func (s *MapStats) ToString() string { var sb strings.Builder - sb.WriteString("\n---\n") + sb.WriteString("MapStats{\n") sb.WriteString(fmt.Sprintf("RootBuckets: %d\n", s.RootBuckets)) sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets)) sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets)) @@ -750,13 +822,15 @@ func (s *mapStats) ToString() string { sb.WriteString(fmt.Sprintf("MaxEntries: %d\n", s.MaxEntries)) sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths)) sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks)) - sb.WriteString("---\n") + sb.WriteString("}\n") return sb.String() } -// O(N) operation; use for debug purposes only -func (m *Map) stats() mapStats { - stats := mapStats{ +// Stats returns statistics for the Map. Just like other map +// methods, this one is thread-safe. Yet it's an O(N) operation, +// so it should be used only for diagnostics or debugging purposes. +func (m *Map) Stats() MapStats { + stats := MapStats{ TotalGrowths: atomic.LoadInt64(&m.totalGrowths), TotalShrinks: atomic.LoadInt64(&m.totalShrinks), MinEntries: math.MaxInt32, @@ -785,7 +859,7 @@ func (m *Map) stats() mapStats { if b.next == nil { break } - b = (*bucketPadded)(b.next) + b = (*bucketPadded)(atomic.LoadPointer(&b.next)) stats.TotalBuckets++ } if nentries < stats.MinEntries { diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go b/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go index 43886622..4c4ad086 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go +++ b/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go @@ -8,6 +8,16 @@ import ( "unsafe" ) +const ( + // number of MapOf entries per bucket; 5 entries lead to size of 64B + // (one cache line) on 64-bit machines + entriesPerMapOfBucket = 5 + defaultMeta uint64 = 0x8080808080808080 + metaMask uint64 = 0xffffffffff + defaultMetaMasked uint64 = defaultMeta & metaMask + emptyMetaSlot uint8 = 0x80 +) + // MapOf is like a Go map[K]V but is safe for concurrent // use by 
multiple goroutines without additional locking or // coordination. It follows the interface of sync.Map with @@ -24,6 +34,11 @@ import ( // Also, Get operations involve no write to memory, as well as no // mutexes or any other sort of locks. Due to this design, in all // considered scenarios MapOf outperforms sync.Map. +// +// MapOf also borrows ideas from Java's j.u.c.ConcurrentHashMap +// (immutable K/V pair structs instead of atomic snapshots) +// and C++'s absl::flat_hash_map (meta memory and SWAR-based +// lookups). type MapOf[K comparable, V any] struct { totalGrowths int64 totalShrinks int64 @@ -33,6 +48,7 @@ type MapOf[K comparable, V any] struct { table unsafe.Pointer // *mapOfTable hasher func(K, uint64) uint64 minTableLen int + growOnly bool } type mapOfTable[K comparable, V any] struct { @@ -45,7 +61,7 @@ type mapOfTable[K comparable, V any] struct { } // bucketOfPadded is a CL-sized map bucket holding up to -// entriesPerMapBucket entries. +// entriesPerMapOfBucket entries. type bucketOfPadded struct { //lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte @@ -53,9 +69,9 @@ type bucketOfPadded struct { } type bucketOf struct { - hashes [entriesPerMapBucket]uint64 - entries [entriesPerMapBucket]unsafe.Pointer // *entryOf - next unsafe.Pointer // *bucketOfPadded + meta uint64 + entries [entriesPerMapOfBucket]unsafe.Pointer // *entryOf + next unsafe.Pointer // *bucketOfPadded mu sync.Mutex } @@ -65,39 +81,59 @@ type entryOf[K comparable, V any] struct { value V } -// NewMapOf creates a new MapOf instance. -func NewMapOf[K comparable, V any]() *MapOf[K, V] { - return NewMapOfPresized[K, V](defaultMinMapTableLen * entriesPerMapBucket) +// NewMapOf creates a new MapOf instance configured with the given +// options. +func NewMapOf[K comparable, V any](options ...func(*MapConfig)) *MapOf[K, V] { + return NewMapOfWithHasher[K, V](defaultHasher[K](), options...) 
} -// NewMapOfPresized creates a new MapOf instance with capacity enough -// to hold sizeHint entries. If sizeHint is zero or negative, the value -// is ignored. -func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] { - return newMapOfPresized[K, V](makeHasher[K](), sizeHint) -} - -func newMapOfPresized[K comparable, V any]( +// NewMapOfWithHasher creates a new MapOf instance configured with +// the given hasher and options. The hash function is used instead +// of the built-in hash function configured when a map is created +// with the NewMapOf function. +func NewMapOfWithHasher[K comparable, V any]( hasher func(K, uint64) uint64, - sizeHint int, + options ...func(*MapConfig), ) *MapOf[K, V] { + c := &MapConfig{ + sizeHint: defaultMinMapTableLen * entriesPerMapOfBucket, + } + for _, o := range options { + o(c) + } + m := &MapOf[K, V]{} m.resizeCond = *sync.NewCond(&m.resizeMu) m.hasher = hasher var table *mapOfTable[K, V] - if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket { + if c.sizeHint <= defaultMinMapTableLen*entriesPerMapOfBucket { table = newMapOfTable[K, V](defaultMinMapTableLen) } else { - tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket)) + tableLen := nextPowOf2(uint32((float64(c.sizeHint) / entriesPerMapOfBucket) / mapLoadFactor)) table = newMapOfTable[K, V](int(tableLen)) } m.minTableLen = len(table.buckets) + m.growOnly = c.growOnly atomic.StorePointer(&m.table, unsafe.Pointer(table)) return m } +// NewMapOfPresized creates a new MapOf instance with capacity enough +// to hold sizeHint entries. The capacity is treated as the minimal capacity +// meaning that the underlying hash table will never shrink to +// a smaller capacity. If sizeHint is zero or negative, the value +// is ignored. +// +// Deprecated: use NewMapOf in combination with WithPresize. 
+func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] { + return NewMapOf[K, V](WithPresize(sizeHint)) +} + func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] { buckets := make([]bucketOfPadded, minTableLen) + for i := range buckets { + buckets[i].meta = defaultMeta + } counterLen := minTableLen >> 10 if counterLen < minMapCounterLen { counterLen = minMapCounterLen @@ -118,25 +154,24 @@ func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] { // The ok result indicates whether value was found in the map. func (m *MapOf[K, V]) Load(key K) (value V, ok bool) { table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) - hash := shiftHash(m.hasher(key, table.seed)) - bidx := uint64(len(table.buckets)-1) & hash + hash := m.hasher(key, table.seed) + h1 := h1(hash) + h2w := broadcast(h2(hash)) + bidx := uint64(len(table.buckets)-1) & h1 b := &table.buckets[bidx] for { - for i := 0; i < entriesPerMapBucket; i++ { - // We treat the hash code only as a hint, so there is no - // need to get an atomic snapshot. 
- h := atomic.LoadUint64(&b.hashes[i]) - if h == uint64(0) || h != hash { - continue - } - eptr := atomic.LoadPointer(&b.entries[i]) - if eptr == nil { - continue - } - e := (*entryOf[K, V])(eptr) - if e.key == key { - return e.value, true + metaw := atomic.LoadUint64(&b.meta) + markedw := markZeroBytes(metaw^h2w) & metaMask + for markedw != 0 { + idx := firstMarkedByteIndex(markedw) + eptr := atomic.LoadPointer(&b.entries[idx]) + if eptr != nil { + e := (*entryOf[K, V])(eptr) + if e.key == key { + return e.value, true + } } + markedw &= markedw - 1 } bptr := atomic.LoadPointer(&b.next) if bptr == nil { @@ -268,14 +303,16 @@ func (m *MapOf[K, V]) doCompute( for { compute_attempt: var ( - emptyb *bucketOfPadded - emptyidx int - hintNonEmpty int + emptyb *bucketOfPadded + emptyidx int ) table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table)) tableLen := len(table.buckets) - hash := shiftHash(m.hasher(key, table.seed)) - bidx := uint64(len(table.buckets)-1) & hash + hash := m.hasher(key, table.seed) + h1 := h1(hash) + h2 := h2(hash) + h2w := broadcast(h2) + bidx := uint64(len(table.buckets)-1) & h1 rootb := &table.buckets[bidx] rootb.mu.Lock() // The following two checks must go in reverse to what's @@ -293,62 +330,62 @@ func (m *MapOf[K, V]) doCompute( } b := rootb for { - for i := 0; i < entriesPerMapBucket; i++ { - h := atomic.LoadUint64(&b.hashes[i]) - if h == uint64(0) { - if emptyb == nil { - emptyb = b - emptyidx = i - } - continue - } - if h != hash { - hintNonEmpty++ - continue - } - e := (*entryOf[K, V])(b.entries[i]) - if e.key == key { - if loadIfExists { - rootb.mu.Unlock() - return e.value, !computeOnly - } - // In-place update/delete. - // We get a copy of the value via an interface{} on each call, - // thus the live value pointers are unique. Otherwise atomic - // snapshot won't be correct in case of multiple Store calls - // using the same value. - oldv := e.value - newv, del := valueFn(oldv, true) - if del { - // Deletion. 
- // First we update the hash, then the entry. - atomic.StoreUint64(&b.hashes[i], uint64(0)) - atomic.StorePointer(&b.entries[i], nil) - leftEmpty := false - if hintNonEmpty == 0 { - leftEmpty = isEmptyBucketOf(b) + metaw := b.meta + markedw := markZeroBytes(metaw^h2w) & metaMask + for markedw != 0 { + idx := firstMarkedByteIndex(markedw) + eptr := b.entries[idx] + if eptr != nil { + e := (*entryOf[K, V])(eptr) + if e.key == key { + if loadIfExists { + rootb.mu.Unlock() + return e.value, !computeOnly } + // In-place update/delete. + // We get a copy of the value via an interface{} on each call, + // thus the live value pointers are unique. Otherwise atomic + // snapshot won't be correct in case of multiple Store calls + // using the same value. + oldv := e.value + newv, del := valueFn(oldv, true) + if del { + // Deletion. + // First we update the hash, then the entry. + newmetaw := setByte(metaw, emptyMetaSlot, idx) + atomic.StoreUint64(&b.meta, newmetaw) + atomic.StorePointer(&b.entries[idx], nil) + rootb.mu.Unlock() + table.addSize(bidx, -1) + // Might need to shrink the table if we left bucket empty. + if newmetaw == defaultMeta { + m.resize(table, mapShrinkHint) + } + return oldv, !computeOnly + } + newe := new(entryOf[K, V]) + newe.key = key + newe.value = newv + atomic.StorePointer(&b.entries[idx], unsafe.Pointer(newe)) rootb.mu.Unlock() - table.addSize(bidx, -1) - // Might need to shrink the table. - if leftEmpty { - m.resize(table, mapShrinkHint) + if computeOnly { + // Compute expects the new value to be returned. + return newv, true } - return oldv, !computeOnly + // LoadAndStore expects the old value to be returned. + return oldv, true } - newe := new(entryOf[K, V]) - newe.key = key - newe.value = newv - atomic.StorePointer(&b.entries[i], unsafe.Pointer(newe)) - rootb.mu.Unlock() - if computeOnly { - // Compute expects the new value to be returned. - return newv, true - } - // LoadAndStore expects the old value to be returned. 
- return oldv, true } - hintNonEmpty++ + markedw &= markedw - 1 + } + if emptyb == nil { + // Search for empty entries (up to 5 per bucket). + emptyw := metaw & defaultMetaMasked + if emptyw != 0 { + idx := firstMarkedByteIndex(emptyw) + emptyb = b + emptyidx = idx + } } if b.next == nil { if emptyb != nil { @@ -362,14 +399,14 @@ func (m *MapOf[K, V]) doCompute( newe := new(entryOf[K, V]) newe.key = key newe.value = newValue - // First we update the hash, then the entry. - atomic.StoreUint64(&emptyb.hashes[emptyidx], hash) + // First we update meta, then the entry. + atomic.StoreUint64(&emptyb.meta, setByte(emptyb.meta, h2, emptyidx)) atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe)) rootb.mu.Unlock() table.addSize(bidx, 1) return newValue, computeOnly } - growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor + growThreshold := float64(tableLen) * entriesPerMapOfBucket * mapLoadFactor if table.sumSize() > int64(growThreshold) { // Need to grow the table. Then go for another attempt. rootb.mu.Unlock() @@ -383,9 +420,9 @@ func (m *MapOf[K, V]) doCompute( rootb.mu.Unlock() return newValue, false } - // Create and append the bucket. + // Create and append a bucket. newb := new(bucketOfPadded) - newb.hashes[0] = hash + newb.meta = setByte(defaultMeta, h2, 0) newe := new(entryOf[K, V]) newe.key = key newe.value = newValue @@ -421,8 +458,9 @@ func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) { knownTableLen := len(knownTable.buckets) // Fast path for shrink attempts. 
if hint == mapShrinkHint { - shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction) - if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold { + if m.growOnly || + m.minTableLen == knownTableLen || + knownTable.sumSize() > int64((knownTableLen*entriesPerMapOfBucket)/mapShrinkFraction) { return } } @@ -441,7 +479,7 @@ func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) { atomic.AddInt64(&m.totalGrowths, 1) newTable = newMapOfTable[K, V](tableLen << 1) case mapShrinkHint: - shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction) + shrinkThreshold := int64((tableLen * entriesPerMapOfBucket) / mapShrinkFraction) if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold { // Shrink the table with factor of 2. atomic.AddInt64(&m.totalShrinks, 1) @@ -482,13 +520,13 @@ func copyBucketOf[K comparable, V any]( rootb := b rootb.mu.Lock() for { - for i := 0; i < entriesPerMapBucket; i++ { + for i := 0; i < entriesPerMapOfBucket; i++ { if b.entries[i] != nil { e := (*entryOf[K, V])(b.entries[i]) - hash := shiftHash(hasher(e.key, destTable.seed)) - bidx := uint64(len(destTable.buckets)-1) & hash + hash := hasher(e.key, destTable.seed) + bidx := uint64(len(destTable.buckets)-1) & h1(hash) destb := &destTable.buckets[bidx] - appendToBucketOf(hash, b.entries[i], destb) + appendToBucketOf(h2(hash), b.entries[i], destb) copied++ } } @@ -516,7 +554,7 @@ func copyBucketOf[K comparable, V any]( func (m *MapOf[K, V]) Range(f func(key K, value V) bool) { var zeroPtr unsafe.Pointer // Pre-allocate array big enough to fit entries for most hash tables. 
- bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapBucket) + bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapOfBucket) tablep := atomic.LoadPointer(&m.table) table := *(*mapOfTable[K, V])(tablep) for i := range table.buckets { @@ -526,7 +564,7 @@ func (m *MapOf[K, V]) Range(f func(key K, value V) bool) { // the intermediate slice. rootb.mu.Lock() for { - for i := 0; i < entriesPerMapBucket; i++ { + for i := 0; i < entriesPerMapOfBucket; i++ { if b.entries[i] != nil { bentries = append(bentries, b.entries[i]) } @@ -563,18 +601,18 @@ func (m *MapOf[K, V]) Size() int { return int(table.sumSize()) } -func appendToBucketOf(hash uint64, entryPtr unsafe.Pointer, b *bucketOfPadded) { +func appendToBucketOf(h2 uint8, entryPtr unsafe.Pointer, b *bucketOfPadded) { for { - for i := 0; i < entriesPerMapBucket; i++ { + for i := 0; i < entriesPerMapOfBucket; i++ { if b.entries[i] == nil { - b.hashes[i] = hash + b.meta = setByte(b.meta, h2, i) b.entries[i] = entryPtr return } } if b.next == nil { newb := new(bucketOfPadded) - newb.hashes[0] = hash + newb.meta = setByte(defaultMeta, h2, 0) newb.entries[0] = entryPtr b.next = unsafe.Pointer(newb) return @@ -583,21 +621,6 @@ func appendToBucketOf(hash uint64, entryPtr unsafe.Pointer, b *bucketOfPadded) { } } -func isEmptyBucketOf(rootb *bucketOfPadded) bool { - b := rootb - for { - for i := 0; i < entriesPerMapBucket; i++ { - if b.entries[i] != nil { - return false - } - } - if b.next == nil { - return true - } - b = (*bucketOfPadded)(b.next) - } -} - func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) { cidx := uint64(len(table.size)-1) & bucketIdx atomic.AddInt64(&table.size[cidx].c, int64(delta)) @@ -616,17 +639,19 @@ func (table *mapOfTable[K, V]) sumSize() int64 { return sum } -func shiftHash(h uint64) uint64 { - // uint64(0) is a reserved value which stands for an empty slot. 
- if h == uint64(0) { - return uint64(1) - } - return h +func h1(h uint64) uint64 { + return h >> 7 } -// O(N) operation; use for debug purposes only -func (m *MapOf[K, V]) stats() mapStats { - stats := mapStats{ +func h2(h uint64) uint8 { + return uint8(h & 0x7f) +} + +// Stats returns statistics for the MapOf. Just like other map +// methods, this one is thread-safe. Yet it's an O(N) operation, +// so it should be used only for diagnostics or debugging purposes. +func (m *MapOf[K, V]) Stats() MapStats { + stats := MapStats{ TotalGrowths: atomic.LoadInt64(&m.totalGrowths), TotalShrinks: atomic.LoadInt64(&m.totalShrinks), MinEntries: math.MaxInt32, @@ -641,8 +666,8 @@ func (m *MapOf[K, V]) stats() mapStats { stats.TotalBuckets++ for { nentriesLocal := 0 - stats.Capacity += entriesPerMapBucket - for i := 0; i < entriesPerMapBucket; i++ { + stats.Capacity += entriesPerMapOfBucket + for i := 0; i < entriesPerMapOfBucket; i++ { if atomic.LoadPointer(&b.entries[i]) != nil { stats.Size++ nentriesLocal++ @@ -655,7 +680,7 @@ func (m *MapOf[K, V]) stats() mapStats { if b.next == nil { break } - b = (*bucketOfPadded)(b.next) + b = (*bucketOfPadded)(atomic.LoadPointer(&b.next)) stats.TotalBuckets++ } if nentries < stats.MinEntries { diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go b/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go index a20a1416..4cbd9c41 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go +++ b/vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go @@ -64,12 +64,41 @@ func NewRBMutex() *RBMutex { return &mu } +// TryRLock tries to lock m for reading without blocking. +// When TryRLock succeeds, it returns true and a reader token. +// In case of a failure, a false is returned. +func (mu *RBMutex) TryRLock() (bool, *RToken) { + if t := mu.fastRlock(); t != nil { + return true, t + } + // Optimistic slow path. 
+ if mu.rw.TryRLock() { + if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) { + atomic.StoreInt32(&mu.rbias, 1) + } + return true, nil + } + return false, nil +} + // RLock locks m for reading and returns a reader token. The // token must be used in the later RUnlock call. // // Should not be used for recursive read locking; a blocked Lock // call excludes new readers from acquiring the lock. func (mu *RBMutex) RLock() *RToken { + if t := mu.fastRlock(); t != nil { + return t + } + // Slow path. + mu.rw.RLock() + if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) { + atomic.StoreInt32(&mu.rbias, 1) + } + return nil +} + +func (mu *RBMutex) fastRlock() *RToken { if atomic.LoadInt32(&mu.rbias) == 1 { t, ok := rtokenPool.Get().(*RToken) if !ok { @@ -87,19 +116,14 @@ func (mu *RBMutex) RLock() *RToken { t.slot = slot return t } - // The mutex is no longer reader biased. Go to the slow path. + // The mutex is no longer reader biased. Roll back. atomic.AddInt32(&rslot.mu, -1) rtokenPool.Put(t) - break + return nil } // Contention detected. Give a try with the next slot. } } - // Slow path. - mu.rw.RLock() - if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) { - atomic.StoreInt32(&mu.rbias, 1) - } return nil } @@ -118,6 +142,25 @@ func (mu *RBMutex) RUnlock(t *RToken) { rtokenPool.Put(t) } +// TryLock tries to lock m for writing without blocking. +func (mu *RBMutex) TryLock() bool { + if mu.rw.TryLock() { + if atomic.LoadInt32(&mu.rbias) == 1 { + atomic.StoreInt32(&mu.rbias, 0) + for i := 0; i < len(mu.rslots); i++ { + if atomic.LoadInt32(&mu.rslots[i].mu) > 0 { + // There is a reader. Roll back. + atomic.StoreInt32(&mu.rbias, 1) + mu.rw.Unlock() + return false + } + } + } + return true + } + return false +} + // Lock locks m for writing. If the lock is already locked for // reading or writing, Lock blocks until the lock is available. 
func (mu *RBMutex) Lock() { diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/util.go b/vendor/github.com/puzpuzpuz/xsync/v3/util.go index 73689120..76927089 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/util.go +++ b/vendor/github.com/puzpuzpuz/xsync/v3/util.go @@ -1,6 +1,7 @@ package xsync import ( + "math/bits" "runtime" _ "unsafe" ) @@ -44,3 +45,22 @@ func parallelism() uint32 { //go:noescape //go:linkname runtime_fastrand runtime.fastrand func runtime_fastrand() uint32 + +func broadcast(b uint8) uint64 { + return 0x101010101010101 * uint64(b) +} + +func firstMarkedByteIndex(w uint64) int { + return bits.TrailingZeros64(w) >> 3 +} + +// SWAR byte search: may produce false positives, e.g. for 0x0100, +// so make sure to double-check bytes found by this function. +func markZeroBytes(w uint64) uint64 { + return ((w - 0x0101010101010101) & (^w) & 0x8080808080808080) +} + +func setByte(w uint64, b uint8, idx int) uint64 { + shift := idx << 3 + return (w &^ (0xff << shift)) | (uint64(b) << shift) +} diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go b/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go index 9588dcdd..9aa65972 100644 --- a/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go +++ b/vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go @@ -33,10 +33,10 @@ func hashString(s string, seed uint64) uint64 { //go:linkname runtime_memhash runtime.memhash func runtime_memhash(p unsafe.Pointer, h, s uintptr) uintptr -// makeHasher creates a fast hash function for the given comparable type. +// defaultHasher creates a fast hash function for the given comparable type. // The only limitation is that the type should not contain interfaces inside // based on runtime.typehash. 
-func makeHasher[T comparable]() func(T, uint64) uint64 { +func defaultHasher[T comparable]() func(T, uint64) uint64 { var zero T if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface { diff --git a/vendor/github.com/swaggo/files/v2/dist/swagger-ui-bundle.js b/vendor/github.com/swaggo/files/v2/dist/swagger-ui-bundle.js index 763a1166..ef539d2c 100644 --- a/vendor/github.com/swaggo/files/v2/dist/swagger-ui-bundle.js +++ b/vendor/github.com/swaggo/files/v2/dist/swagger-ui-bundle.js @@ -1,3 +1,2 @@ /*! For license information please see swagger-ui-bundle.js.LICENSE.txt */ -!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.SwaggerUIBundle=t():e.SwaggerUIBundle=t()}(this,(function(){return(()=>{var e={17967:(e,t)=>{"use strict";t.N=void 0;var r=/^([^\w]*)(javascript|data|vbscript)/im,n=/&#(\w+)(^\w|;)?/g,o=/[\u0000-\u001F\u007F-\u009F\u2000-\u200D\uFEFF]/gim,a=/^([^:]+):/gm,i=[".","/"];t.N=function(e){var t,s=(t=e||"",t.replace(n,(function(e,t){return String.fromCharCode(t)}))).replace(o,"").trim();if(!s)return"about:blank";if(function(e){return i.indexOf(e[0])>-1}(s))return s;var l=s.match(a);if(!l)return s;var u=l[0];return r.test(u)?"about:blank":s}},53795:(e,t,r)=>{"use strict";r.d(t,{Z:()=>P});var n=r(23101),o=r.n(n),a=r(61125),i=r.n(a),s=r(11882),l=r.n(s),u=r(97606),c=r.n(u),p=r(67294),f=r(43393);function h(e){return h="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},h(e)}function d(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function m(e,t){for(var r=0;r1&&void 0!==arguments[1]?arguments[1]:{},r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},n=k(t,r),o=e||Object.keys(y({},r,{},t));return o.every(n)}function k(e,t){return 
function(r){if("string"==typeof r)return(0,f.is)(t[r],e[r]);if(Array.isArray(r))return(0,f.is)(S(t,r),S(e,r));throw new TypeError("Invalid key: expected Array or string: "+r)}}var C=function(e){function t(){return d(this,t),E(this,b(t).apply(this,arguments))}var r,n,o;return function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&w(e,t)}(t,e),r=t,n=[{key:"shouldComponentUpdate",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return!A(this.updateOnProps,this.props,e,"updateOnProps")||!A(this.updateOnStates,this.state,t,"updateOnStates")}}],n&&m(r.prototype,n),o&&m(r,o),t}(p.Component);const O=C;var j=r(23930),I=r.n(j),N=r(45697),T=r.n(N);class P extends O{constructor(){super(...arguments),i()(this,"getModelName",(e=>-1!==l()(e).call(e,"#/definitions/")?e.replace(/^.*#\/definitions\//,""):-1!==l()(e).call(e,"#/components/schemas/")?e.replace(/^.*#\/components\/schemas\//,""):void 0)),i()(this,"getRefSchema",(e=>{let{specSelectors:t}=this.props;return t.findDefinition(e)}))}render(){let{getComponent:e,getConfigs:t,specSelectors:n,schema:a,required:i,name:s,isRef:l,specPath:u,displayName:c,includeReadOnly:f,includeWriteOnly:h}=this.props;const d=e("ObjectModel"),m=e("ArrayModel"),g=e("PrimitiveModel");let v="object",y=a&&a.get("$$ref");if(!s&&y&&(s=this.getModelName(y)),!a&&y&&(a=this.getRefSchema(s)),!a)return p.createElement("span",{className:"model model-title"},p.createElement("span",{className:"model-title__text"},c||s),p.createElement("img",{src:r(2517),height:"20px",width:"20px"}));const b=n.isOAS3()&&a.get("deprecated");switch(l=void 0!==l?l:!!y,v=a&&a.get("type")||v,v){case"object":return p.createElement(d,o()({className:"object"},this.props,{specPath:u,getConfigs:t,schema:a,name:s,deprecated:b,isRef:l,includeReadOnly:f,includeWriteOnly:h}));case"array":return 
p.createElement(m,o()({className:"array"},this.props,{getConfigs:t,schema:a,name:s,deprecated:b,required:i,includeReadOnly:f,includeWriteOnly:h}));default:return p.createElement(g,o()({},this.props,{getComponent:e,getConfigs:t,schema:a,name:s,deprecated:b,required:i}))}}}i()(P,"propTypes",{schema:c()(I()).isRequired,getComponent:T().func.isRequired,getConfigs:T().func.isRequired,specSelectors:T().object.isRequired,name:T().string,displayName:T().string,isRef:T().bool,required:T().bool,expandDepth:T().number,depth:T().number,specPath:I().list.isRequired,includeReadOnly:T().bool,includeWriteOnly:T().bool})},5623:(e,t,r)=>{"use strict";r.d(t,{Z:()=>f});var n=r(61125),o=r.n(n),a=r(28222),i=r.n(a),s=r(67294),l=r(84564),u=r.n(l),c=r(90242),p=r(27504);class f extends s.Component{constructor(e,t){super(e,t),o()(this,"getDefinitionUrl",(()=>{let{specSelectors:e}=this.props;return new(u())(e.url(),p.Z.location).toString()}));let{getConfigs:r}=e,{validatorUrl:n}=r();this.state={url:this.getDefinitionUrl(),validatorUrl:void 0===n?"https://validator.swagger.io/validator":n}}UNSAFE_componentWillReceiveProps(e){let{getConfigs:t}=e,{validatorUrl:r}=t();this.setState({url:this.getDefinitionUrl(),validatorUrl:void 0===r?"https://validator.swagger.io/validator":r})}render(){let{getConfigs:e}=this.props,{spec:t}=e(),r=(0,c.Nm)(this.state.validatorUrl);return"object"==typeof t&&i()(t).length?null:this.state.url&&(0,c.hW)(this.state.validatorUrl)&&(0,c.hW)(this.state.url)?s.createElement("span",{className:"float-right"},s.createElement("a",{target:"_blank",rel:"noopener noreferrer",href:`${r}/debug?url=${encodeURIComponent(this.state.url)}`},s.createElement(h,{src:`${r}?url=${encodeURIComponent(this.state.url)}`,alt:"Online validator badge"}))):null}}class h extends s.Component{constructor(e){super(e),this.state={loaded:!1,error:!1}}componentDidMount(){const e=new 
Image;e.onload=()=>{this.setState({loaded:!0})},e.onerror=()=>{this.setState({error:!0})},e.src=this.props.src}UNSAFE_componentWillReceiveProps(e){if(e.src!==this.props.src){const t=new Image;t.onload=()=>{this.setState({loaded:!0})},t.onerror=()=>{this.setState({error:!0})},t.src=e.src}}render(){return this.state.error?s.createElement("img",{alt:"Error"}):this.state.loaded?s.createElement("img",{src:this.props.src,alt:this.props.alt}):null}}},86019:(e,t,r)=>{"use strict";r.d(t,{Z:()=>me,s:()=>ge});var n=r(67294),o=r(89927);function a(e,t){if(Array.prototype.indexOf)return e.indexOf(t);for(var r=0,n=e.length;r=0;r--)!0===t(e[r])&&e.splice(r,1)}function s(e){throw new Error("Unhandled case for value: '"+e+"'")}var l=function(){function e(e){void 0===e&&(e={}),this.tagName="",this.attrs={},this.innerHTML="",this.whitespaceRegex=/\s+/,this.tagName=e.tagName||"",this.attrs=e.attrs||{},this.innerHTML=e.innerHtml||e.innerHTML||""}return e.prototype.setTagName=function(e){return this.tagName=e,this},e.prototype.getTagName=function(){return this.tagName||""},e.prototype.setAttr=function(e,t){return this.getAttrs()[e]=t,this},e.prototype.getAttr=function(e){return this.getAttrs()[e]},e.prototype.setAttrs=function(e){return Object.assign(this.getAttrs(),e),this},e.prototype.getAttrs=function(){return this.attrs||(this.attrs={})},e.prototype.setClass=function(e){return this.setAttr("class",e)},e.prototype.addClass=function(e){for(var t,r=this.getClass(),n=this.whitespaceRegex,o=r?r.split(n):[],i=e.split(n);t=i.shift();)-1===a(o,t)&&o.push(t);return this.getAttrs().class=o.join(" "),this},e.prototype.removeClass=function(e){for(var t,r=this.getClass(),n=this.whitespaceRegex,o=r?r.split(n):[],i=e.split(n);o.length&&(t=i.shift());){var s=a(o,t);-1!==s&&o.splice(s,1)}return this.getAttrs().class=o.join(" "),this},e.prototype.getClass=function(){return this.getAttrs().class||""},e.prototype.hasClass=function(e){return-1!==(" "+this.getClass()+" ").indexOf(" "+e+" 
")},e.prototype.setInnerHTML=function(e){return this.innerHTML=e,this},e.prototype.setInnerHtml=function(e){return this.setInnerHTML(e)},e.prototype.getInnerHTML=function(){return this.innerHTML||""},e.prototype.getInnerHtml=function(){return this.getInnerHTML()},e.prototype.toAnchorString=function(){var e=this.getTagName(),t=this.buildAttrsStr();return["<",e,t=t?" "+t:"",">",this.getInnerHtml(),""].join("")},e.prototype.buildAttrsStr=function(){if(!this.attrs)return"";var e=this.getAttrs(),t=[];for(var r in e)e.hasOwnProperty(r)&&t.push(r+'="'+e[r]+'"');return t.join(" ")},e}();var u=function(){function e(e){void 0===e&&(e={}),this.newWindow=!1,this.truncate={},this.className="",this.newWindow=e.newWindow||!1,this.truncate=e.truncate||{},this.className=e.className||""}return e.prototype.build=function(e){return new l({tagName:"a",attrs:this.createAttrs(e),innerHtml:this.processAnchorText(e.getAnchorText())})},e.prototype.createAttrs=function(e){var t={href:e.getAnchorHref()},r=this.createCssClass(e);return r&&(t.class=r),this.newWindow&&(t.target="_blank",t.rel="noopener noreferrer"),this.truncate&&this.truncate.length&&this.truncate.length=s)return l.host.length==t?(l.host.substr(0,t-o)+r).substr(0,s+n):i(c,s).substr(0,s+n);var p="";if(l.path&&(p+="/"+l.path),l.query&&(p+="?"+l.query),p){if((c+p).length>=s)return(c+p).length==t?(c+p).substr(0,t):(c+i(p,s-c.length)).substr(0,s+n);c+=p}if(l.fragment){var f="#"+l.fragment;if((c+f).length>=s)return(c+f).length==t?(c+f).substr(0,t):(c+i(f,s-c.length)).substr(0,s+n);c+=f}if(l.scheme&&l.host){var h=l.scheme+"://";if((c+h).length0&&(d=c.substr(-1*Math.floor(s/2))),(c.substr(0,Math.ceil(s/2))+r+d).substr(0,s+n)}(e,r):"middle"===n?function(e,t,r){if(e.length<=t)return e;var n,o;null==r?(r="…",n=8,o=3):(n=r.length,o=r.length);var a=t-o,i="";return a>0&&(i=e.substr(-1*Math.floor(a/2))),(e.substr(0,Math.ceil(a/2))+r+i).substr(0,a+n)}(e,r):function(e,t,r){return function(e,t,r){var n;return 
e.length>t&&(null==r?(r="…",n=3):n=r.length,e=e.substring(0,t-n)+r),e}(e,t,r)}(e,r)},e}(),c=function(){function e(e){this.__jsduckDummyDocProp=null,this.matchedText="",this.offset=0,this.tagBuilder=e.tagBuilder,this.matchedText=e.matchedText,this.offset=e.offset}return e.prototype.getMatchedText=function(){return this.matchedText},e.prototype.setOffset=function(e){this.offset=e},e.prototype.getOffset=function(){return this.offset},e.prototype.getCssClassSuffixes=function(){return[this.getType()]},e.prototype.buildTag=function(){return this.tagBuilder.build(this)},e}(),p=function(e,t){return p=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var r in t)t.hasOwnProperty(r)&&(e[r]=t[r])},p(e,t)};function f(e,t){function r(){this.constructor=e}p(e,t),e.prototype=null===t?Object.create(t):(r.prototype=t.prototype,new r)}var h=function(){return h=Object.assign||function(e){for(var t,r=1,n=arguments.length;r-1},e.isValidUriScheme=function(e){var t=e.match(this.uriSchemeRegex),r=t&&t[0].toLowerCase();return"javascript:"!==r&&"vbscript:"!==r},e.urlMatchDoesNotHaveProtocolOrDot=function(e,t){return!(!e||t&&this.hasFullProtocolRegex.test(t)||-1!==e.indexOf("."))},e.urlMatchDoesNotHaveAtLeastOneWordChar=function(e,t){return!(!e||!t)&&(!this.hasFullProtocolRegex.test(t)&&!this.hasWordCharAfterProtocolRegex.test(e))},e.hasFullProtocolRegex=/^[A-Za-z][-.+A-Za-z0-9]*:\/\//,e.uriSchemeRegex=/^[A-Za-z][-.+A-Za-z0-9]*:/,e.hasWordCharAfterProtocolRegex=new RegExp(":[^\\s]*?["+C+"]"),e.ipRegex=/[0-9][0-9]?[0-9]?\.[0-9][0-9]?[0-9]?\.[0-9][0-9]?[0-9]?\.[0-9][0-9]?[0-9]?(:[0-9]*)?\/?$/,e}(),V=(d=new RegExp("[/?#](?:["+N+"\\-+&@#/%=~_()|'$*\\[\\]{}?!:,.;^✓]*["+N+"\\-+&@#/%=~_()|'$*\\[\\]{}✓])?"),new 
RegExp(["(?:","(",/(?:[A-Za-z][-.+A-Za-z0-9]{0,63}:(?![A-Za-z][-.+A-Za-z0-9]{0,63}:\/\/)(?!\d+\/?)(?:\/\/)?)/.source,M(2),")","|","(","(//)?",/(?:www\.)/.source,M(6),")","|","(","(//)?",M(10)+"\\.",L.source,"(?![-"+I+"])",")",")","(?::[0-9]+)?","(?:"+d.source+")?"].join(""),"gi")),$=new RegExp("["+N+"]"),W=function(e){function t(t){var r=e.call(this,t)||this;return r.stripPrefix={scheme:!0,www:!0},r.stripTrailingSlash=!0,r.decodePercentEncoding=!0,r.matcherRegex=V,r.wordCharRegExp=$,r.stripPrefix=t.stripPrefix,r.stripTrailingSlash=t.stripTrailingSlash,r.decodePercentEncoding=t.decodePercentEncoding,r}return f(t,e),t.prototype.parseMatches=function(e){for(var t,r=this.matcherRegex,n=this.stripPrefix,o=this.stripTrailingSlash,a=this.decodePercentEncoding,i=this.tagBuilder,s=[],l=function(){var r=t[0],l=t[1],c=t[4],p=t[5],f=t[9],h=t.index,d=p||f,m=e.charAt(h-1);if(!q.isValid(r,l))return"continue";if(h>0&&"@"===m)return"continue";if(h>0&&d&&u.wordCharRegExp.test(m))return"continue";if(/\?$/.test(r)&&(r=r.substr(0,r.length-1)),u.matchHasUnbalancedClosingParen(r))r=r.substr(0,r.length-1);else{var g=u.matchHasInvalidCharAfterTld(r,l);g>-1&&(r=r.substr(0,g))}var v=["http://","https://"].find((function(e){return!!l&&-1!==l.indexOf(e)}));if(v){var y=r.indexOf(v);r=r.substr(y),l=l.substr(y),h+=y}var w=l?"scheme":c?"www":"tld",E=!!l;s.push(new b({tagBuilder:i,matchedText:r,offset:h,urlMatchType:w,url:r,protocolUrlMatch:E,protocolRelativeMatch:!!d,stripPrefix:n,stripTrailingSlash:o,decodePercentEncoding:a}))},u=this;null!==(t=r.exec(e));)l();return s},t.prototype.matchHasUnbalancedClosingParen=function(e){var t,r=e.charAt(e.length-1);if(")"===r)t="(";else if("]"===r)t="[";else{if("}"!==r)return!1;t="{"}for(var n=0,o=0,a=e.length-1;o"===e?(m=new ne(h(h({},m),{name:H()})),W()):E.test(e)||x.test(e)||":"===e||V()}function w(e){">"===e?V():E.test(e)?f=3:V()}function _(e){S.test(e)||("/"===e?f=12:">"===e?W():"<"===e?$():"="===e||A.test(e)||k.test(e)?V():f=5)}function 
C(e){S.test(e)?f=6:"/"===e?f=12:"="===e?f=7:">"===e?W():"<"===e?$():A.test(e)&&V()}function O(e){S.test(e)||("/"===e?f=12:"="===e?f=7:">"===e?W():"<"===e?$():A.test(e)?V():f=5)}function j(e){S.test(e)||('"'===e?f=8:"'"===e?f=9:/[>=`]/.test(e)?V():"<"===e?$():f=10)}function I(e){'"'===e&&(f=11)}function N(e){"'"===e&&(f=11)}function T(e){S.test(e)?f=4:">"===e?W():"<"===e&&$()}function P(e){S.test(e)?f=4:"/"===e?f=12:">"===e?W():"<"===e?$():(f=4,c--)}function R(e){">"===e?(m=new ne(h(h({},m),{isClosing:!0})),W()):f=4}function M(t){"--"===e.substr(c,2)?(c+=2,m=new ne(h(h({},m),{type:"comment"})),f=14):"DOCTYPE"===e.substr(c,7).toUpperCase()?(c+=7,m=new ne(h(h({},m),{type:"doctype"})),f=20):V()}function D(e){"-"===e?f=15:">"===e?V():f=16}function L(e){"-"===e?f=18:">"===e?V():f=16}function B(e){"-"===e&&(f=17)}function F(e){f="-"===e?18:16}function z(e){">"===e?W():"!"===e?f=19:"-"===e||(f=16)}function U(e){"-"===e?f=17:">"===e?W():f=16}function q(e){">"===e?W():"<"===e&&$()}function V(){f=0,m=u}function $(){f=1,m=new ne({idx:c})}function W(){var t=e.slice(d,m.idx);t&&a(t,d),"comment"===m.type?i(m.idx):"doctype"===m.type?l(m.idx):(m.isOpening&&n(m.name,m.idx),m.isClosing&&o(m.name,m.idx)),V(),d=c+1}function H(){var t=m.idx+(m.isClosing?2:1);return e.slice(t,c).toLowerCase()}d=0&&n++},onText:function(e,r){if(0===n){var a=function(e,t){if(!t.global)throw new Error("`splitRegex` must have the 'g' flag set");for(var r,n=[],o=0;r=t.exec(e);)n.push(e.substring(o,r.index)),n.push(r[0]),o=r.index+r[0].length;return n.push(e.substring(o)),n}(e,/( | |<|<|>|>|"|"|')/gi),i=r;a.forEach((function(e,r){if(r%2==0){var n=t.parseText(e,i);o.push.apply(o,n)}i+=e.length}))}},onCloseTag:function(e){r.indexOf(e)>=0&&(n=Math.max(n-1,0))},onComment:function(e){},onDoctype:function(e){}}),o=this.compactMatches(o),o=this.removeUnwantedMatches(o)},e.prototype.compactMatches=function(e){e.sort((function(e,t){return e.getOffset()-t.getOffset()}));for(var 
t=0;to?t:t+1;e.splice(i,1);continue}e[t+1].getOffset()/g,">"));for(var t=this.parse(e),r=[],n=0,o=0,a=t.length;o/i.test(e)}function se(){var e=[],t=new oe({stripPrefix:!1,url:!0,email:!0,replaceFn:function(t){switch(t.getType()){case"url":e.push({text:t.matchedText,url:t.getUrl()});break;case"email":e.push({text:t.matchedText,url:"mailto:"+t.getEmail().replace(/^mailto:/i,"")})}return!1}});return{links:e,autolinker:t}}function le(e){var t,r,n,o,a,i,s,l,u,c,p,f,h,d,m=e.tokens,g=null;for(r=0,n=m.length;r=0;t--)if("link_close"!==(a=o[t]).type){if("htmltag"===a.type&&(d=a.content,/^\s]/i.test(d)&&p>0&&p--,ie(a.content)&&p++),!(p>0)&&"text"===a.type&&ae.test(a.content)){if(g||(f=(g=se()).links,h=g.autolinker),i=a.content,f.length=0,h.link(i),!f.length)continue;for(s=[],c=a.level,l=0;l({useUnsafeMarkdown:!1})};const me=de;function ge(e){let{useUnsafeMarkdown:t=!1}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const r=t,n=t?[]:["style","class"];return t&&!ge.hasWarnedAboutDeprecation&&(console.warn("useUnsafeMarkdown display configuration parameter is deprecated since >3.26.0 and will be removed in v4.0.0."),ge.hasWarnedAboutDeprecation=!0),pe().sanitize(e,{ADD_ATTR:["target"],FORBID_TAGS:["style","form"],ALLOW_DATA_ATTR:r,FORBID_ATTR:n})}ge.hasWarnedAboutDeprecation=!1},45308:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>f});var n,o=r(86),a=r.n(o),i=r(8712),s=r.n(i),l=r(90242),u=r(27621);const c=r(95102),p={},f=p;a()(n=s()(c).call(c)).call(n,(function(e){if("./index.js"===e)return;let t=c(e);p[(0,l.Zl)(e)]=t.default?t.default:t})),p.SafeRender=u.default},55812:(e,t,r)=>{"use 
strict";r.r(t),r.d(t,{SHOW_AUTH_POPUP:()=>p,AUTHORIZE:()=>f,LOGOUT:()=>h,PRE_AUTHORIZE_OAUTH2:()=>d,AUTHORIZE_OAUTH2:()=>m,VALIDATE:()=>g,CONFIGURE_AUTH:()=>v,RESTORE_AUTHORIZATION:()=>y,showDefinitions:()=>b,authorize:()=>w,authorizeWithPersistOption:()=>E,logout:()=>x,logoutWithPersistOption:()=>_,preAuthorizeImplicit:()=>S,authorizeOauth2:()=>A,authorizeOauth2WithPersistOption:()=>k,authorizePassword:()=>C,authorizeApplication:()=>O,authorizeAccessCodeWithFormParams:()=>j,authorizeAccessCodeWithBasicAuthentication:()=>I,authorizeRequest:()=>N,configureAuth:()=>T,restoreAuthorization:()=>P,persistAuthorizationIfNeeded:()=>R,authPopup:()=>M});var n=r(35627),o=r.n(n),a=r(76986),i=r.n(a),s=r(84564),l=r.n(s),u=r(27504),c=r(90242);const p="show_popup",f="authorize",h="logout",d="pre_authorize_oauth2",m="authorize_oauth2",g="validate",v="configure_auth",y="restore_authorization";function b(e){return{type:p,payload:e}}function w(e){return{type:f,payload:e}}const E=e=>t=>{let{authActions:r}=t;r.authorize(e),r.persistAuthorizationIfNeeded()};function x(e){return{type:h,payload:e}}const _=e=>t=>{let{authActions:r}=t;r.logout(e),r.persistAuthorizationIfNeeded()},S=e=>t=>{let{authActions:r,errActions:n}=t,{auth:a,token:i,isValid:s}=e,{schema:l,name:c}=a,p=l.get("flow");delete u.Z.swaggerUIRedirectOauth2,"accessCode"===p||s||n.newAuthErr({authId:c,source:"auth",level:"warning",message:"Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"}),i.error?n.newAuthErr({authId:c,source:"auth",level:"error",message:o()(i)}):r.authorizeOauth2WithPersistOption({auth:a,token:i})};function A(e){return{type:m,payload:e}}const k=e=>t=>{let{authActions:r}=t;r.authorizeOauth2(e),r.persistAuthorizationIfNeeded()},C=e=>t=>{let{authActions:r}=t,{schema:n,name:o,username:a,password:s,passwordType:l,clientId:u,clientSecret:p}=e,f={grant_type:"password",scope:e.scopes.join(" 
"),username:a,password:s},h={};switch(l){case"request-body":!function(e,t,r){t&&i()(e,{client_id:t});r&&i()(e,{client_secret:r})}(f,u,p);break;case"basic":h.Authorization="Basic "+(0,c.r3)(u+":"+p);break;default:console.warn(`Warning: invalid passwordType ${l} was passed, not including client id and secret`)}return r.authorizeRequest({body:(0,c.GZ)(f),url:n.get("tokenUrl"),name:o,headers:h,query:{},auth:e})};const O=e=>t=>{let{authActions:r}=t,{schema:n,scopes:o,name:a,clientId:i,clientSecret:s}=e,l={Authorization:"Basic "+(0,c.r3)(i+":"+s)},u={grant_type:"client_credentials",scope:o.join(" ")};return r.authorizeRequest({body:(0,c.GZ)(u),name:a,url:n.get("tokenUrl"),auth:e,headers:l})},j=e=>{let{auth:t,redirectUrl:r}=e;return e=>{let{authActions:n}=e,{schema:o,name:a,clientId:i,clientSecret:s,codeVerifier:l}=t,u={grant_type:"authorization_code",code:t.code,client_id:i,client_secret:s,redirect_uri:r,code_verifier:l};return n.authorizeRequest({body:(0,c.GZ)(u),name:a,url:o.get("tokenUrl"),auth:t})}},I=e=>{let{auth:t,redirectUrl:r}=e;return e=>{let{authActions:n}=e,{schema:o,name:a,clientId:i,clientSecret:s,codeVerifier:l}=t,u={Authorization:"Basic "+(0,c.r3)(i+":"+s)},p={grant_type:"authorization_code",code:t.code,client_id:i,redirect_uri:r,code_verifier:l};return n.authorizeRequest({body:(0,c.GZ)(p),name:a,url:o.get("tokenUrl"),auth:t,headers:u})}},N=e=>t=>{let r,{fn:n,getConfigs:a,authActions:s,errActions:u,oas3Selectors:c,specSelectors:p,authSelectors:f}=t,{body:h,query:d={},headers:m={},name:g,url:v,auth:y}=e,{additionalQueryStringParams:b}=f.getConfigs()||{};if(p.isOAS3()){let e=c.serverEffectiveValue(c.selectedServer());r=l()(v,e,!0)}else r=l()(v,p.url(),!0);"object"==typeof b&&(r.query=i()({},r.query,b));const w=r.toString();let E=i()({Accept:"application/json, text/plain, 
*/*","Content-Type":"application/x-www-form-urlencoded","X-Requested-With":"XMLHttpRequest"},m);n.fetch({url:w,method:"post",headers:E,query:d,body:h,requestInterceptor:a().requestInterceptor,responseInterceptor:a().responseInterceptor}).then((function(e){let t=JSON.parse(e.data),r=t&&(t.error||""),n=t&&(t.parseError||"");e.ok?r||n?u.newAuthErr({authId:g,level:"error",source:"auth",message:o()(t)}):s.authorizeOauth2WithPersistOption({auth:y,token:t}):u.newAuthErr({authId:g,level:"error",source:"auth",message:e.statusText})})).catch((e=>{let t=new Error(e).message;if(e.response&&e.response.data){const r=e.response.data;try{const e="string"==typeof r?JSON.parse(r):r;e.error&&(t+=`, error: ${e.error}`),e.error_description&&(t+=`, description: ${e.error_description}`)}catch(e){}}u.newAuthErr({authId:g,level:"error",source:"auth",message:t})}))};function T(e){return{type:v,payload:e}}function P(e){return{type:y,payload:e}}const R=()=>e=>{let{authSelectors:t,getConfigs:r}=e;if(r().persistAuthorization){const e=t.authorized();localStorage.setItem("authorized",o()(e.toJS()))}},M=(e,t)=>()=>{u.Z.swaggerUIRedirectOauth2=t,u.Z.open(e)}},93705:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>u,preauthorizeBasic:()=>c,preauthorizeApiKey:()=>p});var n=r(11189),o=r.n(n),a=r(43962),i=r(55812),s=r(60035),l=r(48302);function u(){return{afterLoad(e){this.rootInjects=this.rootInjects||{},this.rootInjects.initOAuth=e.authActions.configureAuth,this.rootInjects.preauthorizeApiKey=o()(p).call(p,null,e),this.rootInjects.preauthorizeBasic=o()(c).call(c,null,e)},statePlugins:{auth:{reducers:a.default,actions:i,selectors:s},spec:{wrapActions:l}}}}function c(e,t,r,n){const{authActions:{authorize:o},specSelectors:{specJson:a,isOAS3:i}}=e,s=i()?["components","securitySchemes"]:["securityDefinitions"],l=a().getIn([...s,t]);return l?o({[t]:{value:{username:r,password:n},schema:l.toJS()}}):null}function 
p(e,t,r){const{authActions:{authorize:n},specSelectors:{specJson:o,isOAS3:a}}=e,i=a()?["components","securitySchemes"]:["securityDefinitions"],s=o().getIn([...i,t]);return s?n({[t]:{value:r,schema:s.toJS()}}):null}},43962:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>c});var n=r(86),o=r.n(n),a=r(76986),i=r.n(a),s=r(43393),l=r(90242),u=r(55812);const c={[u.SHOW_AUTH_POPUP]:(e,t)=>{let{payload:r}=t;return e.set("showDefinitions",r)},[u.AUTHORIZE]:(e,t)=>{var r;let{payload:n}=t,a=(0,s.fromJS)(n),i=e.get("authorized")||(0,s.Map)();return o()(r=a.entrySeq()).call(r,(t=>{let[r,n]=t;if(!(0,l.Wl)(n.getIn))return e.set("authorized",i);let o=n.getIn(["schema","type"]);if("apiKey"===o||"http"===o)i=i.set(r,n);else if("basic"===o){let e=n.getIn(["value","username"]),t=n.getIn(["value","password"]);i=i.setIn([r,"value"],{username:e,header:"Basic "+(0,l.r3)(e+":"+t)}),i=i.setIn([r,"schema"],n.get("schema"))}})),e.set("authorized",i)},[u.AUTHORIZE_OAUTH2]:(e,t)=>{let r,{payload:n}=t,{auth:o,token:a}=n;o.token=i()({},a),r=(0,s.fromJS)(o);let l=e.get("authorized")||(0,s.Map)();return l=l.set(r.get("name"),r),e.set("authorized",l)},[u.LOGOUT]:(e,t)=>{let{payload:r}=t,n=e.get("authorized").withMutations((e=>{o()(r).call(r,(t=>{e.delete(t)}))}));return e.set("authorized",n)},[u.CONFIGURE_AUTH]:(e,t)=>{let{payload:r}=t;return e.set("configs",r)},[u.RESTORE_AUTHORIZATION]:(e,t)=>{let{payload:r}=t;return e.set("authorized",(0,s.fromJS)(r.authorized))}}},60035:(e,t,r)=>{"use strict";r.r(t),r.d(t,{shownDefinitions:()=>y,definitionsToAuthorize:()=>b,getDefinitionsByNames:()=>w,definitionsForRequirements:()=>E,authorized:()=>x,isAuthorized:()=>_,getConfigs:()=>S});var n=r(86),o=r.n(n),a=r(51679),i=r.n(a),s=r(14418),l=r.n(s),u=r(11882),c=r.n(u),p=r(97606),f=r.n(p),h=r(28222),d=r.n(h),m=r(20573),g=r(43393);const v=e=>e,y=(0,m.P1)(v,(e=>e.get("showDefinitions"))),b=(0,m.P1)(v,(()=>e=>{var t;let{specSelectors:r}=e,n=r.securityDefinitions()||(0,g.Map)({}),a=(0,g.List)();return 
o()(t=n.entrySeq()).call(t,(e=>{let[t,r]=e,n=(0,g.Map)();n=n.set(t,r),a=a.push(n)})),a})),w=(e,t)=>e=>{var r;let{specSelectors:n}=e;console.warn("WARNING: getDefinitionsByNames is deprecated and will be removed in the next major version.");let a=n.securityDefinitions(),i=(0,g.List)();return o()(r=t.valueSeq()).call(r,(e=>{var t;let r=(0,g.Map)();o()(t=e.entrySeq()).call(t,(e=>{let t,[n,i]=e,s=a.get(n);var l;"oauth2"===s.get("type")&&i.size&&(t=s.get("scopes"),o()(l=t.keySeq()).call(l,(e=>{i.contains(e)||(t=t.delete(e))})),s=s.set("allowedScopes",t));r=r.set(n,s)})),i=i.push(r)})),i},E=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:(0,g.List)();return e=>{let{authSelectors:r}=e;const n=r.definitionsToAuthorize()||(0,g.List)();let a=(0,g.List)();return o()(n).call(n,(e=>{let r=i()(t).call(t,(t=>t.get(e.keySeq().first())));r&&(o()(e).call(e,((t,n)=>{if("oauth2"===t.get("type")){const i=r.get(n);let s=t.get("scopes");var a;if(g.List.isList(i)&&g.Map.isMap(s))o()(a=s.keySeq()).call(a,(e=>{i.contains(e)||(s=s.delete(e))})),e=e.set(n,t.set("scopes",s))}})),a=a.push(e))})),a}},x=(0,m.P1)(v,(e=>e.get("authorized")||(0,g.Map)())),_=(e,t)=>e=>{var r;let{authSelectors:n}=e,o=n.authorized();return g.List.isList(t)?!!l()(r=t.toJS()).call(r,(e=>{var t,r;return-1===c()(t=f()(r=d()(e)).call(r,(e=>!!o.get(e)))).call(t,!1)})).length:null},S=(0,m.P1)(v,(e=>e.get("configs")))},48302:(e,t,r)=>{"use strict";r.r(t),r.d(t,{execute:()=>n});const n=(e,t)=>{let{authSelectors:r,specSelectors:n}=t;return t=>{let{path:o,method:a,operation:i,extras:s}=t,l={authorized:r.authorized()&&r.authorized().toJS(),definitions:n.securityDefinitions()&&n.securityDefinitions().toJS(),specSecurity:n.security()&&n.security().toJS()};return e({path:o,method:a,operation:i,securities:l,...s})}}},70714:(e,t,r)=>{"use strict";r.r(t),r.d(t,{UPDATE_CONFIGS:()=>n,TOGGLE_CONFIGS:()=>o,update:()=>a,toggle:()=>i,loaded:()=>s});const n="configs_update",o="configs_toggle";function 
a(e,t){return{type:n,payload:{[e]:t}}}function i(e){return{type:o,payload:e}}const s=()=>e=>{let{getConfigs:t,authActions:r}=e;if(t().persistAuthorization){const e=localStorage.getItem("authorized");e&&r.restoreAuthorization({authorized:JSON.parse(e)})}}},92256:(e,t,r)=>{"use strict";r.r(t),r.d(t,{parseYamlConfig:()=>o});var n=r(1272);const o=(e,t)=>{try{return n.ZP.load(e)}catch(e){return t&&t.errActions.newThrownErr(new Error(e)),{}}}},1661:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>c});var n=r(15163),o=r(92256),a=r(70714),i=r(22698),s=r(69018),l=r(37743);const u={getLocalConfig:()=>(0,o.parseYamlConfig)(n)};function c(){return{statePlugins:{spec:{actions:i,selectors:u},configs:{reducers:l.default,actions:a,selectors:s}}}}},37743:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>a});var n=r(43393),o=r(70714);const a={[o.UPDATE_CONFIGS]:(e,t)=>e.merge((0,n.fromJS)(t.payload)),[o.TOGGLE_CONFIGS]:(e,t)=>{const r=t.payload,n=e.get(r);return e.set(r,!n)}}},69018:(e,t,r)=>{"use strict";r.r(t),r.d(t,{get:()=>a});var n=r(58309),o=r.n(n);const a=(e,t)=>e.getIn(o()(t)?t:[t])},22698:(e,t,r)=>{"use strict";r.r(t),r.d(t,{downloadConfig:()=>o,getConfigByUrl:()=>a});var n=r(92256);const o=e=>t=>{const{fn:{fetch:r}}=t;return r(e)},a=(e,t)=>r=>{let{specActions:o}=r;if(e)return o.downloadConfig(e).then(a,a);function a(r){r instanceof Error||r.status>=400?(o.updateLoadingStatus("failedConfig"),o.updateLoadingStatus("failedConfig"),o.updateUrl(""),console.error(r.statusText+" "+e.url),t(null)):t((0,n.parseYamlConfig)(r.text))}}},31970:(e,t,r)=>{"use strict";r.r(t),r.d(t,{setHash:()=>n});const n=e=>e?history.pushState(null,null,`#${e}`):window.location.hash=""},34980:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(41599),o=r(60877),a=r(34584);function i(){return[n.default,{statePlugins:{configs:{wrapActions:{loaded:(e,t)=>function(){e(...arguments);const 
r=decodeURIComponent(window.location.hash);t.layoutActions.parseDeepLinkHash(r)}}}},wrapComponents:{operation:o.default,OperationTag:a.default}}]}},41599:(e,t,r)=>{"use strict";r.r(t),r.d(t,{show:()=>b,scrollTo:()=>w,parseDeepLinkHash:()=>E,readyToScroll:()=>x,scrollToElement:()=>_,clearScrollTo:()=>S,default:()=>A});var n=r(58309),o=r.n(n),a=r(24278),i=r.n(a),s=r(97606),l=r.n(s),u=r(11882),c=r.n(u),p=r(31970),f=r(45172),h=r.n(f),d=r(90242),m=r(43393),g=r.n(m);const v="layout_scroll_to",y="layout_clear_scroll",b=(e,t)=>{let{getConfigs:r,layoutSelectors:n}=t;return function(){for(var t=arguments.length,a=new Array(t),i=0;i({type:v,payload:o()(e)?e:[e]}),E=e=>t=>{let{layoutActions:r,layoutSelectors:n,getConfigs:o}=t;if(o().deepLinking&&e){var a;let t=i()(e).call(e,1);"!"===t[0]&&(t=i()(t).call(t,1)),"/"===t[0]&&(t=i()(t).call(t,1));const o=l()(a=t.split("/")).call(a,(e=>e||"")),s=n.isShownKeyFromUrlHashArray(o),[u,p="",f=""]=s;if("operations"===u){const e=n.isShownKeyFromUrlHashArray([p]);c()(p).call(p,"_")>-1&&(console.warn("Warning: escaping deep link whitespace with `_` will be unsupported in v4.0, use `%20` instead."),r.show(l()(e).call(e,(e=>e.replace(/_/g," "))),!0)),r.show(e,!0)}(c()(p).call(p,"_")>-1||c()(f).call(f,"_")>-1)&&(console.warn("Warning: escaping deep link whitespace with `_` will be unsupported in v4.0, use `%20` instead."),r.show(l()(s).call(s,(e=>e.replace(/_/g," "))),!0)),r.show(s,!0),r.scrollTo(s)}},x=(e,t)=>r=>{const n=r.layoutSelectors.getScrollToKey();g().is(n,(0,m.fromJS)(e))&&(r.layoutActions.scrollToElement(t),r.layoutActions.clearScrollTo())},_=(e,t)=>r=>{try{t=t||r.fn.getScrollParent(e),h().createScroller(t).to(e)}catch(e){console.error(e)}},S=()=>({type:y});const A={fn:{getScrollParent:function(e,t){const r=document.documentElement;let n=getComputedStyle(e);const o="absolute"===n.position,a=t?/(auto|scroll|hidden)/:/(auto|scroll)/;if("fixed"===n.position)return r;for(let 
t=e;t=t.parentElement;)if(n=getComputedStyle(t),(!o||"static"!==n.position)&&a.test(n.overflow+n.overflowY+n.overflowX))return t;return r}},statePlugins:{layout:{actions:{scrollToElement:_,scrollTo:w,clearScrollTo:S,readyToScroll:x,parseDeepLinkHash:E},selectors:{getScrollToKey:e=>e.get("scrollToKey"),isShownKeyFromUrlHashArray(e,t){const[r,n]=t;return n?["operations",r,n]:r?["operations-tag",r]:[]},urlHashArrayFromIsShownKey(e,t){let[r,n,o]=t;return"operations"==r?[n,o]:"operations-tag"==r?[n]:[]}},reducers:{[v]:(e,t)=>e.set("scrollToKey",g().fromJS(t.payload)),[y]:e=>e.delete("scrollToKey")},wrapActions:{show:b}}}}},34584:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(61125),o=r.n(n),a=r(67294);const i=(e,t)=>class extends a.Component{constructor(){super(...arguments),o()(this,"onLoad",(e=>{const{tag:r}=this.props,n=["operations-tag",r];t.layoutActions.readyToScroll(n,e)}))}render(){return a.createElement("span",{ref:this.onLoad},a.createElement(e,this.props))}}},60877:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(61125),o=r.n(n),a=r(67294);r(23930);const i=(e,t)=>class extends a.Component{constructor(){super(...arguments),o()(this,"onLoad",(e=>{const{operation:r}=this.props,{tag:n,operationId:o}=r.toObject();let{isShownKey:a}=r.toObject();a=a||["operations",n,o],t.layoutActions.readyToScroll(a,e)}))}render(){return a.createElement("span",{ref:this.onLoad},a.createElement(e,this.props))}}},48011:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>d});var n=r(76986),o=r.n(n),a=r(63460),i=r.n(a),s=r(11882),l=r.n(s),u=r(35627),c=r.n(u),p=r(20573),f=r(43393),h=r(27504);function d(e){let{fn:t}=e;return{statePlugins:{spec:{actions:{download:e=>r=>{let{errActions:n,specSelectors:a,specActions:s,getConfigs:l}=r,{fetch:u}=t;const c=l();function p(t){if(t instanceof Error||t.status>=400)return s.updateLoadingStatus("failed"),n.newThrownErr(o()(new Error((t.message||t.statusText)+" "+e),{source:"fetch"})),void(!t.status&&t instanceof 
Error&&function(){try{let t;if("URL"in h.Z?t=new(i())(e):(t=document.createElement("a"),t.href=e),"https:"!==t.protocol&&"https:"===h.Z.location.protocol){const e=o()(new Error(`Possible mixed-content issue? The page was loaded over https:// but a ${t.protocol}// URL was specified. Check that you are not attempting to load mixed content.`),{source:"fetch"});return void n.newThrownErr(e)}if(t.origin!==h.Z.location.origin){const e=o()(new Error(`Possible cross-origin (CORS) issue? The URL origin (${t.origin}) does not match the page (${h.Z.location.origin}). Check the server returns the correct 'Access-Control-Allow-*' headers.`),{source:"fetch"});n.newThrownErr(e)}}catch(e){return}}());s.updateLoadingStatus("success"),s.updateSpec(t.text),a.url()!==e&&s.updateUrl(e)}e=e||a.url(),s.updateLoadingStatus("loading"),n.clear({source:"fetch"}),u({url:e,loadSpec:!0,requestInterceptor:c.requestInterceptor||(e=>e),responseInterceptor:c.responseInterceptor||(e=>e),credentials:"same-origin",headers:{Accept:"application/json,*/*"}}).then(p,p)},updateLoadingStatus:e=>{let t=[null,"loading","failed","success","failedConfig"];return-1===l()(t).call(t,e)&&console.error(`Error: ${e} is not one of ${c()(t)}`),{type:"spec_update_loading_status",payload:e}}},reducers:{spec_update_loading_status:(e,t)=>"string"==typeof t.payload?e.set("loadingStatus",t.payload):e},selectors:{loadingStatus:(0,p.P1)((e=>e||(0,f.Map)()),(e=>e.get("loadingStatus")||null))}}}}}},34966:(e,t,r)=>{"use strict";r.r(t),r.d(t,{NEW_THROWN_ERR:()=>o,NEW_THROWN_ERR_BATCH:()=>a,NEW_SPEC_ERR:()=>i,NEW_SPEC_ERR_BATCH:()=>s,NEW_AUTH_ERR:()=>l,CLEAR:()=>u,CLEAR_BY:()=>c,newThrownErr:()=>p,newThrownErrBatch:()=>f,newSpecErr:()=>h,newSpecErrBatch:()=>d,newAuthErr:()=>m,clear:()=>g,clearBy:()=>v});var n=r(7710);const o="err_new_thrown_err",a="err_new_thrown_err_batch",i="err_new_spec_err",s="err_new_spec_err_batch",l="err_new_auth_err",u="err_clear",c="err_clear_by";function 
p(e){return{type:o,payload:(0,n.serializeError)(e)}}function f(e){return{type:a,payload:e}}function h(e){return{type:i,payload:e}}function d(e){return{type:s,payload:e}}function m(e){return{type:l,payload:e}}function g(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return{type:u,payload:e}}function v(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:()=>!0;return{type:c,payload:e}}},56982:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>c});var n=r(14418),o=r.n(n),a=r(97606),i=r.n(a),s=r(54061),l=r.n(s);const u=[r(2392),r(21835)];function c(e){var t;let r={jsSpec:{}},n=l()(u,((e,t)=>{try{let n=t.transform(e,r);return o()(n).call(n,(e=>!!e))}catch(t){return console.error("Transformer error:",t),e}}),e);return i()(t=o()(n).call(n,(e=>!!e))).call(t,(e=>(!e.get("line")&&e.get("path"),e)))}},2392:(e,t,r)=>{"use strict";r.r(t),r.d(t,{transform:()=>p});var n=r(97606),o=r.n(n),a=r(11882),i=r.n(a),s=r(24278),l=r.n(s),u=r(24282),c=r.n(u);function p(e){return o()(e).call(e,(e=>{var t;let r="is not of a type(s)",n=i()(t=e.get("message")).call(t,r);if(n>-1){var o,a;let t=l()(o=e.get("message")).call(o,n+r.length).split(",");return e.set("message",l()(a=e.get("message")).call(a,0,n)+function(e){return c()(e).call(e,((e,t,r,n)=>r===n.length-1&&n.length>1?e+"or "+t:n[r+1]&&n.length>2?e+t+", ":n[r+1]?e+t+" ":e+t),"should be a")}(t))}return e}))}},21835:(e,t,r)=>{"use strict";r.r(t),r.d(t,{transform:()=>n});r(97606),r(11882),r(27361),r(43393);function n(e,t){let{jsSpec:r}=t;return e}},77793:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(93527),o=r(34966),a=r(87667);function i(e){return{statePlugins:{err:{reducers:(0,n.default)(e),actions:o,selectors:a}}}}},93527:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>v});var n=r(76986),o=r.n(n),a=r(97606),i=r.n(a),s=r(39022),l=r.n(s),u=r(14418),c=r.n(u),p=r(2250),f=r.n(p),h=r(34966),d=r(43393),m=r(56982);let g={line:0,level:"error",message:"Unknown error"};function 
v(){return{[h.NEW_THROWN_ERR]:(e,t)=>{let{payload:r}=t,n=o()(g,r,{type:"thrown"});return e.update("errors",(e=>(e||(0,d.List)()).push((0,d.fromJS)(n)))).update("errors",(e=>(0,m.default)(e)))},[h.NEW_THROWN_ERR_BATCH]:(e,t)=>{let{payload:r}=t;return r=i()(r).call(r,(e=>(0,d.fromJS)(o()(g,e,{type:"thrown"})))),e.update("errors",(e=>{var t;return l()(t=e||(0,d.List)()).call(t,(0,d.fromJS)(r))})).update("errors",(e=>(0,m.default)(e)))},[h.NEW_SPEC_ERR]:(e,t)=>{let{payload:r}=t,n=(0,d.fromJS)(r);return n=n.set("type","spec"),e.update("errors",(e=>(e||(0,d.List)()).push((0,d.fromJS)(n)).sortBy((e=>e.get("line"))))).update("errors",(e=>(0,m.default)(e)))},[h.NEW_SPEC_ERR_BATCH]:(e,t)=>{let{payload:r}=t;return r=i()(r).call(r,(e=>(0,d.fromJS)(o()(g,e,{type:"spec"})))),e.update("errors",(e=>{var t;return l()(t=e||(0,d.List)()).call(t,(0,d.fromJS)(r))})).update("errors",(e=>(0,m.default)(e)))},[h.NEW_AUTH_ERR]:(e,t)=>{let{payload:r}=t,n=(0,d.fromJS)(o()({},r));return n=n.set("type","auth"),e.update("errors",(e=>(e||(0,d.List)()).push((0,d.fromJS)(n)))).update("errors",(e=>(0,m.default)(e)))},[h.CLEAR]:(e,t)=>{var r;let{payload:n}=t;if(!n||!e.get("errors"))return e;let o=c()(r=e.get("errors")).call(r,(e=>{var t;return f()(t=e.keySeq()).call(t,(t=>{const r=e.get(t),o=n[t];return!o||r!==o}))}));return e.merge({errors:o})},[h.CLEAR_BY]:(e,t)=>{var r;let{payload:n}=t;if(!n||"function"!=typeof n)return e;let o=c()(r=e.get("errors")).call(r,(e=>n(e)));return e.merge({errors:o})}}}},87667:(e,t,r)=>{"use strict";r.r(t),r.d(t,{allErrors:()=>a,lastError:()=>i});var n=r(43393),o=r(20573);const a=(0,o.P1)((e=>e),(e=>e.get("errors",(0,n.List)()))),i=(0,o.P1)(a,(e=>e.last()))},49978:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(4309);function o(){return{fn:{opsFilter:n.default}}}},4309:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(14418),o=r.n(n),a=r(11882),i=r.n(a);function s(e,t){return o()(e).call(e,((e,r)=>-1!==i()(r).call(r,t)))}},25474:(e,t,r)=>{"use 
strict";r.r(t),r.d(t,{UPDATE_LAYOUT:()=>o,UPDATE_FILTER:()=>a,UPDATE_MODE:()=>i,SHOW:()=>s,updateLayout:()=>l,updateFilter:()=>u,show:()=>c,changeMode:()=>p});var n=r(90242);const o="layout_update_layout",a="layout_update_filter",i="layout_update_mode",s="layout_show";function l(e){return{type:o,payload:e}}function u(e){return{type:a,payload:e}}function c(e){let t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];return e=(0,n.AF)(e),{type:s,payload:{thing:e,shown:t}}}function p(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return e=(0,n.AF)(e),{type:i,payload:{thing:e,mode:t}}}},26821:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(5672),o=r(25474),a=r(4400),i=r(28989);function s(){return{statePlugins:{layout:{reducers:n.default,actions:o,selectors:a},spec:{wrapSelectors:i}}}}},5672:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(39022),o=r.n(n),a=r(43393),i=r(25474);const s={[i.UPDATE_LAYOUT]:(e,t)=>e.set("layout",t.payload),[i.UPDATE_FILTER]:(e,t)=>e.set("filter",t.payload),[i.SHOW]:(e,t)=>{const r=t.payload.shown,n=(0,a.fromJS)(t.payload.thing);return e.update("shown",(0,a.fromJS)({}),(e=>e.set(n,r)))},[i.UPDATE_MODE]:(e,t)=>{var r;let n=t.payload.thing,a=t.payload.mode;return e.setIn(o()(r=["modes"]).call(r,n),(a||"")+"")}}},4400:(e,t,r)=>{"use strict";r.r(t),r.d(t,{current:()=>i,currentFilter:()=>s,isShown:()=>l,whatMode:()=>u,showSummary:()=>c});var n=r(20573),o=r(90242),a=r(43393);const i=e=>e.get("layout"),s=e=>e.get("filter"),l=(e,t,r)=>(t=(0,o.AF)(t),e.get("shown",(0,a.fromJS)({})).get((0,a.fromJS)(t),r)),u=function(e,t){let r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"";return t=(0,o.AF)(t),e.getIn(["modes",...t],r)},c=(0,n.P1)((e=>e),(e=>!l(e,"editor")))},28989:(e,t,r)=>{"use strict";r.r(t),r.d(t,{taggedOperations:()=>a});var n=r(24278),o=r.n(n);const a=(e,t)=>function(r){for(var n=arguments.length,a=new Array(n>1?n-1:0),i=1;i=0&&(s=o()(s).call(s,0,f)),s}},9150:(e,t,r)=>{"use 
strict";r.r(t),r.d(t,{default:()=>a});var n=r(11189),o=r.n(n);function a(e){let{configs:t}=e;const r={debug:0,info:1,log:2,warn:3,error:4},n=e=>r[e]||-1;let{logLevel:a}=t,i=n(a);function s(e){for(var t=arguments.length,r=new Array(t>1?t-1:0),o=1;o=i&&console[e](...r)}return s.warn=o()(s).call(s,null,"warn"),s.error=o()(s).call(s,null,"error"),s.info=o()(s).call(s,null,"info"),s.debug=o()(s).call(s,null,"debug"),{rootInjects:{log:s}}}},67002:(e,t,r)=>{"use strict";r.r(t),r.d(t,{UPDATE_SELECTED_SERVER:()=>n,UPDATE_REQUEST_BODY_VALUE:()=>o,UPDATE_REQUEST_BODY_VALUE_RETAIN_FLAG:()=>a,UPDATE_REQUEST_BODY_INCLUSION:()=>i,UPDATE_ACTIVE_EXAMPLES_MEMBER:()=>s,UPDATE_REQUEST_CONTENT_TYPE:()=>l,UPDATE_RESPONSE_CONTENT_TYPE:()=>u,UPDATE_SERVER_VARIABLE_VALUE:()=>c,SET_REQUEST_BODY_VALIDATE_ERROR:()=>p,CLEAR_REQUEST_BODY_VALIDATE_ERROR:()=>f,CLEAR_REQUEST_BODY_VALUE:()=>h,setSelectedServer:()=>d,setRequestBodyValue:()=>m,setRetainRequestBodyValueFlag:()=>g,setRequestBodyInclusion:()=>v,setActiveExamplesMember:()=>y,setRequestContentType:()=>b,setResponseContentType:()=>w,setServerVariableValue:()=>E,setRequestBodyValidateError:()=>x,clearRequestBodyValidateError:()=>_,initRequestBodyValidateError:()=>S,clearRequestBodyValue:()=>A});const n="oas3_set_servers",o="oas3_set_request_body_value",a="oas3_set_request_body_retain_flag",i="oas3_set_request_body_inclusion",s="oas3_set_active_examples_member",l="oas3_set_request_content_type",u="oas3_set_response_content_type",c="oas3_set_server_variable_value",p="oas3_set_request_body_validate_error",f="oas3_clear_request_body_validate_error",h="oas3_clear_request_body_value";function d(e,t){return{type:n,payload:{selectedServerUrl:e,namespace:t}}}function m(e){let{value:t,pathMethod:r}=e;return{type:o,payload:{value:t,pathMethod:r}}}const g=e=>{let{value:t,pathMethod:r}=e;return{type:a,payload:{value:t,pathMethod:r}}};function v(e){let{value:t,pathMethod:r,name:n}=e;return{type:i,payload:{value:t,pathMethod:r,name:n}}}function 
y(e){let{name:t,pathMethod:r,contextType:n,contextName:o}=e;return{type:s,payload:{name:t,pathMethod:r,contextType:n,contextName:o}}}function b(e){let{value:t,pathMethod:r}=e;return{type:l,payload:{value:t,pathMethod:r}}}function w(e){let{value:t,path:r,method:n}=e;return{type:u,payload:{value:t,path:r,method:n}}}function E(e){let{server:t,namespace:r,key:n,val:o}=e;return{type:c,payload:{server:t,namespace:r,key:n,val:o}}}const x=e=>{let{path:t,method:r,validationErrors:n}=e;return{type:p,payload:{path:t,method:r,validationErrors:n}}},_=e=>{let{path:t,method:r}=e;return{type:f,payload:{path:t,method:r}}},S=e=>{let{pathMethod:t}=e;return{type:f,payload:{path:t[0],method:t[1]}}},A=e=>{let{pathMethod:t}=e;return{type:h,payload:{pathMethod:t}}}},73723:(e,t,r)=>{"use strict";r.r(t),r.d(t,{definitionsToAuthorize:()=>f});var n=r(86),o=r.n(n),a=r(14418),i=r.n(a),s=r(24282),l=r.n(s),u=r(20573),c=r(43393),p=r(7779);const f=(h=(0,u.P1)((e=>e),(e=>{let{specSelectors:t}=e;return t.securityDefinitions()}),((e,t)=>{var r;let n=(0,c.List)();return t?(o()(r=t.entrySeq()).call(r,(e=>{let[t,r]=e;const a=r.get("type");var s;if("oauth2"===a&&o()(s=r.get("flows").entrySeq()).call(s,(e=>{let[o,a]=e,s=(0,c.fromJS)({flow:o,authorizationUrl:a.get("authorizationUrl"),tokenUrl:a.get("tokenUrl"),scopes:a.get("scopes"),type:r.get("type"),description:r.get("description")});n=n.push(new c.Map({[t]:i()(s).call(s,(e=>void 0!==e))}))})),"http"!==a&&"apiKey"!==a||(n=n.push(new c.Map({[t]:r}))),"openIdConnect"===a&&r.get("openIdConnectData")){let e=r.get("openIdConnectData"),a=e.get("grant_types_supported")||["authorization_code","implicit"];o()(a).call(a,(o=>{var a;let s=e.get("scopes_supported")&&l()(a=e.get("scopes_supported")).call(a,((e,t)=>e.set(t,"")),new c.Map),u=(0,c.fromJS)({flow:o,authorizationUrl:e.get("authorization_endpoint"),tokenUrl:e.get("token_endpoint"),scopes:s,type:"oauth2",openIdConnectUrl:r.get("openIdConnectUrl")});n=n.push(new c.Map({[t]:i()(u).call(u,(e=>void 
0!==e))}))}))}})),n):n})),(e,t)=>function(){const r=t.getSystem().specSelectors.specJson();for(var n=arguments.length,o=new Array(n),a=0;a{"use strict";r.r(t),r.d(t,{default:()=>u});var n=r(23101),o=r.n(n),a=r(97606),i=r.n(a),s=r(67294),l=(r(23930),r(43393));const u=e=>{var t;let{callbacks:r,getComponent:n,specPath:a}=e;const u=n("OperationContainer",!0);if(!r)return s.createElement("span",null,"No callbacks");let c=i()(t=r.entrySeq()).call(t,(t=>{var r;let[n,c]=t;return s.createElement("div",{key:n},s.createElement("h2",null,n),i()(r=c.entrySeq()).call(r,(t=>{var r;let[c,p]=t;return"$$ref"===c?null:s.createElement("div",{key:c},i()(r=p.entrySeq()).call(r,(t=>{let[r,i]=t;if("$$ref"===r)return null;let p=(0,l.fromJS)({operation:i});return s.createElement(u,o()({},e,{op:p,key:r,tag:"",method:r,path:c,specPath:a.push(n,c,r),allowTryItOut:!1}))})))})))}));return s.createElement("div",null,c)}},86775:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>f});var n=r(61125),o=r.n(n),a=r(76986),i=r.n(a),s=r(14418),l=r.n(s),u=r(97606),c=r.n(u),p=r(67294);class f extends p.Component{constructor(e,t){super(e,t),o()(this,"onChange",(e=>{let{onChange:t}=this.props,{value:r,name:n}=e.target,o=i()({},this.state.value);n?o[n]=r:o=r,this.setState({value:o},(()=>t(this.state)))}));let{name:r,schema:n}=this.props,a=this.getValue();this.state={name:r,schema:n,value:a}}getValue(){let{name:e,authorized:t}=this.props;return t&&t.getIn([e,"value"])}render(){var e;let{schema:t,getComponent:r,errSelectors:n,name:o}=this.props;const a=r("Input"),i=r("Row"),s=r("Col"),u=r("authError"),f=r("Markdown",!0),h=r("JumpToPath",!0),d=(t.get("scheme")||"").toLowerCase();let m=this.getValue(),g=l()(e=n.allErrors()).call(e,(e=>e.get("authId")===o));if("basic"===d){var v;let e=m?m.get("username"):null;return p.createElement("div",null,p.createElement("h4",null,p.createElement("code",null,o||t.get("name")),"  (http, 
Basic)",p.createElement(h,{path:["securityDefinitions",o]})),e&&p.createElement("h6",null,"Authorized"),p.createElement(i,null,p.createElement(f,{source:t.get("description")})),p.createElement(i,null,p.createElement("label",null,"Username:"),e?p.createElement("code",null," ",e," "):p.createElement(s,null,p.createElement(a,{type:"text",required:"required",name:"username","aria-label":"auth-basic-username",onChange:this.onChange,autoFocus:!0}))),p.createElement(i,null,p.createElement("label",null,"Password:"),e?p.createElement("code",null," ****** "):p.createElement(s,null,p.createElement(a,{autoComplete:"new-password",name:"password",type:"password","aria-label":"auth-basic-password",onChange:this.onChange}))),c()(v=g.valueSeq()).call(v,((e,t)=>p.createElement(u,{error:e,key:t}))))}var y;return"bearer"===d?p.createElement("div",null,p.createElement("h4",null,p.createElement("code",null,o||t.get("name")),"  (http, Bearer)",p.createElement(h,{path:["securityDefinitions",o]})),m&&p.createElement("h6",null,"Authorized"),p.createElement(i,null,p.createElement(f,{source:t.get("description")})),p.createElement(i,null,p.createElement("label",null,"Value:"),m?p.createElement("code",null," ****** "):p.createElement(s,null,p.createElement(a,{type:"text","aria-label":"auth-bearer-value",onChange:this.onChange,autoFocus:!0}))),c()(y=g.valueSeq()).call(y,((e,t)=>p.createElement(u,{error:e,key:t})))):p.createElement("div",null,p.createElement("em",null,p.createElement("b",null,o)," HTTP authentication: unsupported scheme ",`'${d}'`))}}},76467:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>p});var n=r(33427),o=r(42458),a=r(15757),i=r(56617),s=r(9928),l=r(45327),u=r(86775),c=r(96796);const p={Callbacks:n.default,HttpAuth:u.default,RequestBody:o.default,Servers:i.default,ServersContainer:s.default,RequestBodyEditor:l.default,OperationServers:c.default,operationLink:a.default}},15757:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>u});var 
n=r(35627),o=r.n(n),a=r(97606),i=r.n(a),s=r(67294);r(23930);class l extends s.Component{render(){const{link:e,name:t,getComponent:r}=this.props,n=r("Markdown",!0);let a=e.get("operationId")||e.get("operationRef"),l=e.get("parameters")&&e.get("parameters").toJS(),u=e.get("description");return s.createElement("div",{className:"operation-link"},s.createElement("div",{className:"description"},s.createElement("b",null,s.createElement("code",null,t)),u?s.createElement(n,{source:u}):null),s.createElement("pre",null,"Operation `",a,"`",s.createElement("br",null),s.createElement("br",null),"Parameters ",function(e,t){var r;if("string"!=typeof t)return"";return i()(r=t.split("\n")).call(r,((t,r)=>r>0?Array(e+1).join(" ")+t:t)).join("\n")}(0,o()(l,null,2))||"{}",s.createElement("br",null)))}}const u=l},96796:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(61125),o=r.n(n),a=r(67294);r(23930);class i extends a.Component{constructor(){super(...arguments),o()(this,"setSelectedServer",(e=>{const{path:t,method:r}=this.props;return this.forceUpdate(),this.props.setSelectedServer(e,`${t}:${r}`)})),o()(this,"setServerVariableValue",(e=>{const{path:t,method:r}=this.props;return this.forceUpdate(),this.props.setServerVariableValue({...e,namespace:`${t}:${r}`})})),o()(this,"getSelectedServer",(()=>{const{path:e,method:t}=this.props;return this.props.getSelectedServer(`${e}:${t}`)})),o()(this,"getServerVariable",((e,t)=>{const{path:r,method:n}=this.props;return this.props.getServerVariable({namespace:`${r}:${n}`,server:e},t)})),o()(this,"getEffectiveServerValue",(e=>{const{path:t,method:r}=this.props;return this.props.getEffectiveServerValue({server:e,namespace:`${t}:${r}`})}))}render(){const{operationServers:e,pathServers:t,getComponent:r}=this.props;if(!e&&!t)return null;const n=r("Servers"),o=e||t,i=e?"operation":"path";return a.createElement("div",{className:"opblock-section 
operation-servers"},a.createElement("div",{className:"opblock-section-header"},a.createElement("div",{className:"tab-header"},a.createElement("h4",{className:"opblock-title"},"Servers"))),a.createElement("div",{className:"opblock-description-wrapper"},a.createElement("h4",{className:"message"},"These ",i,"-level options override the global server options."),a.createElement(n,{servers:o,currentServer:this.getSelectedServer(),setSelectedServer:this.setSelectedServer,setServerVariableValue:this.setServerVariableValue,getServerVariable:this.getServerVariable,getEffectiveServerValue:this.getEffectiveServerValue})))}}},45327:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>c});var n=r(61125),o=r.n(n),a=r(67294),i=r(94184),s=r.n(i),l=r(90242);const u=Function.prototype;class c extends a.PureComponent{constructor(e,t){super(e,t),o()(this,"applyDefaultValue",(e=>{const{onChange:t,defaultValue:r}=e||this.props;return this.setState({value:r}),t(r)})),o()(this,"onChange",(e=>{this.props.onChange((0,l.Pz)(e))})),o()(this,"onDomChange",(e=>{const t=e.target.value;this.setState({value:t},(()=>this.onChange(t)))})),this.state={value:(0,l.Pz)(e.value)||e.defaultValue},e.onChange(e.value)}UNSAFE_componentWillReceiveProps(e){this.props.value!==e.value&&e.value!==this.state.value&&this.setState({value:(0,l.Pz)(e.value)}),!e.value&&e.defaultValue&&this.state.value&&this.applyDefaultValue(e)}render(){let{getComponent:e,errors:t}=this.props,{value:r}=this.state,n=t.size>0;const o=e("TextArea");return a.createElement("div",{className:"body-param"},a.createElement(o,{className:s()("body-param__text",{invalid:n}),title:t.size?t.join(", "):"",value:r,onChange:this.onDomChange}))}}o()(c,"defaultProps",{onChange:u,userHasEditedBody:!1})},42458:(e,t,r)=>{"use strict";r.r(t),r.d(t,{getDefaultRequestBodyValue:()=>m,default:()=>g});var n=r(97606),o=r.n(n),a=r(11882),i=r.n(a),s=r(58118),l=r.n(s),u=r(58309),c=r.n(u),p=r(67294),f=(r(23930),r(43393)),h=r(90242),d=r(2518);const m=(e,t,r)=>{const 
n=e.getIn(["content",t]),o=n.get("schema").toJS(),a=void 0!==n.get("examples"),i=n.get("example"),s=a?n.getIn(["examples",r,"value"]):i,l=(0,h.xi)(o,t,{includeWriteOnly:!0},s);return(0,h.Pz)(l)},g=e=>{let{userHasEditedBody:t,requestBody:r,requestBodyValue:n,requestBodyInclusionSetting:a,requestBodyErrors:s,getComponent:u,getConfigs:g,specSelectors:v,fn:y,contentType:b,isExecute:w,specPath:E,onChange:x,onChangeIncludeEmpty:_,activeExamplesKey:S,updateActiveExamplesKey:A,setRetainRequestBodyValueFlag:k}=e;const C=e=>{x(e.target.files[0])},O=e=>{let t={key:e,shouldDispatchInit:!1,defaultValue:!0};return"no value"===a.get(e,"no value")&&(t.shouldDispatchInit=!0),t},j=u("Markdown",!0),I=u("modelExample"),N=u("RequestBodyEditor"),T=u("highlightCode"),P=u("ExamplesSelectValueRetainer"),R=u("Example"),M=u("ParameterIncludeEmpty"),{showCommonExtensions:D}=g(),L=r&&r.get("description")||null,B=r&&r.get("content")||new f.OrderedMap;b=b||B.keySeq().first()||"";const F=B.get(b,(0,f.OrderedMap)()),z=F.get("schema",(0,f.OrderedMap)()),U=F.get("examples",null),q=null==U?void 0:o()(U).call(U,((e,t)=>{var n;const o=null===(n=e)||void 0===n?void 0:n.get("value",null);return o&&(e=e.set("value",m(r,b,t),o)),e}));if(s=f.List.isList(s)?s:(0,f.List)(),!F.size)return null;const V="object"===F.getIn(["schema","type"]),$="binary"===F.getIn(["schema","format"]),W="base64"===F.getIn(["schema","format"]);if("application/octet-stream"===b||0===i()(b).call(b,"image/")||0===i()(b).call(b,"audio/")||0===i()(b).call(b,"video/")||$||W){const e=u("Input");return w?p.createElement(e,{type:"file",onChange:C}):p.createElement("i",null,"Example values are not available for ",p.createElement("code",null,b)," media types.")}if(V&&("application/x-www-form-urlencoded"===b||0===i()(b).call(b,"multipart/"))&&z.get("properties",(0,f.OrderedMap)()).size>0){var H;const e=u("JsonSchemaForm"),t=u("ParameterExt"),r=z.get("properties",(0,f.OrderedMap)());return 
n=f.Map.isMap(n)?n:(0,f.OrderedMap)(),p.createElement("div",{className:"table-container"},L&&p.createElement(j,{source:L}),p.createElement("table",null,p.createElement("tbody",null,f.Map.isMap(r)&&o()(H=r.entrySeq()).call(H,(r=>{var i,d;let[m,g]=r;if(g.get("readOnly"))return;let v=D?(0,h.po)(g):null;const b=l()(i=z.get("required",(0,f.List)())).call(i,m),E=g.get("type"),S=g.get("format"),A=g.get("description"),k=n.getIn([m,"value"]),C=n.getIn([m,"errors"])||s,I=a.get(m)||!1,N=g.has("default")||g.has("example")||g.hasIn(["items","example"])||g.hasIn(["items","default"]),T=g.has("enum")&&(1===g.get("enum").size||b),P=N||T;let R="";"array"!==E||P||(R=[]),("object"===E||P)&&(R=(0,h.xi)(g,!1,{includeWriteOnly:!0})),"string"!=typeof R&&"object"===E&&(R=(0,h.Pz)(R)),"string"==typeof R&&"array"===E&&(R=JSON.parse(R));const L="string"===E&&("binary"===S||"base64"===S);return p.createElement("tr",{key:m,className:"parameters","data-property-name":m},p.createElement("td",{className:"parameters-col_name"},p.createElement("div",{className:b?"parameter__name required":"parameter__name"},m,b?p.createElement("span",null," *"):null),p.createElement("div",{className:"parameter__type"},E,S&&p.createElement("span",{className:"prop-format"},"($",S,")"),D&&v.size?o()(d=v.entrySeq()).call(d,(e=>{let[r,n]=e;return p.createElement(t,{key:`${r}-${n}`,xKey:r,xVal:n})})):null),p.createElement("div",{className:"parameter__deprecated"},g.get("deprecated")?"deprecated":null)),p.createElement("td",{className:"parameters-col_description"},p.createElement(j,{source:A}),w?p.createElement("div",null,p.createElement(e,{fn:y,dispatchInitialValue:!L,schema:g,description:m,getComponent:u,value:void 0===k?R:k,required:b,errors:C,onChange:e=>{x(e,[m])}}),b?null:p.createElement(M,{onChange:e=>_(m,e),isIncluded:I,isIncludedOptions:O(m),isDisabled:c()(k)?0!==k.length:!(0,h.O2)(k)})):null))})))))}const J=m(r,b,S);let 
K=null;return(0,d.O)(J)&&(K="json"),p.createElement("div",null,L&&p.createElement(j,{source:L}),q?p.createElement(P,{userHasEditedBody:t,examples:q,currentKey:S,currentUserInputValue:n,onSelect:e=>{A(e)},updateValue:x,defaultToFirstExample:!0,getComponent:u,setRetainRequestBodyValueFlag:k}):null,w?p.createElement("div",null,p.createElement(N,{value:n,errors:s,defaultValue:J,onChange:x,getComponent:u})):p.createElement(I,{getComponent:u,getConfigs:g,specSelectors:v,expandDepth:1,isExecute:w,schema:F.get("schema"),specPath:E.push("content",b),example:p.createElement(T,{className:"body-param__example",getConfigs:g,language:K,value:(0,h.Pz)(n)||J}),includeWriteOnly:!0}),q?p.createElement(R,{example:q.get(S),getComponent:u,getConfigs:g}):null)}},9928:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(67294);class o extends n.Component{render(){const{specSelectors:e,oas3Selectors:t,oas3Actions:r,getComponent:o}=this.props,a=e.servers(),i=o("Servers");return a&&a.size?n.createElement("div",null,n.createElement("span",{className:"servers-title"},"Servers"),n.createElement(i,{servers:a,currentServer:t.selectedServer(),setSelectedServer:r.setSelectedServer,setServerVariableValue:r.setServerVariableValue,getServerVariable:t.serverVariableValue,getEffectiveServerValue:t.serverEffectiveValue})):null}}},56617:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>p});var n=r(61125),o=r.n(n),a=r(51679),i=r.n(a),s=r(97606),l=r.n(s),u=r(67294),c=r(43393);r(23930);class p extends u.Component{constructor(){super(...arguments),o()(this,"onServerChange",(e=>{this.setServer(e.target.value)})),o()(this,"onServerVariableValueChange",(e=>{let{setServerVariableValue:t,currentServer:r}=this.props,n=e.target.getAttribute("data-variable"),o=e.target.value;"function"==typeof t&&t({server:r,key:n,val:o})})),o()(this,"setServer",(e=>{let{setSelectedServer:t}=this.props;t(e)}))}componentDidMount(){var e;let{servers:t,currentServer:r}=this.props;r||this.setServer(null===(e=t.first())||void 
0===e?void 0:e.get("url"))}UNSAFE_componentWillReceiveProps(e){let{servers:t,setServerVariableValue:r,getServerVariable:n}=e;if(this.props.currentServer!==e.currentServer||this.props.servers!==e.servers){var o;let a=i()(t).call(t,(t=>t.get("url")===e.currentServer)),s=i()(o=this.props.servers).call(o,(e=>e.get("url")===this.props.currentServer))||(0,c.OrderedMap)();if(!a)return this.setServer(t.first().get("url"));let u=s.get("variables")||(0,c.OrderedMap)(),p=(i()(u).call(u,(e=>e.get("default")))||(0,c.OrderedMap)()).get("default"),f=a.get("variables")||(0,c.OrderedMap)(),h=(i()(f).call(f,(e=>e.get("default")))||(0,c.OrderedMap)()).get("default");l()(f).call(f,((t,o)=>{n(e.currentServer,o)&&p===h||r({server:e.currentServer,key:o,val:t.get("default")||""})}))}}render(){var e,t;let{servers:r,currentServer:n,getServerVariable:o,getEffectiveServerValue:a}=this.props,s=(i()(r).call(r,(e=>e.get("url")===n))||(0,c.OrderedMap)()).get("variables")||(0,c.OrderedMap)(),p=0!==s.size;return u.createElement("div",{className:"servers"},u.createElement("label",{htmlFor:"servers"},u.createElement("select",{onChange:this.onServerChange,value:n},l()(e=r.valueSeq()).call(e,(e=>u.createElement("option",{value:e.get("url"),key:e.get("url")},e.get("url"),e.get("description")&&` - ${e.get("description")}`))).toArray())),p?u.createElement("div",null,u.createElement("div",{className:"computed-url"},"Computed URL:",u.createElement("code",null,a(n))),u.createElement("h4",null,"Server variables"),u.createElement("table",null,u.createElement("tbody",null,l()(t=s.entrySeq()).call(t,(e=>{var t;let[r,a]=e;return 
u.createElement("tr",{key:r},u.createElement("td",null,r),u.createElement("td",null,a.get("enum")?u.createElement("select",{"data-variable":r,onChange:this.onServerVariableValueChange},l()(t=a.get("enum")).call(t,(e=>u.createElement("option",{selected:e===o(n,r),key:e,value:e},e)))):u.createElement("input",{type:"text",value:o(n,r)||"",onChange:this.onServerVariableValueChange,"data-variable":r})))}))))):null)}}},7779:(e,t,r)=>{"use strict";r.r(t),r.d(t,{isOAS3:()=>l,isSwagger2:()=>u,OAS3ComponentWrapFactory:()=>c});var n=r(23101),o=r.n(n),a=r(27043),i=r.n(a),s=r(67294);function l(e){const t=e.get("openapi");return"string"==typeof t&&(i()(t).call(t,"3.0.")&&t.length>4)}function u(e){const t=e.get("swagger");return"string"==typeof t&&i()(t).call(t,"2.0")}function c(e){return(t,r)=>n=>{if(r&&r.specSelectors&&r.specSelectors.specJson){return l(r.specSelectors.specJson())?s.createElement(e,o()({},n,r,{Ori:t})):s.createElement(t,n)}return console.warn("OAS3 wrapper: couldn't get spec"),null}}},97451:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>p});var n=r(92044),o=r(73723),a=r(91741),i=r(76467),s=r(37761),l=r(67002),u=r(5065),c=r(62109);function p(){return{components:i.default,wrapComponents:s.default,statePlugins:{spec:{wrapSelectors:n,selectors:a},auth:{wrapSelectors:o},oas3:{actions:l,reducers:c.default,selectors:u}}}}},62109:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>p});var n=r(8712),o=r.n(n),a=r(86),i=r.n(a),s=r(24282),l=r.n(s),u=r(43393),c=r(67002);const p={[c.UPDATE_SELECTED_SERVER]:(e,t)=>{let{payload:{selectedServerUrl:r,namespace:n}}=t;const o=n?[n,"selectedServer"]:["selectedServer"];return e.setIn(o,r)},[c.UPDATE_REQUEST_BODY_VALUE]:(e,t)=>{let{payload:{value:r,pathMethod:n}}=t,[a,s]=n;if(!u.Map.isMap(r))return e.setIn(["requestData",a,s,"bodyValue"],r);let l,c=e.getIn(["requestData",a,s,"bodyValue"])||(0,u.Map)();u.Map.isMap(c)||(c=(0,u.Map)());const[...p]=o()(r).call(r);return i()(p).call(p,(e=>{let 
t=r.getIn([e]);c.has(e)&&u.Map.isMap(t)||(l=c.setIn([e,"value"],t))})),e.setIn(["requestData",a,s,"bodyValue"],l)},[c.UPDATE_REQUEST_BODY_VALUE_RETAIN_FLAG]:(e,t)=>{let{payload:{value:r,pathMethod:n}}=t,[o,a]=n;return e.setIn(["requestData",o,a,"retainBodyValue"],r)},[c.UPDATE_REQUEST_BODY_INCLUSION]:(e,t)=>{let{payload:{value:r,pathMethod:n,name:o}}=t,[a,i]=n;return e.setIn(["requestData",a,i,"bodyInclusion",o],r)},[c.UPDATE_ACTIVE_EXAMPLES_MEMBER]:(e,t)=>{let{payload:{name:r,pathMethod:n,contextType:o,contextName:a}}=t,[i,s]=n;return e.setIn(["examples",i,s,o,a,"activeExample"],r)},[c.UPDATE_REQUEST_CONTENT_TYPE]:(e,t)=>{let{payload:{value:r,pathMethod:n}}=t,[o,a]=n;return e.setIn(["requestData",o,a,"requestContentType"],r)},[c.UPDATE_RESPONSE_CONTENT_TYPE]:(e,t)=>{let{payload:{value:r,path:n,method:o}}=t;return e.setIn(["requestData",n,o,"responseContentType"],r)},[c.UPDATE_SERVER_VARIABLE_VALUE]:(e,t)=>{let{payload:{server:r,namespace:n,key:o,val:a}}=t;const i=n?[n,"serverVariableValues",r,o]:["serverVariableValues",r,o];return e.setIn(i,a)},[c.SET_REQUEST_BODY_VALIDATE_ERROR]:(e,t)=>{let{payload:{path:r,method:n,validationErrors:o}}=t,a=[];if(a.push("Required field is not provided"),o.missingBodyValue)return e.setIn(["requestData",r,n,"errors"],(0,u.fromJS)(a));if(o.missingRequiredKeys&&o.missingRequiredKeys.length>0){const{missingRequiredKeys:t}=o;return e.updateIn(["requestData",r,n,"bodyValue"],(0,u.fromJS)({}),(e=>l()(t).call(t,((e,t)=>e.setIn([t,"errors"],(0,u.fromJS)(a))),e)))}return console.warn("unexpected result: SET_REQUEST_BODY_VALIDATE_ERROR"),e},[c.CLEAR_REQUEST_BODY_VALIDATE_ERROR]:(e,t)=>{let{payload:{path:r,method:n}}=t;const a=e.getIn(["requestData",r,n,"bodyValue"]);if(!u.Map.isMap(a))return e.setIn(["requestData",r,n,"errors"],(0,u.fromJS)([]));const[...i]=o()(a).call(a);return 
i?e.updateIn(["requestData",r,n,"bodyValue"],(0,u.fromJS)({}),(e=>l()(i).call(i,((e,t)=>e.setIn([t,"errors"],(0,u.fromJS)([]))),e))):e},[c.CLEAR_REQUEST_BODY_VALUE]:(e,t)=>{let{payload:{pathMethod:r}}=t,[n,o]=r;const a=e.getIn(["requestData",n,o,"bodyValue"]);return a?u.Map.isMap(a)?e.setIn(["requestData",n,o,"bodyValue"],(0,u.Map)()):e.setIn(["requestData",n,o,"bodyValue"],""):e}}},5065:(e,t,r)=>{"use strict";r.r(t),r.d(t,{selectedServer:()=>g,requestBodyValue:()=>v,shouldRetainRequestBodyValue:()=>y,selectDefaultRequestBodyValue:()=>b,hasUserEditedBody:()=>w,requestBodyInclusionSetting:()=>E,requestBodyErrors:()=>x,activeExamplesMember:()=>_,requestContentType:()=>S,responseContentType:()=>A,serverVariableValue:()=>k,serverVariables:()=>C,serverEffectiveValue:()=>O,validateBeforeExecute:()=>j,validateShallowRequired:()=>N});var n=r(97606),o=r.n(n),a=r(86),i=r.n(a),s=r(28222),l=r.n(s),u=r(11882),c=r.n(u),p=r(43393),f=r(7779),h=r(42458),d=r(90242);function m(e){return function(){for(var t=arguments.length,r=new Array(t),n=0;n{const n=t.getSystem().specSelectors.specJson();return(0,f.isOAS3)(n)?e(...r):null}}}const g=m(((e,t)=>{const r=t?[t,"selectedServer"]:["selectedServer"];return e.getIn(r)||""})),v=m(((e,t,r)=>e.getIn(["requestData",t,r,"bodyValue"])||null)),y=m(((e,t,r)=>e.getIn(["requestData",t,r,"retainBodyValue"])||!1)),b=(e,t,r)=>e=>{const{oas3Selectors:n,specSelectors:o}=e.getSystem(),a=o.specJson();if((0,f.isOAS3)(a)){const e=n.requestContentType(t,r);if(e)return(0,h.getDefaultRequestBodyValue)(o.specResolvedSubtree(["paths",t,r,"requestBody"]),e,n.activeExamplesMember(t,r,"requestBody","requestBody"))}return null},w=(e,t,r)=>e=>{const{oas3Selectors:n,specSelectors:o}=e.getSystem(),a=o.specJson();if((0,f.isOAS3)(a)){let e=!1;const a=n.requestContentType(t,r);let i=n.requestBodyValue(t,r);if(p.Map.isMap(i)&&(i=(0,d.Pz)(i.mapEntries((e=>p.Map.isMap(e[1])?[e[0],e[1].get("value")]:e)).toJS())),p.List.isList(i)&&(i=(0,d.Pz)(i)),a){const 
s=(0,h.getDefaultRequestBodyValue)(o.specResolvedSubtree(["paths",t,r,"requestBody"]),a,n.activeExamplesMember(t,r,"requestBody","requestBody"));e=!!i&&i!==s}return e}return null},E=m(((e,t,r)=>e.getIn(["requestData",t,r,"bodyInclusion"])||(0,p.Map)())),x=m(((e,t,r)=>e.getIn(["requestData",t,r,"errors"])||null)),_=m(((e,t,r,n,o)=>e.getIn(["examples",t,r,n,o,"activeExample"])||null)),S=m(((e,t,r)=>e.getIn(["requestData",t,r,"requestContentType"])||null)),A=m(((e,t,r)=>e.getIn(["requestData",t,r,"responseContentType"])||null)),k=m(((e,t,r)=>{let n;if("string"!=typeof t){const{server:e,namespace:o}=t;n=o?[o,"serverVariableValues",e,r]:["serverVariableValues",e,r]}else{n=["serverVariableValues",t,r]}return e.getIn(n)||null})),C=m(((e,t)=>{let r;if("string"!=typeof t){const{server:e,namespace:n}=t;r=n?[n,"serverVariableValues",e]:["serverVariableValues",e]}else{r=["serverVariableValues",t]}return e.getIn(r)||(0,p.OrderedMap)()})),O=m(((e,t)=>{var r,n;if("string"!=typeof t){const{server:o,namespace:a}=t;n=o,r=a?e.getIn([a,"serverVariableValues",n]):e.getIn(["serverVariableValues",n])}else n=t,r=e.getIn(["serverVariableValues",n]);r=r||(0,p.OrderedMap)();let a=n;return o()(r).call(r,((e,t)=>{a=a.replace(new RegExp(`{${t}}`,"g"),e)})),a})),j=(I=(e,t)=>((e,t)=>(t=t||[],!!e.getIn(["requestData",...t,"bodyValue"])))(e,t),function(){for(var e=arguments.length,t=new Array(e),r=0;r{const r=e.getSystem().specSelectors.specJson();let n=[...t][1]||[];return!r.getIn(["paths",...n,"requestBody","required"])||I(...t)}});var I;const N=(e,t)=>{var r;let{oas3RequiredRequestBodyContentType:n,oas3RequestContentType:o,oas3RequestBodyValue:a}=t,s=[];if(!p.Map.isMap(a))return s;let u=[];return i()(r=l()(n.requestContentType)).call(r,(e=>{if(e===o){let t=n.requestContentType[e];i()(t).call(t,(e=>{c()(u).call(u,e)<0&&u.push(e)}))}})),i()(u).call(u,(e=>{a.getIn([e,"value"])||s.push(e)})),s}},91741:(e,t,r)=>{"use strict";r.r(t),r.d(t,{servers:()=>u,isSwagger2:()=>p});var 
n=r(20573),o=r(43393),a=r(7779);const i=e=>e||(0,o.Map)(),s=(0,n.P1)(i,(e=>e.get("json",(0,o.Map)()))),l=(0,n.P1)(i,(e=>e.get("resolved",(0,o.Map)()))),u=(c=(0,n.P1)((e=>{let t=l(e);return t.count()<1&&(t=s(e)),t}),(e=>e.getIn(["servers"])||(0,o.Map)())),()=>function(e){const t=e.getSystem().specSelectors.specJson();if((0,a.isOAS3)(t)){for(var r=arguments.length,n=new Array(r>1?r-1:0),o=1;o()=>{const e=t.getSystem().specSelectors.specJson();return(0,a.isSwagger2)(e)}},92044:(e,t,r)=>{"use strict";r.r(t),r.d(t,{definitions:()=>h,hasHost:()=>d,securityDefinitions:()=>m,host:()=>g,basePath:()=>v,consumes:()=>y,produces:()=>b,schemes:()=>w,servers:()=>E,isOAS3:()=>x,isSwagger2:()=>_});var n=r(20573),o=r(33881),a=r(43393),i=r(7779);function s(e){return(t,r)=>function(){const n=r.getSystem().specSelectors.specJson();return(0,i.isOAS3)(n)?e(...arguments):t(...arguments)}}const l=e=>e||(0,a.Map)(),u=s((0,n.P1)((()=>null))),c=(0,n.P1)(l,(e=>e.get("json",(0,a.Map)()))),p=(0,n.P1)(l,(e=>e.get("resolved",(0,a.Map)()))),f=e=>{let t=p(e);return t.count()<1&&(t=c(e)),t},h=s((0,n.P1)(f,(e=>{const t=e.getIn(["components","schemas"]);return a.Map.isMap(t)?t:(0,a.Map)()}))),d=s((e=>f(e).hasIn(["servers",0]))),m=s((0,n.P1)(o.specJsonWithResolvedSubtrees,(e=>e.getIn(["components","securitySchemes"])||null))),g=u,v=u,y=u,b=u,w=u,E=s((0,n.P1)(f,(e=>e.getIn(["servers"])||(0,a.Map)()))),x=(e,t)=>()=>{const e=t.getSystem().specSelectors.specJson();return(0,i.isOAS3)(a.Map.isMap(e)?e:(0,a.Map)())},_=(e,t)=>()=>{const e=t.getSystem().specSelectors.specJson();return(0,i.isSwagger2)(a.Map.isMap(e)?e:(0,a.Map)())}},70356:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(67294);const 
o=(0,r(7779).OAS3ComponentWrapFactory)((e=>{let{Ori:t,...r}=e;const{schema:o,getComponent:a,errSelectors:i,authorized:s,onAuthChange:l,name:u}=r,c=a("HttpAuth");return"http"===o.get("type")?n.createElement(c,{key:u,schema:o,name:u,errSelectors:i,authorized:s,getComponent:a,onChange:l}):n.createElement(t,r)}))},37761:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>u});var n=r(22460),o=r(70356),a=r(69487),i=r(50058),s=r(53499),l=r(90287);const u={Markdown:n.default,AuthItem:o.default,JsonSchema_string:l.default,VersionStamp:a.default,model:s.default,onlineValidatorBadge:i.default}},90287:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(67294);const o=(0,r(7779).OAS3ComponentWrapFactory)((e=>{let{Ori:t,...r}=e;const{schema:o,getComponent:a,errors:i,onChange:s}=r,l=o&&o.get?o.get("format"):null,u=o&&o.get?o.get("type"):null,c=a("Input");return u&&"string"===u&&l&&("binary"===l||"base64"===l)?n.createElement(c,{type:"file",className:i.length?"invalid":"",title:i.length?i:"",onChange:e=>{s(e.target.files[0])},disabled:t.isDisabled}):n.createElement(t,r)}))},22460:(e,t,r)=>{"use strict";r.r(t),r.d(t,{Markdown:()=>f,default:()=>h});var n=r(81607),o=r.n(n),a=r(67294),i=r(94184),s=r.n(i),l=r(89927),u=r(7779),c=r(86019);const p=new l._("commonmark");p.block.ruler.enable(["table"]),p.set({linkTarget:"_blank"});const f=e=>{let{source:t,className:r="",getConfigs:n}=e;if("string"!=typeof t)return null;if(t){const{useUnsafeMarkdown:e}=n(),i=p.render(t),l=(0,c.s)(i,{useUnsafeMarkdown:e});let u;return"string"==typeof l&&(u=o()(l).call(l)),a.createElement("div",{dangerouslySetInnerHTML:{__html:u},className:s()(r,"renderedMarkdown")})}return null};f.defaultProps={getConfigs:()=>({useUnsafeMarkdown:!1})};const h=(0,u.OAS3ComponentWrapFactory)(f)},53499:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>u});var n=r(23101),o=r.n(n),a=r(67294),i=r(7779),s=r(53795);class l extends 
a.Component{render(){let{getConfigs:e,schema:t}=this.props,r=["model-box"],n=null;return!0===t.get("deprecated")&&(r.push("deprecated"),n=a.createElement("span",{className:"model-deprecated-warning"},"Deprecated:")),a.createElement("div",{className:r.join(" ")},n,a.createElement(s.Z,o()({},this.props,{getConfigs:e,depth:1,expandDepth:this.props.expandDepth||0})))}}const u=(0,i.OAS3ComponentWrapFactory)(l)},50058:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>a});var n=r(7779),o=r(5623);const a=(0,n.OAS3ComponentWrapFactory)(o.Z)},69487:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(67294);const o=(0,r(7779).OAS3ComponentWrapFactory)((e=>{const{Ori:t}=e;return n.createElement("span",null,n.createElement(t,e),n.createElement("small",{className:"version-stamp"},n.createElement("pre",{className:"version"},"OAS3")))}))},28560:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(87198),o=r.n(n);let a=!1;function i(){return{statePlugins:{spec:{wrapActions:{updateSpec:e=>function(){return a=!0,e(...arguments)},updateJsonSpec:(e,t)=>function(){const r=t.getConfigs().onComplete;return a&&"function"==typeof r&&(o()(r,0),a=!1),e(...arguments)}}}}}}},92135:(e,t,r)=>{"use strict";r.r(t),r.d(t,{requestSnippetGenerator_curl_powershell:()=>A,requestSnippetGenerator_curl_bash:()=>k,requestSnippetGenerator_curl_cmd:()=>C});var n=r(11882),o=r.n(n),a=r(81607),i=r.n(a),s=r(35627),l=r.n(s),u=r(97606),c=r.n(u),p=r(12196),f=r.n(p),h=r(74386),d=r.n(h),m=r(58118),g=r.n(m),v=r(27504),y=r(43393);const b=e=>{var t;const r="_**[]";return o()(e).call(e,r)<0?e:i()(t=e.split(r)[0]).call(t)},w=e=>"-d "===e||/^[_\/-]/g.test(e)?e:"'"+e.replace(/'/g,"'\\''")+"'",E=e=>"-d "===(e=e.replace(/\^/g,"^^").replace(/\\"/g,'\\\\"').replace(/"/g,'""').replace(/\n/g,"^\n"))?e.replace(/-d /g,"-d ^\n"):/^[_\/-]/g.test(e)?e:'"'+e+'"',x=e=>"-d 
"===e?e:/\n/.test(e)?'@"\n'+e.replace(/"/g,'\\"').replace(/`/g,"``").replace(/\$/,"`$")+'\n"@':/^[_\/-]/g.test(e)?e:"'"+e.replace(/"/g,'""').replace(/'/g,"''")+"'";function _(e){let t=[];for(let[r,n]of e.get("body").entrySeq()){let e=b(r);n instanceof v.Z.File?t.push(` "${e}": {\n "name": "${n.name}"${n.type?`,\n "type": "${n.type}"`:""}\n }`):t.push(` "${e}": ${l()(n,null,2).replace(/(\r\n|\r|\n)/g,"\n ")}`)}return`{\n${t.join(",\n")}\n}`}const S=function(e,t,r){let n=arguments.length>3&&void 0!==arguments[3]?arguments[3]:"",o=!1,a="";const i=function(){for(var e=arguments.length,r=new Array(e),n=0;na+=` ${r}`,p=function(){var e;let t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1;return a+=f()(e=" ").call(e,t)};let h=e.get("headers");if(a+="curl"+n,e.has("curlOptions")&&i(...e.get("curlOptions")),i("-X",e.get("method")),u(),p(),s(`${e.get("url")}`),h&&h.size)for(let t of d()(m=e.get("headers")).call(m)){var m;u(),p();let[e,r]=t;s("-H",`${e}: ${r}`),o=o||/^content-type$/i.test(e)&&/^multipart\/form-data$/i.test(r)}const w=e.get("body");var E;if(w)if(o&&g()(E=["POST","PUT","PATCH"]).call(E,e.get("method")))for(let[e,t]of w.entrySeq()){let r=b(e);u(),p(),s("-F"),t instanceof v.Z.File?i(`${r}=@${t.name}${t.type?`;type=${t.type}`:""}`):i(`${r}=${t}`)}else if(w instanceof v.Z.File)u(),p(),s(`--data-binary '@${w.name}'`);else{u(),p(),s("-d ");let t=w;y.Map.isMap(t)?s(_(e)):("string"!=typeof t&&(t=l()(t)),s(t))}else w||"POST"!==e.get("method")||(u(),p(),s("-d ''"));return a},A=e=>S(e,x,"`\n",".exe"),k=e=>S(e,w,"\\\n"),C=e=>S(e,E,"^\n")},86575:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>i});var n=r(92135),o=r(4669),a=r(84206);const i=()=>({components:{RequestSnippets:a.default},fn:n,statePlugins:{requestSnippets:{selectors:o}}})},84206:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>w});var n=r(14418),o=r.n(n),a=r(25110),i=r.n(a),s=r(86),l=r.n(s),u=r(97606),c=r.n(u),p=r(67294),f=r(27361),h=r.n(f),d=r(23560),m=r.n(d),g=r(74855),v=r(33424);const 
y={cursor:"pointer",lineHeight:1,display:"inline-flex",backgroundColor:"rgb(250, 250, 250)",paddingBottom:"0",paddingTop:"0",border:"1px solid rgb(51, 51, 51)",borderRadius:"4px 4px 0 0",boxShadow:"none",borderBottom:"none"},b={cursor:"pointer",lineHeight:1,display:"inline-flex",backgroundColor:"rgb(51, 51, 51)",boxShadow:"none",border:"1px solid rgb(51, 51, 51)",paddingBottom:"0",paddingTop:"0",borderRadius:"4px 4px 0 0",marginTop:"-5px",marginRight:"-5px",marginLeft:"-5px",zIndex:"9999",borderBottom:"none"},w=e=>{var t,r;let{request:n,requestSnippetsSelectors:a,getConfigs:s}=e;const u=m()(s)?s():null,f=!1!==h()(u,"syntaxHighlight")&&h()(u,"syntaxHighlight.activated",!0),d=(0,p.useRef)(null),[w,E]=(0,p.useState)(null===(t=a.getSnippetGenerators())||void 0===t?void 0:t.keySeq().first()),[x,_]=(0,p.useState)(null==a?void 0:a.getDefaultExpanded());(0,p.useEffect)((()=>{}),[]),(0,p.useEffect)((()=>{var e;const t=o()(e=i()(d.current.childNodes)).call(e,(e=>{var t;return!!e.nodeType&&(null===(t=e.classList)||void 0===t?void 0:t.contains("curl-command"))}));return l()(t).call(t,(e=>e.addEventListener("mousewheel",j,{passive:!1}))),()=>{l()(t).call(t,(e=>e.removeEventListener("mousewheel",j)))}}),[n]);const S=a.getSnippetGenerators(),A=S.get(w),k=A.get("fn")(n),C=()=>{_(!x)},O=e=>e===w?b:y,j=e=>{const{target:t,deltaY:r}=e,{scrollHeight:n,offsetHeight:o,scrollTop:a}=t;n>o&&(0===a&&r<0||o+a>=n&&r>0)&&e.preventDefault()},I=f?p.createElement(v.d3,{language:A.get("syntax"),className:"curl microlight",style:(0,v.C2)(h()(u,"syntaxHighlight.theme"))},k):p.createElement("textarea",{readOnly:!0,className:"curl",value:k});return 
p.createElement("div",{className:"request-snippets",ref:d},p.createElement("div",{style:{width:"100%",display:"flex",justifyContent:"flex-start",alignItems:"center",marginBottom:"15px"}},p.createElement("h4",{onClick:()=>C(),style:{cursor:"pointer"}},"Snippets"),p.createElement("button",{onClick:()=>C(),style:{border:"none",background:"none"},title:x?"Collapse operation":"Expand operation"},p.createElement("svg",{className:"arrow",width:"10",height:"10"},p.createElement("use",{href:x?"#large-arrow-down":"#large-arrow",xlinkHref:x?"#large-arrow-down":"#large-arrow"})))),x&&p.createElement("div",{className:"curl-command"},p.createElement("div",{style:{paddingLeft:"15px",paddingRight:"10px",width:"100%",display:"flex"}},c()(r=S.entrySeq()).call(r,(e=>{let[t,r]=e;return p.createElement("div",{style:O(t),className:"btn",key:t,onClick:()=>(e=>{w!==e&&E(e)})(t)},p.createElement("h4",{style:t===w?{color:"white"}:{}},r.get("title")))}))),p.createElement("div",{className:"copy-to-clipboard"},p.createElement(g.CopyToClipboard,{text:k},p.createElement("button",null))),p.createElement("div",null,I)))}},4669:(e,t,r)=>{"use strict";r.r(t),r.d(t,{getGenerators:()=>f,getSnippetGenerators:()=>h,getActiveLanguage:()=>d,getDefaultExpanded:()=>m});var n=r(14418),o=r.n(n),a=r(58118),i=r.n(a),s=r(97606),l=r.n(s),u=r(20573),c=r(43393);const p=e=>e||(0,c.Map)(),f=(0,u.P1)(p,(e=>{const t=e.get("languages"),r=e.get("generators",(0,c.Map)());return!t||t.isEmpty()?r:o()(r).call(r,((e,r)=>i()(t).call(t,r)))})),h=e=>t=>{var r,n;let{fn:a}=t;return o()(r=l()(n=f(e)).call(n,((e,t)=>{const r=(e=>a[`requestSnippetGenerator_${e}`])(t);return"function"!=typeof r?null:e.set("fn",r)}))).call(r,(e=>e))},d=(0,u.P1)(p,(e=>e.get("activeLanguage"))),m=(0,u.P1)(p,(e=>e.get("defaultExpanded")))},36195:(e,t,r)=>{"use strict";r.r(t),r.d(t,{ErrorBoundary:()=>i,default:()=>s});var n=r(67294),o=r(56189),a=r(29403);class i extends n.Component{static 
getDerivedStateFromError(e){return{hasError:!0,error:e}}constructor(){super(...arguments),this.state={hasError:!1,error:null}}componentDidCatch(e,t){this.props.fn.componentDidCatch(e,t)}render(){const{getComponent:e,targetName:t,children:r}=this.props;if(this.state.hasError){const r=e("Fallback");return n.createElement(r,{name:t})}return r}}i.defaultProps={targetName:"this component",getComponent:()=>a.default,fn:{componentDidCatch:o.componentDidCatch},children:null};const s=i},29403:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(67294);const o=e=>{let{name:t}=e;return n.createElement("div",{className:"fallback"},"😱 ",n.createElement("i",null,"Could not render ","t"===t?"this component":t,", see the console."))}},56189:(e,t,r)=>{"use strict";r.r(t),r.d(t,{componentDidCatch:()=>i,withErrorBoundary:()=>s});var n=r(23101),o=r.n(n),a=r(67294);const i=console.error,s=e=>t=>{const{getComponent:r,fn:n}=e(),i=r("ErrorBoundary"),s=n.getDisplayName(t);class l extends a.Component{render(){return a.createElement(i,{targetName:s,getComponent:r,fn:n},a.createElement(t,o()({},this.props,this.context)))}}var u;return l.displayName=`WithErrorBoundary(${s})`,(u=t).prototype&&u.prototype.isReactComponent&&(l.prototype.mapStateToProps=t.prototype.mapStateToProps),l}},27621:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>c});var n=r(47475),o=r.n(n),a=r(7287),i=r.n(a),s=r(36195),l=r(29403),u=r(56189);const c=function(){let{componentList:e=[],fullOverride:t=!1}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return r=>{var n;let{getSystem:a}=r;const c=t?e:["App","BaseLayout","VersionPragmaFilter","InfoContainer","ServersContainer","SchemesContainer","AuthorizeBtnContainer","FilterContainer","Operations","OperationContainer","parameters","responses","OperationServers","Models","ModelWrapper",...e],p=i()(c,o()(n=Array(c.length)).call(n,((e,t)=>{let{fn:r}=t;return 
r.withErrorBoundary(e)})));return{fn:{componentDidCatch:u.componentDidCatch,withErrorBoundary:(0,u.withErrorBoundary)(a)},components:{ErrorBoundary:s.default,Fallback:l.default},wrapComponents:p}}}},57050:(e,t,r)=>{"use strict";r.r(t),r.d(t,{sampleFromSchemaGeneric:()=>F,inferSchema:()=>z,createXMLExample:()=>U,sampleFromSchema:()=>q,memoizedCreateXMLExample:()=>$,memoizedSampleFromSchema:()=>W});var n=r(11882),o=r.n(n),a=r(86),i=r.n(a),s=r(58309),l=r.n(s),u=r(58118),c=r.n(u),p=r(92039),f=r.n(p),h=r(24278),d=r.n(h),m=r(51679),g=r.n(m),v=r(39022),y=r.n(v),b=r(97606),w=r.n(b),E=r(35627),x=r.n(E),_=r(53479),S=r.n(_),A=r(14419),k=r.n(A),C=r(41609),O=r.n(C),j=r(90242),I=r(60314);const N={string:e=>e.pattern?(e=>{try{return new(k())(e).gen()}catch(e){return"string"}})(e.pattern):"string",string_email:()=>"user@example.com","string_date-time":()=>(new Date).toISOString(),string_date:()=>(new Date).toISOString().substring(0,10),string_uuid:()=>"3fa85f64-5717-4562-b3fc-2c963f66afa6",string_hostname:()=>"example.com",string_ipv4:()=>"198.51.100.42",string_ipv6:()=>"2001:0db8:5b96:0000:0000:426f:8e17:642a",number:()=>0,number_float:()=>0,integer:()=>0,boolean:e=>"boolean"!=typeof e.default||e.default},T=e=>{e=(0,j.mz)(e);let{type:t,format:r}=e,n=N[`${t}_${r}`]||N[t];return(0,j.Wl)(n)?n(e):"Unknown Type: "+e.type},P=e=>(0,j.XV)(e,"$$ref",(e=>"string"==typeof e&&o()(e).call(e,"#")>-1)),R=["maxProperties","minProperties"],M=["minItems","maxItems"],D=["minimum","maximum","exclusiveMinimum","exclusiveMaximum"],L=["minLength","maxLength"],B=function(e,t){var r;let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const a=r=>{void 0===t[r]&&void 0!==e[r]&&(t[r]=e[r])};var s;(i()(r=["example","default","enum","xml","type",...R,...M,...D,...L]).call(r,(e=>a(e))),void 0!==e.required&&l()(e.required))&&(void 0!==t.required&&t.required.length||(t.required=[]),i()(s=e.required).call(s,(e=>{var 
r;c()(r=t.required).call(r,e)||t.required.push(e)})));if(e.properties){t.properties||(t.properties={});let r=(0,j.mz)(e.properties);for(let a in r){var u;if(Object.prototype.hasOwnProperty.call(r,a))if(!r[a]||!r[a].deprecated)if(!r[a]||!r[a].readOnly||n.includeReadOnly)if(!r[a]||!r[a].writeOnly||n.includeWriteOnly)if(!t.properties[a])t.properties[a]=r[a],!e.required&&l()(e.required)&&-1!==o()(u=e.required).call(u,a)&&(t.required?t.required.push(a):t.required=[a])}}return e.items&&(t.items||(t.items={}),t.items=B(e.items,t.items,n)),t},F=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0,n=arguments.length>3&&void 0!==arguments[3]&&arguments[3];e&&(0,j.Wl)(e.toJS)&&(e=e.toJS());let a=void 0!==r||e&&void 0!==e.example||e&&void 0!==e.default;const s=!a&&e&&e.oneOf&&e.oneOf.length>0,u=!a&&e&&e.anyOf&&e.anyOf.length>0;if(!a&&(s||u)){const r=(0,j.mz)(s?e.oneOf[0]:e.anyOf[0]);if(B(r,e,t),!e.xml&&r.xml&&(e.xml=r.xml),void 0!==e.example&&void 0!==r.example)a=!0;else if(r.properties){e.properties||(e.properties={});let n=(0,j.mz)(r.properties);for(let a in n){var p;if(Object.prototype.hasOwnProperty.call(n,a))if(!n[a]||!n[a].deprecated)if(!n[a]||!n[a].readOnly||t.includeReadOnly)if(!n[a]||!n[a].writeOnly||t.includeWriteOnly)if(!e.properties[a])e.properties[a]=n[a],!r.required&&l()(r.required)&&-1!==o()(p=r.required).call(p,a)&&(e.required?e.required.push(a):e.required=[a])}}}const h={};let{xml:m,type:v,example:b,properties:E,additionalProperties:x,items:_}=e||{},{includeReadOnly:S,includeWriteOnly:A}=t;m=m||{};let k,{name:C,prefix:I,namespace:N}=m,L={};if(n&&(C=C||"notagname",k=(I?I+":":"")+C,N)){h[I?"xmlns:"+I:"xmlns"]=N}n&&(L[k]=[]);const z=t=>f()(t).call(t,(t=>Object.prototype.hasOwnProperty.call(e,t)));e&&!v&&(E||x||z(R)?v="object":_||z(M)?v="array":z(D)?(v="number",e.type="number"):a||e.enum||(v="string",e.type="string"));const U=t=>{var r,n,o,a,i;null!==(null===(r=e)||void 
0===r?void 0:r.maxItems)&&void 0!==(null===(n=e)||void 0===n?void 0:n.maxItems)&&(t=d()(t).call(t,0,null===(i=e)||void 0===i?void 0:i.maxItems));if(null!==(null===(o=e)||void 0===o?void 0:o.minItems)&&void 0!==(null===(a=e)||void 0===a?void 0:a.minItems)){let r=0;for(;t.length<(null===(s=e)||void 0===s?void 0:s.minItems);){var s;t.push(t[r++%t.length])}}return t},q=(0,j.mz)(E);let V,$=0;const W=()=>e&&null!==e.maxProperties&&void 0!==e.maxProperties&&$>=e.maxProperties,H=()=>{if(!e||!e.required)return 0;let t=0;var r,o;n?i()(r=e.required).call(r,(e=>t+=void 0===L[e]?0:1)):i()(o=e.required).call(o,(e=>{var r;return t+=void 0===(null===(r=L[k])||void 0===r?void 0:g()(r).call(r,(t=>void 0!==t[e])))?0:1}));return e.required.length-t},J=t=>{var r;return!(e&&e.required&&e.required.length)||!c()(r=e.required).call(r,t)},K=t=>!e||null===e.maxProperties||void 0===e.maxProperties||!W()&&(!J(t)||e.maxProperties-$-H()>0);if(V=n?function(r){let o=arguments.length>1&&void 0!==arguments[1]?arguments[1]:void 0;if(e&&q[r]){if(q[r].xml=q[r].xml||{},q[r].xml.attribute){const e=l()(q[r].enum)?q[r].enum[0]:void 0,t=q[r].example,n=q[r].default;return void(h[q[r].xml.name||r]=void 0!==t?t:void 0!==n?n:void 0!==e?e:T(q[r]))}q[r].xml.name=q[r].xml.name||r}else q[r]||!1===x||(q[r]={xml:{name:r}});let a=F(e&&q[r]||void 0,t,o,n);var i;K(r)&&($++,l()(a)?L[k]=y()(i=L[k]).call(i,a):L[k].push(a))}:(r,o)=>{if(K(r)){if(Object.prototype.hasOwnProperty.call(e,"discriminator")&&e.discriminator&&Object.prototype.hasOwnProperty.call(e.discriminator,"mapping")&&e.discriminator.mapping&&Object.prototype.hasOwnProperty.call(e,"$$ref")&&e.$$ref&&e.discriminator.propertyName===r){for(let t in e.discriminator.mapping)if(-1!==e.$$ref.search(e.discriminator.mapping[t])){L[r]=t;break}}else L[r]=F(q[r],t,o,n);$++}},a){let o;if(o=P(void 0!==r?r:void 0!==b?b:e.default),!n){if("number"==typeof o&&"string"===v)return`${o}`;if("string"!=typeof o||"string"===v)return o;try{return JSON.parse(o)}catch(e){return 
o}}if(e||(v=l()(o)?"array":typeof o),"array"===v){if(!l()(o)){if("string"==typeof o)return o;o=[o]}const r=e?e.items:void 0;r&&(r.xml=r.xml||m||{},r.xml.name=r.xml.name||m.name);let a=w()(o).call(o,(e=>F(r,t,e,n)));return a=U(a),m.wrapped?(L[k]=a,O()(h)||L[k].push({_attr:h})):L=a,L}if("object"===v){if("string"==typeof o)return o;for(let t in o)Object.prototype.hasOwnProperty.call(o,t)&&(e&&q[t]&&q[t].readOnly&&!S||e&&q[t]&&q[t].writeOnly&&!A||(e&&q[t]&&q[t].xml&&q[t].xml.attribute?h[q[t].xml.name||t]=o[t]:V(t,o[t])));return O()(h)||L[k].push({_attr:h}),L}return L[k]=O()(h)?o:[{_attr:h},o],L}if("object"===v){for(let e in q)Object.prototype.hasOwnProperty.call(q,e)&&(q[e]&&q[e].deprecated||q[e]&&q[e].readOnly&&!S||q[e]&&q[e].writeOnly&&!A||V(e));if(n&&h&&L[k].push({_attr:h}),W())return L;if(!0===x)n?L[k].push({additionalProp:"Anything can be here"}):L.additionalProp1={},$++;else if(x){const r=(0,j.mz)(x),o=F(r,t,void 0,n);if(n&&r.xml&&r.xml.name&&"notagname"!==r.xml.name)L[k].push(o);else{const t=null!==e.minProperties&&void 0!==e.minProperties&&$F(B(_,e,t),t,void 0,n)));else if(l()(_.oneOf)){var Y;r=w()(Y=_.oneOf).call(Y,(e=>F(B(_,e,t),t,void 0,n)))}else{if(!(!n||n&&m.wrapped))return F(_,t,void 0,n);r=[F(_,t,void 0,n)]}return r=U(r),n&&m.wrapped?(L[k]=r,O()(h)||L[k].push({_attr:h}),L):r}let Q;if(e&&l()(e.enum))Q=(0,j.AF)(e.enum)[0];else{if(!e)return;if(Q=T(e),"number"==typeof Q){let t=e.minimum;null!=t&&(e.exclusiveMinimum&&t++,Q=t);let r=e.maximum;null!=r&&(e.exclusiveMaximum&&r--,Q=r)}if("string"==typeof Q&&(null!==e.maxLength&&void 0!==e.maxLength&&(Q=d()(Q).call(Q,0,e.maxLength)),null!==e.minLength&&void 0!==e.minLength)){let t=0;for(;Q.length(e.schema&&(e=e.schema),e.properties&&(e.type="object"),e),U=(e,t,r)=>{const n=F(e,t,r,!0);if(n)return"string"==typeof n?n:S()(n,{declaration:!0,indent:"\t"})},q=(e,t,r)=>F(e,t,r,!1),V=(e,t,r)=>[e,x()(t),x()(r)],$=(0,I.Z)(U,V),W=(0,I.Z)(q,V)},8883:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(57050);function 
o(){return{fn:n}}},51228:(e,t,r)=>{"use strict";r.r(t),r.d(t,{UPDATE_SPEC:()=>U,UPDATE_URL:()=>q,UPDATE_JSON:()=>V,UPDATE_PARAM:()=>$,UPDATE_EMPTY_PARAM_INCLUSION:()=>W,VALIDATE_PARAMS:()=>H,SET_RESPONSE:()=>J,SET_REQUEST:()=>K,SET_MUTATED_REQUEST:()=>G,LOG_REQUEST:()=>Z,CLEAR_RESPONSE:()=>Y,CLEAR_REQUEST:()=>Q,CLEAR_VALIDATE_PARAMS:()=>X,UPDATE_OPERATION_META_VALUE:()=>ee,UPDATE_RESOLVED:()=>te,UPDATE_RESOLVED_SUBTREE:()=>re,SET_SCHEME:()=>ne,updateSpec:()=>oe,updateResolved:()=>ae,updateUrl:()=>ie,updateJsonSpec:()=>se,parseToJson:()=>le,resolveSpec:()=>ce,requestResolvedSubtree:()=>he,changeParam:()=>de,changeParamByIdentity:()=>me,updateResolvedSubtree:()=>ge,invalidateResolvedSubtreeCache:()=>ve,validateParams:()=>ye,updateEmptyParamInclusion:()=>be,clearValidateParams:()=>we,changeConsumesValue:()=>Ee,changeProducesValue:()=>xe,setResponse:()=>_e,setRequest:()=>Se,setMutatedRequest:()=>Ae,logRequest:()=>ke,executeRequest:()=>Ce,execute:()=>Oe,clearResponse:()=>je,clearRequest:()=>Ie,setScheme:()=>Ne});var n=r(58309),o=r.n(n),a=r(97606),i=r.n(a),s=r(96718),l=r.n(s),u=r(24282),c=r.n(u),p=r(2250),f=r.n(p),h=r(6226),d=r.n(h),m=r(14418),g=r.n(m),v=r(3665),y=r.n(v),b=r(11882),w=r.n(b),E=r(86),x=r.n(E),_=r(28222),S=r.n(_),A=r(76986),k=r.n(A),C=r(70586),O=r.n(C),j=r(1272),I=r(43393),N=r(84564),T=r.n(N),P=r(7710),R=r(47037),M=r.n(R),D=r(23279),L=r.n(D),B=r(36968),F=r.n(B),z=r(90242);const U="spec_update_spec",q="spec_update_url",V="spec_update_json",$="spec_update_param",W="spec_update_empty_param_inclusion",H="spec_validate_param",J="spec_set_response",K="spec_set_request",G="spec_set_mutated_request",Z="spec_log_request",Y="spec_clear_response",Q="spec_clear_request",X="spec_clear_validate_param",ee="spec_update_operation_meta_value",te="spec_update_resolved",re="spec_update_resolved_subtree",ne="set_scheme";function oe(e){const t=(r=e,M()(r)?r:"").replace(/\t/g," ");var r;if("string"==typeof e)return{type:U,payload:t}}function 
ae(e){return{type:te,payload:e}}function ie(e){return{type:q,payload:e}}function se(e){return{type:V,payload:e}}const le=e=>t=>{let{specActions:r,specSelectors:n,errActions:o}=t,{specStr:a}=n,i=null;try{e=e||a(),o.clear({source:"parser"}),i=j.ZP.load(e,{schema:j.A8})}catch(e){return console.error(e),o.newSpecErr({source:"parser",level:"error",message:e.reason,line:e.mark&&e.mark.line?e.mark.line+1:void 0})}return i&&"object"==typeof i?r.updateJsonSpec(i):{}};let ue=!1;const ce=(e,t)=>r=>{let{specActions:n,specSelectors:a,errActions:s,fn:{fetch:u,resolve:c,AST:p={}},getConfigs:f}=r;ue||(console.warn("specActions.resolveSpec is deprecated since v3.10.0 and will be removed in v4.0.0; use requestResolvedSubtree instead!"),ue=!0);const{modelPropertyMacro:h,parameterMacro:d,requestInterceptor:m,responseInterceptor:g}=f();void 0===e&&(e=a.specJson()),void 0===t&&(t=a.url());let v=p.getLineNumberForPath?p.getLineNumberForPath:()=>{},y=a.specStr();return c({fetch:u,spec:e,baseDoc:t,modelPropertyMacro:h,parameterMacro:d,requestInterceptor:m,responseInterceptor:g}).then((e=>{let{spec:t,errors:r}=e;if(s.clear({type:"thrown"}),o()(r)&&r.length>0){let e=i()(r).call(r,(e=>(console.error(e),e.line=e.fullPath?v(y,e.fullPath):null,e.path=e.fullPath?e.fullPath.join("."):null,e.level="error",e.type="thrown",e.source="resolver",l()(e,"message",{enumerable:!0,value:e.message}),e)));s.newThrownErrBatch(e)}return n.updateResolved(t)}))};let pe=[];const fe=L()((async()=>{const e=pe.system;if(!e)return void console.error("debResolveSubtrees: don't have a system to operate on, aborting.");const{errActions:t,errSelectors:r,fn:{resolveSubtree:n,fetch:a,AST:s={}},specSelectors:u,specActions:p}=e;if(!n)return void console.error("Error: Swagger-Client did not provide a `resolveSubtree` method, doing nothing.");let h=s.getLineNumberForPath?s.getLineNumberForPath:()=>{};const m=u.specStr(),{modelPropertyMacro:v,parameterMacro:b,requestInterceptor:w,responseInterceptor:E}=e.getConfigs();try{var 
x=await c()(pe).call(pe,(async(e,s)=>{const{resultMap:c,specWithCurrentSubtrees:p}=await e,{errors:x,spec:_}=await n(p,s,{baseDoc:u.url(),modelPropertyMacro:v,parameterMacro:b,requestInterceptor:w,responseInterceptor:E});if(r.allErrors().size&&t.clearBy((e=>{var t;return"thrown"!==e.get("type")||"resolver"!==e.get("source")||!f()(t=e.get("fullPath")).call(t,((e,t)=>e===s[t]||void 0===s[t]))})),o()(x)&&x.length>0){let e=i()(x).call(x,(e=>(e.line=e.fullPath?h(m,e.fullPath):null,e.path=e.fullPath?e.fullPath.join("."):null,e.level="error",e.type="thrown",e.source="resolver",l()(e,"message",{enumerable:!0,value:e.message}),e)));t.newThrownErrBatch(e)}var S,A;_&&u.isOAS3()&&"components"===s[0]&&"securitySchemes"===s[1]&&await d().all(i()(S=g()(A=y()(_)).call(A,(e=>"openIdConnect"===e.type))).call(S,(async e=>{const t={url:e.openIdConnectUrl,requestInterceptor:w,responseInterceptor:E};try{const r=await a(t);r instanceof Error||r.status>=400?console.error(r.statusText+" "+t.url):e.openIdConnectData=JSON.parse(r.text)}catch(e){console.error(e)}})));return F()(c,s,_),F()(p,s,_),{resultMap:c,specWithCurrentSubtrees:p}}),d().resolve({resultMap:(u.specResolvedSubtree([])||(0,I.Map)()).toJS(),specWithCurrentSubtrees:u.specJson().toJS()}));delete pe.system,pe=[]}catch(e){console.error(e)}p.updateResolvedSubtree([],x.resultMap)}),35),he=e=>t=>{var r;w()(r=i()(pe).call(pe,(e=>e.join("@@")))).call(r,e.join("@@"))>-1||(pe.push(e),pe.system=t,fe())};function de(e,t,r,n,o){return{type:$,payload:{path:e,value:n,paramName:t,paramIn:r,isXml:o}}}function me(e,t,r,n){return{type:$,payload:{path:e,param:t,value:r,isXml:n}}}const ge=(e,t)=>({type:re,payload:{path:e,value:t}}),ve=()=>({type:re,payload:{path:[],value:(0,I.Map)()}}),ye=(e,t)=>({type:H,payload:{pathMethod:e,isOAS3:t}}),be=(e,t,r,n)=>({type:W,payload:{pathMethod:e,paramName:t,paramIn:r,includeEmptyValue:n}});function we(e){return{type:X,payload:{pathMethod:e}}}function 
Ee(e,t){return{type:ee,payload:{path:e,value:t,key:"consumes_value"}}}function xe(e,t){return{type:ee,payload:{path:e,value:t,key:"produces_value"}}}const _e=(e,t,r)=>({payload:{path:e,method:t,res:r},type:J}),Se=(e,t,r)=>({payload:{path:e,method:t,req:r},type:K}),Ae=(e,t,r)=>({payload:{path:e,method:t,req:r},type:G}),ke=e=>({payload:e,type:Z}),Ce=e=>t=>{let{fn:r,specActions:n,specSelectors:a,getConfigs:s,oas3Selectors:l}=t,{pathName:u,method:c,operation:p}=e,{requestInterceptor:f,responseInterceptor:h}=s(),d=p.toJS();var m,v;p&&p.get("parameters")&&x()(m=g()(v=p.get("parameters")).call(v,(e=>e&&!0===e.get("allowEmptyValue")))).call(m,(t=>{if(a.parameterInclusionSettingFor([u,c],t.get("name"),t.get("in"))){e.parameters=e.parameters||{};const r=(0,z.cz)(t,e.parameters);(!r||r&&0===r.size)&&(e.parameters[t.get("name")]="")}}));if(e.contextUrl=T()(a.url()).toString(),d&&d.operationId?e.operationId=d.operationId:d&&u&&c&&(e.operationId=r.opId(d,u,c)),a.isOAS3()){const t=`${u}:${c}`;e.server=l.selectedServer(t)||l.selectedServer();const r=l.serverVariables({server:e.server,namespace:t}).toJS(),n=l.serverVariables({server:e.server}).toJS();e.serverVariables=S()(r).length?r:n,e.requestContentType=l.requestContentType(u,c),e.responseContentType=l.responseContentType(u,c)||"*/*";const a=l.requestBodyValue(u,c),s=l.requestBodyInclusionSetting(u,c);var y;if(a&&a.toJS)e.requestBody=g()(y=i()(a).call(a,(e=>I.Map.isMap(e)?e.get("value"):e))).call(y,((e,t)=>(o()(e)?0!==e.length:!(0,z.O2)(e))||s.get(t))).toJS();else e.requestBody=a}let b=k()({},e);b=r.buildRequest(b),n.setRequest(e.pathName,e.method,b);e.requestInterceptor=async t=>{let r=await f.apply(void 0,[t]),o=k()({},r);return n.setMutatedRequest(e.pathName,e.method,o),r},e.responseInterceptor=h;const w=O()();return r.execute(e).then((t=>{t.duration=O()()-w,n.setResponse(e.pathName,e.method,t)})).catch((t=>{"Failed to fetch"===t.message&&(t.name="",t.message='**Failed to fetch.** \n**Possible Reasons:** \n - CORS \n - 
Network Failure \n - URL scheme must be "http" or "https" for CORS request.'),n.setResponse(e.pathName,e.method,{error:!0,err:(0,P.serializeError)(t)})}))},Oe=function(){let{path:e,method:t,...r}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return n=>{let{fn:{fetch:o},specSelectors:a,specActions:i}=n,s=a.specJsonWithResolvedSubtrees().toJS(),l=a.operationScheme(e,t),{requestContentType:u,responseContentType:c}=a.contentTypeValues([e,t]).toJS(),p=/xml/i.test(u),f=a.parameterValues([e,t],p).toJS();return i.executeRequest({...r,fetch:o,spec:s,pathName:e,method:t,parameters:f,requestContentType:u,scheme:l,responseContentType:c})}};function je(e,t){return{type:Y,payload:{path:e,method:t}}}function Ie(e,t){return{type:Q,payload:{path:e,method:t}}}function Ne(e,t,r){return{type:ne,payload:{scheme:e,path:t,method:r}}}},37038:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>s});var n=r(20032),o=r(51228),a=r(33881),i=r(77508);function s(){return{statePlugins:{spec:{wrapActions:i,reducers:n.default,actions:o,selectors:a}}}}},20032:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>d});var n=r(24282),o=r.n(n),a=r(97606),i=r.n(a),s=r(76986),l=r.n(s),u=r(43393),c=r(90242),p=r(27504),f=r(33881),h=r(51228);const d={[h.UPDATE_SPEC]:(e,t)=>"string"==typeof t.payload?e.set("spec",t.payload):e,[h.UPDATE_URL]:(e,t)=>e.set("url",t.payload+""),[h.UPDATE_JSON]:(e,t)=>e.set("json",(0,c.oG)(t.payload)),[h.UPDATE_RESOLVED]:(e,t)=>e.setIn(["resolved"],(0,c.oG)(t.payload)),[h.UPDATE_RESOLVED_SUBTREE]:(e,t)=>{const{value:r,path:n}=t.payload;return e.setIn(["resolvedSubtrees",...n],(0,c.oG)(r))},[h.UPDATE_PARAM]:(e,t)=>{let{payload:r}=t,{path:n,paramName:o,paramIn:a,param:i,value:s,isXml:l}=r,u=i?(0,c.V9)(i):`${a}.${o}`;const p=l?"value_xml":"value";return e.setIn(["meta","paths",...n,"parameters",u,p],s)},[h.UPDATE_EMPTY_PARAM_INCLUSION]:(e,t)=>{let{payload:r}=t,{pathMethod:n,paramName:o,paramIn:a,includeEmptyValue:i}=r;if(!o||!a)return console.warn("Warning: 
UPDATE_EMPTY_PARAM_INCLUSION could not generate a paramKey."),e;const s=`${a}.${o}`;return e.setIn(["meta","paths",...n,"parameter_inclusions",s],i)},[h.VALIDATE_PARAMS]:(e,t)=>{let{payload:{pathMethod:r,isOAS3:n}}=t;const a=(0,f.specJsonWithResolvedSubtrees)(e).getIn(["paths",...r]),i=(0,f.parameterValues)(e,r).toJS();return e.updateIn(["meta","paths",...r,"parameters"],(0,u.fromJS)({}),(t=>{var s;return o()(s=a.get("parameters",(0,u.List)())).call(s,((t,o)=>{const a=(0,c.cz)(o,i),s=(0,f.parameterInclusionSettingFor)(e,r,o.get("name"),o.get("in")),l=(0,c.Ik)(o,a,{bypassRequiredCheck:s,isOAS3:n});return t.setIn([(0,c.V9)(o),"errors"],(0,u.fromJS)(l))}),t)}))},[h.CLEAR_VALIDATE_PARAMS]:(e,t)=>{let{payload:{pathMethod:r}}=t;return e.updateIn(["meta","paths",...r,"parameters"],(0,u.fromJS)([]),(e=>i()(e).call(e,(e=>e.set("errors",(0,u.fromJS)([]))))))},[h.SET_RESPONSE]:(e,t)=>{let r,{payload:{res:n,path:o,method:a}}=t;r=n.error?l()({error:!0,name:n.err.name,message:n.err.message,statusCode:n.err.statusCode},n.err.response):n,r.headers=r.headers||{};let i=e.setIn(["responses",o,a],(0,c.oG)(r));return p.Z.Blob&&n.data instanceof p.Z.Blob&&(i=i.setIn(["responses",o,a,"text"],n.data)),i},[h.SET_REQUEST]:(e,t)=>{let{payload:{req:r,path:n,method:o}}=t;return e.setIn(["requests",n,o],(0,c.oG)(r))},[h.SET_MUTATED_REQUEST]:(e,t)=>{let{payload:{req:r,path:n,method:o}}=t;return e.setIn(["mutatedRequests",n,o],(0,c.oG)(r))},[h.UPDATE_OPERATION_META_VALUE]:(e,t)=>{let{payload:{path:r,value:n,key:o}}=t,a=["paths",...r],i=["meta","paths",...r];return e.getIn(["json",...a])||e.getIn(["resolved",...a])||e.getIn(["resolvedSubtrees",...a])?e.setIn([...i,o],(0,u.fromJS)(n)):e},[h.CLEAR_RESPONSE]:(e,t)=>{let{payload:{path:r,method:n}}=t;return e.deleteIn(["responses",r,n])},[h.CLEAR_REQUEST]:(e,t)=>{let{payload:{path:r,method:n}}=t;return e.deleteIn(["requests",r,n])},[h.SET_SCHEME]:(e,t)=>{let{payload:{scheme:r,path:n,method:o}}=t;return n&&o?e.setIn(["scheme",n,o],r):n||o?void 
0:e.setIn(["scheme","_defaultScheme"],r)}}},33881:(e,t,r)=>{"use strict";r.r(t),r.d(t,{lastError:()=>O,url:()=>j,specStr:()=>I,specSource:()=>N,specJson:()=>T,specResolved:()=>P,specResolvedSubtree:()=>R,specJsonWithResolvedSubtrees:()=>D,spec:()=>L,isOAS3:()=>B,info:()=>F,externalDocs:()=>z,version:()=>U,semver:()=>q,paths:()=>V,operations:()=>$,consumes:()=>W,produces:()=>H,security:()=>J,securityDefinitions:()=>K,findDefinition:()=>G,definitions:()=>Z,basePath:()=>Y,host:()=>Q,schemes:()=>X,operationsWithRootInherited:()=>ee,tags:()=>te,tagDetails:()=>re,operationsWithTags:()=>ne,taggedOperations:()=>oe,responses:()=>ae,requests:()=>ie,mutatedRequests:()=>se,responseFor:()=>le,requestFor:()=>ue,mutatedRequestFor:()=>ce,allowTryItOutFor:()=>pe,parameterWithMetaByIdentity:()=>fe,parameterInclusionSettingFor:()=>he,parameterWithMeta:()=>de,operationWithMeta:()=>me,getParameter:()=>ge,hasHost:()=>ve,parameterValues:()=>ye,parametersIncludeIn:()=>be,parametersIncludeType:()=>we,contentTypeValues:()=>Ee,currentProducesFor:()=>xe,producesOptionsFor:()=>_e,consumesOptionsFor:()=>Se,operationScheme:()=>Ae,canExecuteScheme:()=>ke,validationErrors:()=>Ce,validateBeforeExecute:()=>Oe,getOAS3RequiredRequestBodyContentType:()=>je,isMediaTypeSchemaPropertiesEqual:()=>Ie});var n=r(24278),o=r.n(n),a=r(86),i=r.n(a),s=r(11882),l=r.n(s),u=r(97606),c=r.n(u),p=r(14418),f=r.n(p),h=r(51679),d=r.n(h),m=r(24282),g=r.n(m),v=r(2578),y=r.n(v),b=r(92039),w=r.n(b),E=r(58309),x=r.n(E),_=r(20573),S=r(90242),A=r(43393);const k=["get","put","post","delete","options","head","patch","trace"],C=e=>e||(0,A.Map)(),O=(0,_.P1)(C,(e=>e.get("lastError"))),j=(0,_.P1)(C,(e=>e.get("url"))),I=(0,_.P1)(C,(e=>e.get("spec")||"")),N=(0,_.P1)(C,(e=>e.get("specSource")||"not-editor")),T=(0,_.P1)(C,(e=>e.get("json",(0,A.Map)()))),P=(0,_.P1)(C,(e=>e.get("resolved",(0,A.Map)()))),R=(e,t)=>e.getIn(["resolvedSubtrees",...t],void 
0),M=(e,t)=>A.Map.isMap(e)&&A.Map.isMap(t)?t.get("$$ref")?t:(0,A.OrderedMap)().mergeWith(M,e,t):t,D=(0,_.P1)(C,(e=>(0,A.OrderedMap)().mergeWith(M,e.get("json"),e.get("resolvedSubtrees")))),L=e=>T(e),B=(0,_.P1)(L,(()=>!1)),F=(0,_.P1)(L,(e=>Ne(e&&e.get("info")))),z=(0,_.P1)(L,(e=>Ne(e&&e.get("externalDocs")))),U=(0,_.P1)(F,(e=>e&&e.get("version"))),q=(0,_.P1)(U,(e=>{var t;return o()(t=/v?([0-9]*)\.([0-9]*)\.([0-9]*)/i.exec(e)).call(t,1)})),V=(0,_.P1)(D,(e=>e.get("paths"))),$=(0,_.P1)(V,(e=>{if(!e||e.size<1)return(0,A.List)();let t=(0,A.List)();return e&&i()(e)?(i()(e).call(e,((e,r)=>{if(!e||!i()(e))return{};i()(e).call(e,((e,n)=>{l()(k).call(k,n)<0||(t=t.push((0,A.fromJS)({path:r,method:n,operation:e,id:`${n}-${r}`})))}))})),t):(0,A.List)()})),W=(0,_.P1)(L,(e=>(0,A.Set)(e.get("consumes")))),H=(0,_.P1)(L,(e=>(0,A.Set)(e.get("produces")))),J=(0,_.P1)(L,(e=>e.get("security",(0,A.List)()))),K=(0,_.P1)(L,(e=>e.get("securityDefinitions"))),G=(e,t)=>{const r=e.getIn(["resolvedSubtrees","definitions",t],null),n=e.getIn(["json","definitions",t],null);return r||n||null},Z=(0,_.P1)(L,(e=>{const t=e.get("definitions");return A.Map.isMap(t)?t:(0,A.Map)()})),Y=(0,_.P1)(L,(e=>e.get("basePath"))),Q=(0,_.P1)(L,(e=>e.get("host"))),X=(0,_.P1)(L,(e=>e.get("schemes",(0,A.Map)()))),ee=(0,_.P1)($,W,H,((e,t,r)=>c()(e).call(e,(e=>e.update("operation",(e=>{if(e){if(!A.Map.isMap(e))return;return e.withMutations((e=>(e.get("consumes")||e.update("consumes",(e=>(0,A.Set)(e).merge(t))),e.get("produces")||e.update("produces",(e=>(0,A.Set)(e).merge(r))),e)))}return(0,A.Map)()})))))),te=(0,_.P1)(L,(e=>{const t=e.get("tags",(0,A.List)());return A.List.isList(t)?f()(t).call(t,(e=>A.Map.isMap(e))):(0,A.List)()})),re=(e,t)=>{var r;let n=te(e)||(0,A.List)();return d()(r=f()(n).call(n,A.Map.isMap)).call(r,(e=>e.get("name")===t),(0,A.Map)())},ne=(0,_.P1)(ee,te,((e,t)=>g()(e).call(e,((e,t)=>{let r=(0,A.Set)(t.getIn(["operation","tags"]));return 
r.count()<1?e.update("default",(0,A.List)(),(e=>e.push(t))):g()(r).call(r,((e,r)=>e.update(r,(0,A.List)(),(e=>e.push(t)))),e)}),g()(t).call(t,((e,t)=>e.set(t.get("name"),(0,A.List)())),(0,A.OrderedMap)())))),oe=e=>t=>{var r;let{getConfigs:n}=t,{tagsSorter:o,operationsSorter:a}=n();return c()(r=ne(e).sortBy(((e,t)=>t),((e,t)=>{let r="function"==typeof o?o:S.wh.tagsSorter[o];return r?r(e,t):null}))).call(r,((t,r)=>{let n="function"==typeof a?a:S.wh.operationsSorter[a],o=n?y()(t).call(t,n):t;return(0,A.Map)({tagDetails:re(e,r),operations:o})}))},ae=(0,_.P1)(C,(e=>e.get("responses",(0,A.Map)()))),ie=(0,_.P1)(C,(e=>e.get("requests",(0,A.Map)()))),se=(0,_.P1)(C,(e=>e.get("mutatedRequests",(0,A.Map)()))),le=(e,t,r)=>ae(e).getIn([t,r],null),ue=(e,t,r)=>ie(e).getIn([t,r],null),ce=(e,t,r)=>se(e).getIn([t,r],null),pe=()=>!0,fe=(e,t,r)=>{const n=D(e).getIn(["paths",...t,"parameters"],(0,A.OrderedMap)()),o=e.getIn(["meta","paths",...t,"parameters"],(0,A.OrderedMap)()),a=c()(n).call(n,(e=>{const t=o.get(`${r.get("in")}.${r.get("name")}`),n=o.get(`${r.get("in")}.${r.get("name")}.hash-${r.hashCode()}`);return(0,A.OrderedMap)().merge(e,t,n)}));return d()(a).call(a,(e=>e.get("in")===r.get("in")&&e.get("name")===r.get("name")),(0,A.OrderedMap)())},he=(e,t,r,n)=>{const o=`${n}.${r}`;return e.getIn(["meta","paths",...t,"parameter_inclusions",o],!1)},de=(e,t,r,n)=>{const o=D(e).getIn(["paths",...t,"parameters"],(0,A.OrderedMap)()),a=d()(o).call(o,(e=>e.get("in")===n&&e.get("name")===r),(0,A.OrderedMap)());return fe(e,t,a)},me=(e,t,r)=>{var n;const o=D(e).getIn(["paths",t,r],(0,A.OrderedMap)()),a=e.getIn(["meta","paths",t,r],(0,A.OrderedMap)()),i=c()(n=o.get("parameters",(0,A.List)())).call(n,(n=>fe(e,[t,r],n)));return(0,A.OrderedMap)().merge(o,a).set("parameters",i)};function ge(e,t,r,n){t=t||[];let o=e.getIn(["meta","paths",...t,"parameters"],(0,A.fromJS)([]));return d()(o).call(o,(e=>A.Map.isMap(e)&&e.get("name")===r&&e.get("in")===n))||(0,A.Map)()}const ve=(0,_.P1)(L,(e=>{const 
t=e.get("host");return"string"==typeof t&&t.length>0&&"/"!==t[0]}));function ye(e,t,r){t=t||[];let n=me(e,...t).get("parameters",(0,A.List)());return g()(n).call(n,((e,t)=>{let n=r&&"body"===t.get("in")?t.get("value_xml"):t.get("value");return e.set((0,S.V9)(t,{allowHashes:!1}),n)}),(0,A.fromJS)({}))}function be(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(A.List.isList(e))return w()(e).call(e,(e=>A.Map.isMap(e)&&e.get("in")===t))}function we(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(A.List.isList(e))return w()(e).call(e,(e=>A.Map.isMap(e)&&e.get("type")===t))}function Ee(e,t){t=t||[];let r=D(e).getIn(["paths",...t],(0,A.fromJS)({})),n=e.getIn(["meta","paths",...t],(0,A.fromJS)({})),o=xe(e,t);const a=r.get("parameters")||new A.List,i=n.get("consumes_value")?n.get("consumes_value"):we(a,"file")?"multipart/form-data":we(a,"formData")?"application/x-www-form-urlencoded":void 0;return(0,A.fromJS)({requestContentType:i,responseContentType:o})}function xe(e,t){t=t||[];const r=D(e).getIn(["paths",...t],null);if(null===r)return;const n=e.getIn(["meta","paths",...t,"produces_value"],null),o=r.getIn(["produces",0],null);return n||o||"application/json"}function _e(e,t){t=t||[];const r=D(e),n=r.getIn(["paths",...t],null);if(null===n)return;const[o]=t,a=n.get("produces",null),i=r.getIn(["paths",o,"produces"],null),s=r.getIn(["produces"],null);return a||i||s}function Se(e,t){t=t||[];const r=D(e),n=r.getIn(["paths",...t],null);if(null===n)return;const[o]=t,a=n.get("consumes",null),i=r.getIn(["paths",o,"consumes"],null),s=r.getIn(["consumes"],null);return a||i||s}const Ae=(e,t,r)=>{let n=e.get("url").match(/^([a-z][a-z0-9+\-.]*):/),o=x()(n)?n[1]:null;return e.getIn(["scheme",t,r])||e.getIn(["scheme","_defaultScheme"])||o||""},ke=(e,t,r)=>{var n;return l()(n=["http","https"]).call(n,Ae(e,t,r))>-1},Ce=(e,t)=>{t=t||[];let r=e.getIn(["meta","paths",...t,"parameters"],(0,A.fromJS)([]));const n=[];return i()(r).call(r,(e=>{let 
t=e.get("errors");t&&t.count()&&i()(t).call(t,(e=>n.push(e)))})),n},Oe=(e,t)=>0===Ce(e,t).length,je=(e,t)=>{var r;let n={requestBody:!1,requestContentType:{}},o=e.getIn(["resolvedSubtrees","paths",...t,"requestBody"],(0,A.fromJS)([]));return o.size<1||(o.getIn(["required"])&&(n.requestBody=o.getIn(["required"])),i()(r=o.getIn(["content"]).entrySeq()).call(r,(e=>{const t=e[0];if(e[1].getIn(["schema","required"])){const r=e[1].getIn(["schema","required"]).toJS();n.requestContentType[t]=r}}))),n},Ie=(e,t,r,n)=>{if((r||n)&&r===n)return!0;let o=e.getIn(["resolvedSubtrees","paths",...t,"requestBody","content"],(0,A.fromJS)([]));if(o.size<2||!r||!n)return!1;let a=o.getIn([r,"schema","properties"],(0,A.fromJS)([])),i=o.getIn([n,"schema","properties"],(0,A.fromJS)([]));return!!a.equals(i)};function Ne(e){return A.Map.isMap(e)?e:new A.Map}},77508:(e,t,r)=>{"use strict";r.r(t),r.d(t,{updateSpec:()=>u,updateJsonSpec:()=>c,executeRequest:()=>p,validateParams:()=>f});var n=r(28222),o=r.n(n),a=r(86),i=r.n(a),s=r(27361),l=r.n(s);const u=(e,t)=>{let{specActions:r}=t;return function(){e(...arguments),r.parseToJson(...arguments)}},c=(e,t)=>{let{specActions:r}=t;return function(){for(var t=arguments.length,n=new Array(t),a=0;a{l()(u,[e]).$ref&&r.requestResolvedSubtree(["paths",e])})),r.requestResolvedSubtree(["components","securitySchemes"])}},p=(e,t)=>{let{specActions:r}=t;return t=>(r.logRequest(t),e(t))},f=(e,t)=>{let{specSelectors:r}=t;return t=>e(t,r.isOAS3())}},34852:(e,t,r)=>{"use strict";r.r(t),r.d(t,{loaded:()=>n});const n=(e,t)=>function(){e(...arguments);const r=t.getConfigs().withCredentials;void 0!==r&&(t.fn.fetch.withCredentials="string"==typeof r?"true"===r:!!r)}},48792:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>qr});var n={};r.r(n),r.d(n,{JsonPatchError:()=>Fe,_areEquals:()=>Ge,applyOperation:()=>$e,applyPatch:()=>We,applyReducer:()=>He,deepClone:()=>ze,getValueByPointer:()=>Ve,validate:()=>Ke,validator:()=>Je});var 
o={};r.r(o),r.d(o,{compare:()=>nt,generate:()=>tt,observe:()=>et,unobserve:()=>Xe});var a={};r.r(a),r.d(a,{cookie:()=>kr,header:()=>Ar,path:()=>xr,query:()=>_r});var i=r(80093),s=r.n(i),l=r(30222),u=r.n(l),c=r(36594),p=r.n(c),f=r(20474),h=r.n(f),d=r(67375),m=r.n(d),g=r(58118),v=r.n(g),y=r(74386),b=r.n(y),w=r(25110),E=r.n(w),x=r(35627),_=r.n(x),S=r(97606),A=r.n(S),k=r(28222),C=r.n(k),O=r(39022),j=r.n(O),I=r(2018),N=r.n(I),T=r(14418),P=r.n(T),R=(r(31905),r(80129)),M=r.n(R),D=r(1272);const L="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof self?self:window,{FormData:B,Blob:F,File:z}=L;var U=r(15687),q=r.n(U),V=r(24278),$=r.n(V),W=function(e){return":/?#[]@!$&'()*+,;=".indexOf(e)>-1},H=function(e){return/^[a-z0-9\-._~]+$/i.test(e)};function J(e){var t,r=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=r.escape,o=arguments.length>2?arguments[2]:void 0;return"number"==typeof e&&(e=e.toString()),"string"==typeof e&&e.length&&n?o?JSON.parse(e):A()(t=q()(e)).call(t,(function(e){var t,r;if(H(e))return e;if(W(e)&&"unsafe"===n)return e;var o=new TextEncoder;return A()(t=A()(r=E()(o.encode(e))).call(r,(function(e){var t;return $()(t="0".concat(e.toString(16).toUpperCase())).call(t,-2)}))).call(t,(function(e){return"%".concat(e)})).join("")})).join(""):e}function K(e){var t=e.value;return Array.isArray(t)?function(e){var t=e.key,r=e.value,n=e.style,o=e.explode,a=e.escape,i=function(e){return J(e,{escape:a})};if("simple"===n)return A()(r).call(r,(function(e){return i(e)})).join(",");if("label"===n)return".".concat(A()(r).call(r,(function(e){return i(e)})).join("."));if("matrix"===n)return A()(r).call(r,(function(e){return i(e)})).reduce((function(e,r){var n,a,i;return!e||o?j()(a=j()(i="".concat(e||"",";")).call(i,t,"=")).call(a,r):j()(n="".concat(e,",")).call(n,r)}),"");if("form"===n){var s=o?"&".concat(t,"="):",";return A()(r).call(r,(function(e){return i(e)})).join(s)}if("spaceDelimited"===n){var l=o?"".concat(t,"="):"";return 
A()(r).call(r,(function(e){return i(e)})).join(" ".concat(l))}if("pipeDelimited"===n){var u=o?"".concat(t,"="):"";return A()(r).call(r,(function(e){return i(e)})).join("|".concat(u))}return}(e):"object"===h()(t)?function(e){var t=e.key,r=e.value,n=e.style,o=e.explode,a=e.escape,i=function(e){return J(e,{escape:a})},s=C()(r);if("simple"===n)return s.reduce((function(e,t){var n,a,s,l=i(r[t]),u=o?"=":",",c=e?"".concat(e,","):"";return j()(n=j()(a=j()(s="".concat(c)).call(s,t)).call(a,u)).call(n,l)}),"");if("label"===n)return s.reduce((function(e,t){var n,a,s,l=i(r[t]),u=o?"=":".",c=e?"".concat(e,"."):".";return j()(n=j()(a=j()(s="".concat(c)).call(s,t)).call(a,u)).call(n,l)}),"");if("matrix"===n&&o)return s.reduce((function(e,t){var n,o,a=i(r[t]),s=e?"".concat(e,";"):";";return j()(n=j()(o="".concat(s)).call(o,t,"=")).call(n,a)}),"");if("matrix"===n)return s.reduce((function(e,n){var o,a,s=i(r[n]),l=e?"".concat(e,","):";".concat(t,"=");return j()(o=j()(a="".concat(l)).call(a,n,",")).call(o,s)}),"");if("form"===n)return s.reduce((function(e,t){var n,a,s,l,u=i(r[t]),c=e?j()(n="".concat(e)).call(n,o?"&":","):"",p=o?"=":",";return j()(a=j()(s=j()(l="".concat(c)).call(l,t)).call(s,p)).call(a,u)}),"");return}(e):function(e){var t,r=e.key,n=e.value,o=e.style,a=e.escape,i=function(e){return J(e,{escape:a})};if("simple"===o)return i(n);if("label"===o)return".".concat(i(n));if("matrix"===o)return j()(t=";".concat(r,"=")).call(t,i(n));if("form"===o)return i(n);if("deepObject"===o)return i(n,{},!0);return}(e)}const G=function(e,t){t.body=e};var Z={serializeRes:te,mergeInQueryOrForm:fe};function Y(e){return Q.apply(this,arguments)}function Q(){return Q=s()(u().mark((function e(t){var r,n,o,a,i,s=arguments;return u().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(r=s.length>1&&void 0!==s[1]?s[1]:{},"object"===h()(t)&&(t=(r=t).url),r.headers=r.headers||{},Z.mergeInQueryOrForm(r),r.headers&&C()(r.headers).forEach((function(e){var t=r.headers[e];"string"==typeof 
t&&(r.headers[e]=t.replace(/\n+/g," "))})),!r.requestInterceptor){e.next=12;break}return e.next=8,r.requestInterceptor(r);case 8:if(e.t0=e.sent,e.t0){e.next=11;break}e.t0=r;case 11:r=e.t0;case 12:return n=r.headers["content-type"]||r.headers["Content-Type"],/multipart\/form-data/i.test(n)&&r.body instanceof B&&(delete r.headers["content-type"],delete r.headers["Content-Type"]),e.prev=14,e.next=17,(r.userFetch||fetch)(r.url,r);case 17:return o=e.sent,e.next=20,Z.serializeRes(o,t,r);case 20:if(o=e.sent,!r.responseInterceptor){e.next=28;break}return e.next=24,r.responseInterceptor(o);case 24:if(e.t1=e.sent,e.t1){e.next=27;break}e.t1=o;case 27:o=e.t1;case 28:e.next=39;break;case 30:if(e.prev=30,e.t2=e.catch(14),o){e.next=34;break}throw e.t2;case 34:throw(a=new Error(o.statusText||"response status is ".concat(o.status))).status=o.status,a.statusCode=o.status,a.responseError=e.t2,a;case 39:if(o.ok){e.next=45;break}throw(i=new Error(o.statusText||"response status is ".concat(o.status))).status=o.status,i.statusCode=o.status,i.response=o,i;case 45:return e.abrupt("return",o);case 46:case"end":return e.stop()}}),e,null,[[14,30]])}))),Q.apply(this,arguments)}var X=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return/(json|xml|yaml|text)\b/.test(e)};function ee(e,t){return t&&(0===t.indexOf("application/json")||t.indexOf("+json")>0)?JSON.parse(e):D.ZP.load(e)}function te(e,t){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},n=r.loadSpec,o=void 0!==n&&n,a={ok:e.ok,url:e.url||t,status:e.status,statusText:e.statusText,headers:ne(e.headers)},i=a.headers["content-type"],s=o||X(i),l=s?e.text:e.blob||e.buffer;return l.call(e).then((function(e){if(a.text=e,a.data=e,s)try{var t=ee(e,i);a.body=t,a.obj=t}catch(e){a.parseError=e}return a}))}function re(e){return v()(e).call(e,", ")?e.split(", "):e}function ne(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return"function"!=typeof 
b()(e)?{}:E()(b()(e).call(e)).reduce((function(e,t){var r=m()(t,2),n=r[0],o=r[1];return e[n]=re(o),e}),{})}function oe(e,t){return t||"undefined"==typeof navigator||(t=navigator),t&&"ReactNative"===t.product?!(!e||"object"!==h()(e)||"string"!=typeof e.uri):void 0!==z&&e instanceof z||(void 0!==F&&e instanceof F||(!!ArrayBuffer.isView(e)||null!==e&&"object"===h()(e)&&"function"==typeof e.pipe))}function ae(e,t){return Array.isArray(e)&&e.some((function(e){return oe(e,t)}))}var ie={form:",",spaceDelimited:"%20",pipeDelimited:"|"},se={csv:",",ssv:"%20",tsv:"%09",pipes:"|"};function le(e,t){var r=arguments.length>2&&void 0!==arguments[2]&&arguments[2],n=t.collectionFormat,o=t.allowEmptyValue,a=t.serializationOption,i=t.encoding,s="object"!==h()(t)||Array.isArray(t)?t:t.value,l=r?function(e){return e.toString()}:function(e){return encodeURIComponent(e)},u=l(e);if(void 0===s&&o)return[[u,""]];if(oe(s)||ae(s))return[[u,s]];if(a)return ue(e,s,r,a);if(i){if([h()(i.style),h()(i.explode),h()(i.allowReserved)].some((function(e){return"undefined"!==e}))){var c=i.style,p=i.explode,f=i.allowReserved;return ue(e,s,r,{style:c,explode:p,allowReserved:f})}if(i.contentType){if("application/json"===i.contentType){var d="string"==typeof s?s:_()(s);return[[u,l(d)]]}return[[u,l(s.toString())]]}return"object"!==h()(s)?[[u,l(s)]]:Array.isArray(s)&&s.every((function(e){return"object"!==h()(e)}))?[[u,A()(s).call(s,l).join(",")]]:[[u,l(_()(s))]]}return"object"!==h()(s)?[[u,l(s)]]:Array.isArray(s)?"multi"===n?[[u,A()(s).call(s,l)]]:[[u,A()(s).call(s,l).join(se[n||"csv"])]]:[[u,""]]}function ue(e,t,r,n){var o,a,i,s=n.style||"form",l=void 0===n.explode?"form"===s:n.explode,u=!r&&(n&&n.allowReserved?"unsafe":"reserved"),c=function(e){return J(e,{escape:u})},p=r?function(e){return e}:function(e){return J(e,{escape:u})};return"object"!==h()(t)?[[p(e),c(t)]]:Array.isArray(t)?l?[[p(e),A()(t).call(t,c)]]:[[p(e),A()(t).call(t,c).join(ie[s])]]:"deepObject"===s?A()(a=C()(t)).call(a,(function(r){var 
n;return[p(j()(n="".concat(e,"[")).call(n,r,"]")),c(t[r])]})):l?A()(i=C()(t)).call(i,(function(e){return[p(e),c(t[e])]})):[[p(e),A()(o=C()(t)).call(o,(function(e){var r;return[j()(r="".concat(p(e),",")).call(r,c(t[e]))]})).join(",")]]}function ce(e){return N()(e).reduce((function(e,t){var r,n=m()(t,2),o=n[0],a=n[1],i=p()(le(o,a,!0));try{for(i.s();!(r=i.n()).done;){var s=m()(r.value,2),l=s[0],u=s[1];if(Array.isArray(u)){var c,f=p()(u);try{for(f.s();!(c=f.n()).done;){var h=c.value;if(ArrayBuffer.isView(h)){var d=new F([h]);e.append(l,d)}else e.append(l,h)}}catch(e){f.e(e)}finally{f.f()}}else if(ArrayBuffer.isView(u)){var g=new F([u]);e.append(l,g)}else e.append(l,u)}}catch(e){i.e(e)}finally{i.f()}return e}),new B)}function pe(e){var t=C()(e).reduce((function(t,r){var n,o=p()(le(r,e[r]));try{for(o.s();!(n=o.n()).done;){var a=m()(n.value,2),i=a[0],s=a[1];t[i]=s}}catch(e){o.e(e)}finally{o.f()}return t}),{});return M().stringify(t,{encode:!1,indices:!1})||""}function fe(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.url,r=void 0===t?"":t,n=e.query,o=e.form,a=function(){for(var e=arguments.length,t=new Array(e),r=0;r=48&&t<=57))return!1;r++}return!0}function Re(e){return-1===e.indexOf("/")&&-1===e.indexOf("~")?e:e.replace(/~/g,"~0").replace(/\//g,"~1")}function Me(e){return e.replace(/~1/g,"/").replace(/~0/g,"~")}function De(e){if(void 0===e)return!0;if(e)if(Array.isArray(e)){for(var t=0,r=e.length;t0&&"constructor"==s[u-1]))throw new TypeError("JSON-Patch: modifying `__proto__` or `constructor/prototype` prop is banned for security reasons, if this was on purpose, please set `banPrototypeModifications` flag false and pass it to this function. 
More info in fast-json-patch README");if(r&&void 0===p&&(void 0===l[f]?p=s.slice(0,u).join("/"):u==c-1&&(p=t.path),void 0!==p&&h(t,0,e,p)),u++,Array.isArray(l)){if("-"===f)f=l.length;else{if(r&&!Pe(f))throw new Fe("Expected an unsigned base-10 integer value, making the new referenced value the array element with the zero-based index","OPERATION_PATH_ILLEGAL_ARRAY_INDEX",a,t,e);Pe(f)&&(f=~~f)}if(u>=c){if(r&&"add"===t.op&&f>l.length)throw new Fe("The specified index MUST NOT be greater than the number of elements in the array","OPERATION_VALUE_OUT_OF_BOUNDS",a,t,e);if(!1===(i=qe[t.op].call(t,l,f,e)).test)throw new Fe("Test operation failed","TEST_OPERATION_FAILED",a,t,e);return i}}else if(u>=c){if(!1===(i=Ue[t.op].call(t,l,f,e)).test)throw new Fe("Test operation failed","TEST_OPERATION_FAILED",a,t,e);return i}if(l=l[f],r&&u0)throw new Fe('Operation `path` property must start with "/"',"OPERATION_PATH_INVALID",t,e,r);if(("move"===e.op||"copy"===e.op)&&"string"!=typeof e.from)throw new Fe("Operation `from` property is not present (applicable in `move` and `copy` operations)","OPERATION_FROM_REQUIRED",t,e,r);if(("add"===e.op||"replace"===e.op||"test"===e.op)&&void 0===e.value)throw new Fe("Operation `value` property is not present (applicable in `add`, `replace` and `test` operations)","OPERATION_VALUE_REQUIRED",t,e,r);if(("add"===e.op||"replace"===e.op||"test"===e.op)&&De(e.value))throw new Fe("Operation `value` property is not present (applicable in `add`, `replace` and `test` operations)","OPERATION_VALUE_CANNOT_CONTAIN_UNDEFINED",t,e,r);if(r)if("add"==e.op){var o=e.path.split("/").length,a=n.split("/").length;if(o!==a+1&&o!==a)throw new Fe("Cannot perform an `add` operation at the desired path","OPERATION_PATH_CANNOT_ADD",t,e,r)}else if("replace"===e.op||"remove"===e.op||"_get"===e.op){if(e.path!==n)throw new Fe("Cannot perform the operation at a path that does not exist","OPERATION_PATH_UNRESOLVABLE",t,e,r)}else if("move"===e.op||"copy"===e.op){var 
i=Ke([{op:"_get",path:e.from,value:void 0}],r);if(i&&"OPERATION_PATH_UNRESOLVABLE"===i.name)throw new Fe("Cannot perform the operation from a path that does not exist","OPERATION_FROM_UNRESOLVABLE",t,e,r)}}function Ke(e,t,r){try{if(!Array.isArray(e))throw new Fe("Patch sequence must be an array","SEQUENCE_NOT_AN_ARRAY");if(t)We(Te(t),Te(e),r||!0);else{r=r||Je;for(var n=0;n0&&(e.patches=[],e.callback&&e.callback(n)),n}function rt(e,t,r,n,o){if(t!==e){"function"==typeof t.toJSON&&(t=t.toJSON());for(var a=Ne(t),i=Ne(e),s=!1,l=i.length-1;l>=0;l--){var u=e[p=i[l]];if(!Ie(t,p)||void 0===t[p]&&void 0!==u&&!1===Array.isArray(t))Array.isArray(e)===Array.isArray(t)?(o&&r.push({op:"test",path:n+"/"+Re(p),value:Te(u)}),r.push({op:"remove",path:n+"/"+Re(p)}),s=!0):(o&&r.push({op:"test",path:n,value:e}),r.push({op:"replace",path:n,value:t}),!0);else{var c=t[p];"object"==typeof u&&null!=u&&"object"==typeof c&&null!=c&&Array.isArray(u)===Array.isArray(c)?rt(u,c,r,n+"/"+Re(p),o):u!==c&&(!0,o&&r.push({op:"test",path:n+"/"+Re(p),value:Te(u)}),r.push({op:"replace",path:n+"/"+Re(p),value:Te(c)}))}}if(s||a.length!=i.length)for(l=0;l0){var o=t(e,r[r.length-1],r);o&&(n=j()(n).call(n,o))}if(Array.isArray(e)){var a=A()(e).call(e,(function(e,n){return pt(e,t,j()(r).call(r,n))}));a&&(n=j()(n).call(n,a))}else if(mt(e)){var i,s=A()(i=C()(e)).call(i,(function(n){return pt(e[n],t,j()(r).call(r,n))}));s&&(n=j()(n).call(n,s))}return n=ht(n)}function ft(e){return Array.isArray(e)?e:[e]}function ht(e){var t;return j()(t=[]).apply(t,q()(A()(e).call(e,(function(e){return Array.isArray(e)?ht(e):e}))))}function dt(e){return P()(e).call(e,(function(e){return void 0!==e}))}function mt(e){return e&&"object"===h()(e)}function gt(e){return e&&"function"==typeof e}function vt(e){if(wt(e)){var t=e.op;return"add"===t||"remove"===t||"replace"===t}return!1}function yt(e){return vt(e)||wt(e)&&"mutation"===e.type}function bt(e){return 
yt(e)&&("add"===e.op||"replace"===e.op||"merge"===e.op||"mergeDeep"===e.op)}function wt(e){return e&&"object"===h()(e)}function Et(e,t){try{return Ve(e,t)}catch(e){return console.error(e),{}}}var xt=r(28886),_t=r.n(xt),St=r(37659),At=r.n(St),kt=r(8575);function Ct(e,t){function r(){Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=(new Error).stack;for(var e=arguments.length,r=new Array(e),n=0;n-1&&-1===Nt.indexOf(r)||Tt.indexOf(n)>-1||Pt.some((function(e){return n.indexOf(e)>-1}))}function Mt(e,t){var r,n=e.split("#"),o=m()(n,2),a=o[0],i=o[1],s=kt.resolve(a||"",t||"");return i?j()(r="".concat(s,"#")).call(r,i):s}var Dt="application/json, application/yaml",Lt=/^([a-z]+:\/\/|\/\/)/i,Bt=Ct("JSONRefError",(function(e,t,r){this.originalError=r,Ee()(this,t||{})})),Ft={},zt=new(_t()),Ut=[function(e){return"paths"===e[0]&&"responses"===e[3]&&"examples"===e[5]},function(e){return"paths"===e[0]&&"responses"===e[3]&&"content"===e[5]&&"example"===e[7]},function(e){return"paths"===e[0]&&"responses"===e[3]&&"content"===e[5]&&"examples"===e[7]&&"value"===e[9]},function(e){return"paths"===e[0]&&"requestBody"===e[3]&&"content"===e[4]&&"example"===e[6]},function(e){return"paths"===e[0]&&"requestBody"===e[3]&&"content"===e[4]&&"examples"===e[6]&&"value"===e[8]},function(e){return"paths"===e[0]&&"parameters"===e[2]&&"example"===e[4]},function(e){return"paths"===e[0]&&"parameters"===e[3]&&"example"===e[5]},function(e){return"paths"===e[0]&&"parameters"===e[2]&&"examples"===e[4]&&"value"===e[6]},function(e){return"paths"===e[0]&&"parameters"===e[3]&&"examples"===e[5]&&"value"===e[7]},function(e){return"paths"===e[0]&&"parameters"===e[2]&&"content"===e[4]&&"example"===e[6]},function(e){return"paths"===e[0]&&"parameters"===e[2]&&"content"===e[4]&&"examples"===e[6]&&"value"===e[8]},function(e){return"paths"===e[0]&&"parameters"===e[3]&&"content"===e[4]&&"example"===e[7]},function(e){return"paths"===e[0]&&"parameters"===e[3]&&"content"===e[5]&&"examples"===e[
7]&&"value"===e[9]}],qt={key:"$ref",plugin:function(e,t,r,n){var o=n.getInstance(),a=$()(r).call(r,0,-1);if(!Rt(a)&&!function(e){return Ut.some((function(t){return t(e)}))}(a)){var i=n.getContext(r).baseDoc;if("string"!=typeof e)return new Bt("$ref: must be a string (JSON-Ref)",{$ref:e,baseDoc:i,fullPath:r});var s,l,u,c=Jt(e),p=c[0],f=c[1]||"";try{s=i||p?Wt(p,i):null}catch(t){return Ht(t,{pointer:f,$ref:e,basePath:s,fullPath:r})}if(function(e,t,r,n){var o,a,i=zt.get(n);i||(i={},zt.set(n,i));var s=function(e){if(0===e.length)return"";return"/".concat(A()(e).call(e,Xt).join("/"))}(r),l=j()(o="".concat(t||"","#")).call(o,e),u=s.replace(/allOf\/\d+\/?/g,""),c=n.contextTree.get([]).baseDoc;if(t===c&&er(u,e))return!0;var p="",f=r.some((function(e){var t;return p=j()(t="".concat(p,"/")).call(t,Xt(e)),i[p]&&i[p].some((function(e){return er(e,l)||er(l,e)}))}));if(f)return!0;return void(i[u]=j()(a=i[u]||[]).call(a,l))}(f,s,a,n)&&!o.useCircularStructures){var h=Mt(e,s);return e===h?null:it.replace(r,h)}if(null==s?(u=Yt(f),void 0===(l=n.get(u))&&(l=new Bt("Could not resolve reference: ".concat(e),{pointer:f,$ref:e,baseDoc:i,fullPath:r}))):l=null!=(l=Kt(s,f)).__value?l.__value:l.catch((function(t){throw Ht(t,{pointer:f,$ref:e,baseDoc:i,fullPath:r})})),l instanceof Error)return[it.remove(r),l];var d=Mt(e,s),m=it.replace(a,l,{$$ref:d});if(s&&s!==i)return[m,it.context(a,{baseDoc:s})];try{if(!function(e,t){var r=[e];return t.path.reduce((function(e,t){return r.push(e[t]),e[t]}),e),n(t.value);function n(e){return it.isObject(e)&&(r.indexOf(e)>=0||C()(e).some((function(t){return n(e[t])})))}}(n.state,m)||o.useCircularStructures)return m}catch(e){return null}}}},Vt=Ee()(qt,{docCache:Ft,absoluteify:Wt,clearCache:function(e){void 0!==e?delete Ft[e]:C()(Ft).forEach((function(e){delete Ft[e]}))},JSONRefError:Bt,wrapError:Ht,getDoc:Gt,split:Jt,extractFromDoc:Kt,fetchJSON:function(e){return fetch(e,{headers:{Accept:Dt},loadSpec:!0}).then((function(e){return 
e.text()})).then((function(e){return D.ZP.load(e)}))},extract:Zt,jsonPointerToArray:Yt,unescapeJsonPointerToken:Qt});const $t=Vt;function Wt(e,t){if(!Lt.test(e)){var r;if(!t)throw new Bt(j()(r="Tried to resolve a relative URL, without having a basePath. path: '".concat(e,"' basePath: '")).call(r,t,"'"));return kt.resolve(t,e)}return e}function Ht(e,t){var r,n;e&&e.response&&e.response.body?r=j()(n="".concat(e.response.body.code," ")).call(n,e.response.body.message):r=e.message;return new Bt("Could not resolve reference: ".concat(r),t,e)}function Jt(e){return(e+"").split("#")}function Kt(e,t){var r=Ft[e];if(r&&!it.isPromise(r))try{var n=Zt(t,r);return Ee()(Ae().resolve(n),{__value:n})}catch(e){return Ae().reject(e)}return Gt(e).then((function(e){return Zt(t,e)}))}function Gt(e){var t=Ft[e];return t?it.isPromise(t)?t:Ae().resolve(t):(Ft[e]=Vt.fetchJSON(e).then((function(t){return Ft[e]=t,t})),Ft[e])}function Zt(e,t){var r=Yt(e);if(r.length<1)return t;var n=it.getIn(t,r);if(void 0===n)throw new Bt("Could not resolve pointer: ".concat(e," does not exist in document"),{pointer:e});return n}function Yt(e){var t;if("string"!=typeof e)throw new TypeError("Expected a string, got a ".concat(h()(e)));return"/"===e[0]&&(e=e.substr(1)),""===e?[]:A()(t=e.split("/")).call(t,Qt)}function Qt(e){return"string"!=typeof e?e:new(At())("=".concat(e.replace(/~1/g,"/").replace(/~0/g,"~"))).get("")}function Xt(e){var t,r=new(At())([["",e.replace(/~/g,"~0").replace(/\//g,"~1")]]);return $()(t=r.toString()).call(t,1)}function er(e,t){if(!(r=t)||"/"===r||"#"===r)return!0;var r,n=e.charAt(t.length),o=$()(t).call(t,-1);return 0===e.indexOf(t)&&(!n||"/"===n||"#"===n)&&"#"!==o}const tr={key:"allOf",plugin:function(e,t,r,n,o){if(!o.meta||!o.meta.$$ref){var a=$()(r).call(r,0,-1);if(!Rt(a)){if(!Array.isArray(e)){var i=new TypeError("allOf must be an array");return i.fullPath=r,i}var s=!1,l=o.value;if(a.forEach((function(e){l&&(l=l[e])})),l=me()({},l),0!==C()(l).length){delete l.allOf;var 
u,c,p=[];if(p.push(n.replace(a,{})),e.forEach((function(e,t){if(!n.isObject(e)){if(s)return null;s=!0;var o=new TypeError("Elements in allOf must be objects");return o.fullPath=r,p.push(o)}p.push(n.mergeDeep(a,e));var i=function(e,t){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},n=r.specmap,o=r.getBaseUrlForNodePath,a=void 0===o?function(e){var r;return n.getContext(j()(r=[]).call(r,q()(t),q()(e))).baseDoc}:o,i=r.targetKeys,s=void 0===i?["$ref","$$ref"]:i,l=[];return jt()(e).forEach((function(){if(v()(s).call(s,this.key)&&"string"==typeof this.node){var e=this.path,r=j()(t).call(t,this.path),o=Mt(this.node,a(e));l.push(n.replace(r,o))}})),l}(e,$()(r).call(r,0,-1),{getBaseUrlForNodePath:function(e){var o;return n.getContext(j()(o=[]).call(o,q()(r),[t],q()(e))).baseDoc},specmap:n});p.push.apply(p,q()(i))})),l.example)p.push(n.remove(j()(u=[]).call(u,a,"example")));if(p.push(n.mergeDeep(a,l)),!l.$$ref)p.push(n.remove(j()(c=[]).call(c,a,"$$ref")));return p}}}}},rr={key:"parameters",plugin:function(e,t,r,n){if(Array.isArray(e)&&e.length){var o=Ee()([],e),a=$()(r).call(r,0,-1),i=me()({},it.getIn(n.spec,a));return e.forEach((function(e,t){try{o[t].default=n.parameterMacro(i,e)}catch(e){var a=new Error(e);return a.fullPath=r,a}})),it.replace(r,o)}return it.replace(r,e)}},nr={key:"properties",plugin:function(e,t,r,n){var o=me()({},e);for(var a in e)try{o[a].default=n.modelPropertyMacro(o[a])}catch(e){var i=new Error(e);return i.fullPath=r,i}return it.replace(r,o)}};var or=function(){function e(t){ve()(this,e),this.root=ar(t||{})}return be()(e,[{key:"set",value:function(e,t){var r=this.getParent(e,!0);if(r){var n=e[e.length-1],o=r.children;o[n]?ir(o[n],t,r):o[n]=ar(t,r)}else ir(this.root,t,null)}},{key:"get",value:function(e){if((e=e||[]).length<1)return this.root.value;for(var t,r,n=this.root,o=0;o1?r-1:0),o=1;o1?n-1:0),a=1;a0}))}},{key:"nextPromisedPatch",value:function(){var e;if(this.promisedPatches.length>0)return 
Ae().race(A()(e=this.promisedPatches).call(e,(function(e){return e.value})))}},{key:"getPluginHistory",value:function(e){var t=this.constructor.getPluginName(e);return this.pluginHistory[t]||[]}},{key:"getPluginRunCount",value:function(e){return this.getPluginHistory(e).length}},{key:"getPluginHistoryTip",value:function(e){var t=this.getPluginHistory(e);return t&&t[t.length-1]||{}}},{key:"getPluginMutationIndex",value:function(e){var t=this.getPluginHistoryTip(e).mutationIndex;return"number"!=typeof t?-1:t}},{key:"updatePluginHistory",value:function(e,t){var r=this.constructor.getPluginName(e);this.pluginHistory[r]=this.pluginHistory[r]||[],this.pluginHistory[r].push(t)}},{key:"updatePatches",value:function(e){var t=this;it.normalizeArray(e).forEach((function(e){if(e instanceof Error)t.errors.push(e);else try{if(!it.isObject(e))return void t.debug("updatePatches","Got a non-object patch",e);if(t.showDebug&&t.allPatches.push(e),it.isPromise(e.value))return t.promisedPatches.push(e),void t.promisedPatchThen(e);if(it.isContextPatch(e))return void t.setContext(e.path,e.value);if(it.isMutation(e))return void t.updateMutations(e)}catch(e){console.error(e),t.errors.push(e)}}))}},{key:"updateMutations",value:function(e){"object"===h()(e.value)&&!Array.isArray(e.value)&&this.allowMetaPatches&&(e.value=me()({},e.value));var t=it.applyPatch(this.state,e,{allowMetaPatches:this.allowMetaPatches});t&&(this.mutations.push(e),this.state=t)}},{key:"removePromisedPatch",value:function(e){var t,r=this.promisedPatches.indexOf(e);r<0?this.debug("Tried to remove a promisedPatch that isn't there!"):Ce()(t=this.promisedPatches).call(t,r,1)}},{key:"promisedPatchThen",value:function(e){var t=this;return e.value=e.value.then((function(r){var n=me()(me()({},e),{},{value:r});t.removePromisedPatch(e),t.updatePatches(n)})).catch((function(r){t.removePromisedPatch(e),t.updatePatches(r)})),e.value}},{key:"getMutations",value:function(e,t){var r;return e=e||0,"number"!=typeof 
t&&(t=this.mutations.length),$()(r=this.mutations).call(r,e,t)}},{key:"getCurrentMutations",value:function(){return this.getMutationsForPlugin(this.getCurrentPlugin())}},{key:"getMutationsForPlugin",value:function(e){var t=this.getPluginMutationIndex(e);return this.getMutations(t+1)}},{key:"getCurrentPlugin",value:function(){return this.currentPlugin}},{key:"getLib",value:function(){return this.libMethods}},{key:"_get",value:function(e){return it.getIn(this.state,e)}},{key:"_getContext",value:function(e){return this.contextTree.get(e)}},{key:"setContext",value:function(e,t){return this.contextTree.set(e,t)}},{key:"_hasRun",value:function(e){return this.getPluginRunCount(this.getCurrentPlugin())>(e||0)}},{key:"dispatch",value:function(){var e,t=this,r=this,n=this.nextPlugin();if(!n){var o=this.nextPromisedPatch();if(o)return o.then((function(){return t.dispatch()})).catch((function(){return t.dispatch()}));var a={spec:this.state,errors:this.errors};return this.showDebug&&(a.patches=this.allPatches),Ae().resolve(a)}if(r.pluginCount=r.pluginCount||{},r.pluginCount[n]=(r.pluginCount[n]||0)+1,r.pluginCount[n]>100)return Ae().resolve({spec:r.state,errors:j()(e=r.errors).call(e,new Error("We've reached a hard limit of ".concat(100," plugin runs")))});if(n!==this.currentPlugin&&this.promisedPatches.length){var i,s=A()(i=this.promisedPatches).call(i,(function(e){return e.value}));return Ae().all(A()(s).call(s,(function(e){return e.then(sr,sr)}))).then((function(){return t.dispatch()}))}return function(){r.currentPlugin=n;var e=r.getCurrentMutations(),t=r.mutations.length-1;try{if(n.isGenerator){var o,a=p()(n(e,r.getLib()));try{for(a.s();!(o=a.n()).done;){l(o.value)}}catch(e){a.e(e)}finally{a.f()}}else{l(n(e,r.getLib()))}}catch(e){console.error(e),l([Ee()(Object.create(e),{plugin:n})])}finally{r.updatePluginHistory(n,{mutationIndex:t})}return r.dispatch()}();function l(e){e&&(e=it.fullyNormalizeArray(e),r.updatePatches(e,n))}}}],[{key:"getPluginName",value:function(e){return 
e.pluginName}},{key:"getPatchesOfType",value:function(e,t){return P()(e).call(e,t)}}]),e}();var ur={refs:$t,allOf:tr,parameters:rr,properties:nr},cr=r(23159);function pr(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=t.requestInterceptor,n=t.responseInterceptor,o=e.withCredentials?"include":"same-origin";return function(t){return e({url:t,loadSpec:!0,requestInterceptor:r,responseInterceptor:n,headers:{Accept:Dt},credentials:o}).then((function(e){return e.body}))}}function fr(e){var t=e.fetch,r=e.spec,n=e.url,o=e.mode,a=e.allowMetaPatches,i=void 0===a||a,l=e.pathDiscriminator,c=e.modelPropertyMacro,p=e.parameterMacro,f=e.requestInterceptor,h=e.responseInterceptor,d=e.skipNormalization,m=e.useCircularStructures,g=e.http,v=e.baseDoc;return v=v||n,g=t||g||Y,r?y(r):pr(g,{requestInterceptor:f,responseInterceptor:h})(v).then(y);function y(e){v&&(ur.refs.docCache[v]=e),ur.refs.fetchJSON=pr(g,{requestInterceptor:f,responseInterceptor:h});var t,r=[ur.refs];return"function"==typeof p&&r.push(ur.parameters),"function"==typeof c&&r.push(ur.properties),"strict"!==o&&r.push(ur.allOf),(t={spec:e,context:{baseDoc:v},plugins:r,allowMetaPatches:i,pathDiscriminator:l,parameterMacro:p,modelPropertyMacro:c,useCircularStructures:m},new lr(t).dispatch()).then(d?function(){var e=s()(u().mark((function e(t){return u().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.abrupt("return",t);case 1:case"end":return e.stop()}}),e)})));return function(t){return e.apply(this,arguments)}}():cr.K1)}}var hr=r(88436),dr=r.n(hr),mr=r(27361),gr=r.n(mr),vr=r(76489);function yr(e){return"[object Object]"===Object.prototype.toString.call(e)}function br(e){var t,r;return!1!==yr(e)&&(void 0===(t=e.constructor)||!1!==yr(r=t.prototype)&&!1!==r.hasOwnProperty("isPrototypeOf"))}const wr={body:function(e){var t=e.req,r=e.value;t.body=r},header:function(e){var t=e.req,r=e.parameter,n=e.value;t.headers=t.headers||{},void 0!==n&&(t.headers[r.name]=n)},query:function(e){var 
t=e.req,r=e.value,n=e.parameter;t.query=t.query||{},!1===r&&"boolean"===n.type&&(r="false");0===r&&["number","integer"].indexOf(n.type)>-1&&(r="0");if(r)t.query[n.name]={collectionFormat:n.collectionFormat,value:r};else if(n.allowEmptyValue&&void 0!==r){var o=n.name;t.query[o]=t.query[o]||{},t.query[o].allowEmptyValue=!0}},path:function(e){var t=e.req,r=e.value,n=e.parameter;t.url=t.url.split("{".concat(n.name,"}")).join(encodeURIComponent(r))},formData:function(e){var t=e.req,r=e.value,n=e.parameter;(r||n.allowEmptyValue)&&(t.form=t.form||{},t.form[n.name]={value:r,allowEmptyValue:n.allowEmptyValue,collectionFormat:n.collectionFormat})}};function Er(e,t){return v()(t).call(t,"application/json")?"string"==typeof e?e:_()(e):e.toString()}function xr(e){var t=e.req,r=e.value,n=e.parameter,o=n.name,a=n.style,i=n.explode,s=n.content;if(s){var l=C()(s)[0];t.url=t.url.split("{".concat(o,"}")).join(J(Er(r,l),{escape:!0}))}else{var u=K({key:n.name,value:r,style:a||"simple",explode:i||!1,escape:!0});t.url=t.url.split("{".concat(o,"}")).join(u)}}function _r(e){var t=e.req,r=e.value,n=e.parameter;if(t.query=t.query||{},n.content){var o=C()(n.content)[0];t.query[n.name]=Er(r,o)}else if(!1===r&&(r="false"),0===r&&(r="0"),r){var a=n.style,i=n.explode,s=n.allowReserved;t.query[n.name]={value:r,serializationOption:{style:a,explode:i,allowReserved:s}}}else if(n.allowEmptyValue&&void 0!==r){var l=n.name;t.query[l]=t.query[l]||{},t.query[l].allowEmptyValue=!0}}var Sr=["accept","authorization","content-type"];function Ar(e){var t=e.req,r=e.parameter,n=e.value;if(t.headers=t.headers||{},!(Sr.indexOf(r.name.toLowerCase())>-1))if(r.content){var o=C()(r.content)[0];t.headers[r.name]=Er(n,o)}else void 0!==n&&(t.headers[r.name]=K({key:r.name,value:n,style:r.style||"simple",explode:void 0!==r.explode&&r.explode,escape:!1}))}function kr(e){var t=e.req,r=e.parameter,n=e.value;t.headers=t.headers||{};var o=h()(n);if(r.content){var 
a,i=C()(r.content)[0];t.headers.Cookie=j()(a="".concat(r.name,"=")).call(a,Er(n,i))}else if("undefined"!==o){var s="object"===o&&!Array.isArray(n)&&r.explode?"":"".concat(r.name,"=");t.headers.Cookie=s+K({key:r.name,value:n,escape:!1,style:r.style||"form",explode:void 0!==r.explode&&r.explode})}}var Cr=r(92381),Or=r.n(Cr);const jr=(void 0!==Or()?Or():"undefined"!=typeof self?self:window).btoa;function Ir(e,t){var r=e.operation,n=e.requestBody,o=e.securities,a=e.spec,i=e.attachContentTypeForEmptyPayload,s=e.requestContentType;t=function(e){var t=e.request,r=e.securities,n=void 0===r?{}:r,o=e.operation,a=void 0===o?{}:o,i=e.spec,s=me()({},t),l=n.authorized,u=void 0===l?{}:l,c=a.security||i.security||[],p=u&&!!C()(u).length,f=gr()(i,["components","securitySchemes"])||{};if(s.headers=s.headers||{},s.query=s.query||{},!C()(n).length||!p||!c||Array.isArray(a.security)&&!a.security.length)return t;return c.forEach((function(e){C()(e).forEach((function(e){var t=u[e],r=f[e];if(t){var n=t.value||t,o=r.type;if(t)if("apiKey"===o)"query"===r.in&&(s.query[r.name]=n),"header"===r.in&&(s.headers[r.name]=n),"cookie"===r.in&&(s.cookies[r.name]=n);else if("http"===o){if(/^basic$/i.test(r.scheme)){var a,i=n.username||"",l=n.password||"",c=jr(j()(a="".concat(i,":")).call(a,l));s.headers.Authorization="Basic ".concat(c)}/^bearer$/i.test(r.scheme)&&(s.headers.Authorization="Bearer ".concat(n))}else if("oauth2"===o||"openIdConnect"===o){var p,h=t.token||{},d=h[r["x-tokenName"]||"access_token"],m=h.token_type;m&&"bearer"!==m.toLowerCase()||(m="Bearer"),s.headers.Authorization=j()(p="".concat(m," ")).call(p,d)}}}))})),s}({request:t,securities:o,operation:r,spec:a});var l=r.requestBody||{},u=C()(l.content||{}),c=s&&u.indexOf(s)>-1;if(n||i){if(s&&c)t.headers["Content-Type"]=s;else if(!s){var p=u[0];p&&(t.headers["Content-Type"]=p,s=p)}}else s&&c&&(t.headers["Content-Type"]=s);if(!e.responseContentType&&r.responses){var f,d=P()(f=N()(r.responses)).call(f,(function(e){var 
t=m()(e,2),r=t[0],n=t[1],o=parseInt(r,10);return o>=200&&o<300&&br(n.content)})).reduce((function(e,t){var r=m()(t,2)[1];return j()(e).call(e,C()(r.content))}),[]);d.length>0&&(t.headers.accept=d.join(", "))}if(n)if(s){if(u.indexOf(s)>-1)if("application/x-www-form-urlencoded"===s||"multipart/form-data"===s)if("object"===h()(n)){var g=(l.content[s]||{}).encoding||{};t.form={},C()(n).forEach((function(e){t.form[e]={value:n[e],encoding:g[e]||{}}}))}else t.form=n;else t.body=n}else t.body=n;return t}function Nr(e,t){var r,n,o=e.spec,a=e.operation,i=e.securities,s=e.requestContentType,l=e.responseContentType,u=e.attachContentTypeForEmptyPayload;if(t=function(e){var t=e.request,r=e.securities,n=void 0===r?{}:r,o=e.operation,a=void 0===o?{}:o,i=e.spec,s=me()({},t),l=n.authorized,u=void 0===l?{}:l,c=n.specSecurity,p=void 0===c?[]:c,f=a.security||p,h=u&&!!C()(u).length,d=i.securityDefinitions;if(s.headers=s.headers||{},s.query=s.query||{},!C()(n).length||!h||!f||Array.isArray(a.security)&&!a.security.length)return t;return f.forEach((function(e){C()(e).forEach((function(e){var t=u[e];if(t){var r=t.token,n=t.value||t,o=d[e],a=o.type,i=o["x-tokenName"]||"access_token",l=r&&r[i],c=r&&r.token_type;if(t)if("apiKey"===a){var p="query"===o.in?"query":"headers";s[p]=s[p]||{},s[p][o.name]=n}else if("basic"===a)if(n.header)s.headers.authorization=n.header;else{var f,h=n.username||"",m=n.password||"";n.base64=jr(j()(f="".concat(h,":")).call(f,m)),s.headers.authorization="Basic ".concat(n.base64)}else if("oauth2"===a&&l){var g;c=c&&"bearer"!==c.toLowerCase()?c:"Bearer",s.headers.authorization=j()(g="".concat(c," ")).call(g,l)}}}))})),s}({request:t,securities:i,operation:a,spec:o}),t.body||t.form||u)if(s)t.headers["Content-Type"]=s;else if(Array.isArray(a.consumes)){var c=m()(a.consumes,1);t.headers["Content-Type"]=c[0]}else if(Array.isArray(o.consumes)){var p=m()(o.consumes,1);t.headers["Content-Type"]=p[0]}else 
a.parameters&&P()(r=a.parameters).call(r,(function(e){return"file"===e.type})).length?t.headers["Content-Type"]="multipart/form-data":a.parameters&&P()(n=a.parameters).call(n,(function(e){return"formData"===e.in})).length&&(t.headers["Content-Type"]="application/x-www-form-urlencoded");else if(s){var f,h,d=a.parameters&&P()(f=a.parameters).call(f,(function(e){return"body"===e.in})).length>0,g=a.parameters&&P()(h=a.parameters).call(h,(function(e){return"formData"===e.in})).length>0;(d||g)&&(t.headers["Content-Type"]=s)}return!l&&Array.isArray(a.produces)&&a.produces.length>0&&(t.headers.accept=a.produces.join(", ")),t}var Tr=["http","fetch","spec","operationId","pathName","method","parameters","securities"],Pr=function(e){return Array.isArray(e)?e:[]},Rr=Ct("OperationNotFoundError",(function(e,t,r){this.originalError=r,Ee()(this,t||{})})),Mr={buildRequest:Lr};function Dr(e){var t=e.http,r=e.fetch,n=e.spec,o=e.operationId,a=e.pathName,i=e.method,s=e.parameters,l=e.securities,u=dr()(e,Tr),c=t||r||Y;a&&i&&!o&&(o=(0,cr.nc)(a,i));var p=Mr.buildRequest(me()({spec:n,operationId:o,parameters:s,securities:l,http:c},u));return p.body&&(br(p.body)||Array.isArray(p.body))&&(p.body=_()(p.body)),c(p)}function Lr(e){var t,r,n=e.spec,o=e.operationId,i=e.responseContentType,s=e.scheme,l=e.requestInterceptor,u=e.responseInterceptor,c=e.contextUrl,p=e.userFetch,f=e.server,h=e.serverVariables,d=e.http,g=e.signal,v=e.parameters,y=e.parameterBuilders,b=(0,cr.z6)(n);y||(y=b?a:wr);var w={url:"",credentials:d&&d.withCredentials?"include":"same-origin",headers:{},cookies:{}};g&&(w.signal=g),l&&(w.requestInterceptor=l),u&&(w.responseInterceptor=u),p&&(w.userFetch=p);var E=(0,cr.$r)(n,o);if(!E)throw new Rr("Operation ".concat(o," not found"));var x,_=E.operation,S=void 0===_?{}:_,k=E.method,O=E.pathName;if(w.url+=(x={spec:n,scheme:s,contextUrl:c,server:f,serverVariables:h,pathName:O,method:k},(0,cr.z6)(x.spec)?function(e){var 
t=e.spec,r=e.pathName,n=e.method,o=e.server,a=e.contextUrl,i=e.serverVariables,s=void 0===i?{}:i,l=gr()(t,["paths",r,(n||"").toLowerCase(),"servers"])||gr()(t,["paths",r,"servers"])||gr()(t,["servers"]),u="",c=null;if(o&&l&&l.length){var p=A()(l).call(l,(function(e){return e.url}));p.indexOf(o)>-1&&(u=o,c=l[p.indexOf(o)])}if(!u&&l&&l.length){u=l[0].url;var f=m()(l,1);c=f[0]}return u.indexOf("{")>-1&&function(e){for(var t,r=[],n=/{([^}]+)}/g;t=n.exec(e);)r.push(t[1]);return r}(u).forEach((function(e){if(c.variables&&c.variables[e]){var t=c.variables[e],r=s[e]||t.default,n=new RegExp("{".concat(e,"}"),"g");u=u.replace(n,r)}})),function(){var e,t,r=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",o=r&&n?kt.parse(kt.resolve(n,r)):kt.parse(r),a=kt.parse(n),i=Br(o.protocol)||Br(a.protocol)||"",s=o.host||a.host,l=o.pathname||"";return"/"===(e=i&&s?j()(t="".concat(i,"://")).call(t,s+l):l)[e.length-1]?$()(e).call(e,0,-1):e}(u,a)}(x):function(e){var t,r,n=e.spec,o=e.scheme,a=e.contextUrl,i=void 0===a?"":a,s=kt.parse(i),l=Array.isArray(n.schemes)?n.schemes[0]:null,u=o||l||Br(s.protocol)||"http",c=n.host||s.host||"",p=n.basePath||"";return"/"===(t=u&&c?j()(r="".concat(u,"://")).call(r,c+p):p)[t.length-1]?$()(t).call(t,0,-1):t}(x)),!o)return delete w.cookies,w;w.url+=O,w.method="".concat(k).toUpperCase(),v=v||{};var I=n.paths[O]||{};i&&(w.headers.accept=i);var N=function(e){var t={};e.forEach((function(e){t[e.in]||(t[e.in]={}),t[e.in][e.name]=e}));var r=[];return C()(t).forEach((function(e){C()(t[e]).forEach((function(n){r.push(t[e][n])}))})),r}(j()(t=j()(r=[]).call(r,Pr(S.parameters))).call(t,Pr(I.parameters)));N.forEach((function(e){var t,r,o=y[e.in];if("body"===e.in&&e.schema&&e.schema.properties&&(t=v),void 0===(t=e&&e.name&&v[e.name]))t=e&&e.name&&v[j()(r="".concat(e.in,".")).call(r,e.name)];else if(function(e,t){return P()(t).call(t,(function(t){return t.name===e}))}(e.name,N).length>1){var 
a;console.warn(j()(a="Parameter '".concat(e.name,"' is ambiguous because the defined spec has more than one parameter with the name: '")).call(a,e.name,"' and the passed-in parameter values did not define an 'in' value."))}if(null!==t){if(void 0!==e.default&&void 0===t&&(t=e.default),void 0===t&&e.required&&!e.allowEmptyValue)throw new Error("Required parameter ".concat(e.name," is not provided"));if(b&&e.schema&&"object"===e.schema.type&&"string"==typeof t)try{t=JSON.parse(t)}catch(e){throw new Error("Could not parse object parameter value string as JSON")}o&&o({req:w,parameter:e,value:t,operation:S,spec:n})}}));var T=me()(me()({},e),{},{operation:S});if((w=b?Ir(T,w):Nr(T,w)).cookies&&C()(w.cookies).length){var R=C()(w.cookies).reduce((function(e,t){var r=w.cookies[t];return e+(e?"&":"")+vr.serialize(t,r)}),"");w.headers.Cookie=R}return w.cookies&&delete w.cookies,fe(w),w}var Br=function(e){return e?e.replace(/\W/g,""):null};function Fr(e,t){return zr.apply(this,arguments)}function zr(){return zr=s()(u().mark((function e(t,r){var n,o,a,i,s,l,c,p,f,h,d,m,g=arguments;return u().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return n=g.length>2&&void 0!==g[2]?g[2]:{},o=n.returnEntireTree,a=n.baseDoc,i=n.requestInterceptor,s=n.responseInterceptor,l=n.parameterMacro,c=n.modelPropertyMacro,p=n.useCircularStructures,f={pathDiscriminator:r,baseDoc:a,requestInterceptor:i,responseInterceptor:s,parameterMacro:l,modelPropertyMacro:c,useCircularStructures:p},h=(0,cr.K1)({spec:t}),d=h.spec,e.next=6,fr(me()(me()({},f),{},{spec:d,allowMetaPatches:!0,skipNormalization:!0}));case 6:return m=e.sent,!o&&Array.isArray(r)&&r.length&&(m.spec=gr()(m.spec,r)||null),e.abrupt("return",m);case 9:case"end":return e.stop()}}),e)}))),zr.apply(this,arguments)}var Ur=r(34852);function qr(e){let{configs:t,getConfigs:r}=e;return{fn:{fetch:(n=Y,o=t.preFetch,a=t.postFetch,a=a||function(e){return e},o=o||function(e){return e},function(e){return"string"==typeof 
e&&(e={url:e}),Z.mergeInQueryOrForm(e),e=o(e),a(n(e))}),buildRequest:Lr,execute:Dr,resolve:fr,resolveSubtree:function(e,t,n){if(void 0===n){const e=r();n={modelPropertyMacro:e.modelPropertyMacro,parameterMacro:e.parameterMacro,requestInterceptor:e.requestInterceptor,responseInterceptor:e.responseInterceptor}}for(var o=arguments.length,a=new Array(o>3?o-3:0),i=3;i{"use strict";r.r(t),r.d(t,{default:()=>o});var n=r(90242);function o(){return{fn:{shallowEqualKeys:n.be}}}},48347:(e,t,r)=>{"use strict";r.r(t),r.d(t,{getDisplayName:()=>n});const n=e=>e.displayName||e.name||"Component"},73420:(e,t,r)=>{"use strict";r.r(t),r.d(t,{default:()=>u});var n=r(35627),o=r.n(n),a=r(90242),i=r(55776),s=r(48347),l=r(60314);const u=e=>{let{getComponents:t,getStore:r,getSystem:n}=e;const u=(c=(0,i.getComponent)(n,r,t),(0,a.HP)(c,(function(){for(var e=arguments.length,t=new Array(e),r=0;r(0,l.Z)(e,(function(){for(var e=arguments.length,t=new Array(e),r=0;r{"use strict";r.r(t),r.d(t,{getComponent:()=>ne,render:()=>re,withMappedContainer:()=>te});var n=r(23101),o=r.n(n),a=r(28222),i=r.n(a),s=r(67294),l=r(73935),u=r(97779),c=s.createContext(null);var p=function(e){e()},f=function(){return p},h={notify:function(){}};var d=function(){function e(e,t){this.store=e,this.parentSub=t,this.unsubscribe=null,this.listeners=h,this.handleChangeWrapper=this.handleChangeWrapper.bind(this)}var t=e.prototype;return t.addNestedSub=function(e){return this.trySubscribe(),this.listeners.subscribe(e)},t.notifyNestedSubs=function(){this.listeners.notify()},t.handleChangeWrapper=function(){this.onStateChange&&this.onStateChange()},t.isSubscribed=function(){return Boolean(this.unsubscribe)},t.trySubscribe=function(){this.unsubscribe||(this.unsubscribe=this.parentSub?this.parentSub.addNestedSub(this.handleChangeWrapper):this.store.subscribe(this.handleChangeWrapper),this.listeners=function(){var e=f(),t=null,r=null;return{clear:function(){t=null,r=null},notify:function(){e((function(){for(var 
e=t;e;)e.callback(),e=e.next}))},get:function(){for(var e=[],r=t;r;)e.push(r),r=r.next;return e},subscribe:function(e){var n=!0,o=r={callback:e,next:null,prev:r};return o.prev?o.prev.next=o:t=o,function(){n&&null!==t&&(n=!1,o.next?o.next.prev=o.prev:r=o.prev,o.prev?o.prev.next=o.next:t=o.next)}}}}())},t.tryUnsubscribe=function(){this.unsubscribe&&(this.unsubscribe(),this.unsubscribe=null,this.listeners.clear(),this.listeners=h)},e}(),m="undefined"!=typeof window&&void 0!==window.document&&void 0!==window.document.createElement?s.useLayoutEffect:s.useEffect;const g=function(e){var t=e.store,r=e.context,n=e.children,o=(0,s.useMemo)((function(){var e=new d(t);return e.onStateChange=e.notifyNestedSubs,{store:t,subscription:e}}),[t]),a=(0,s.useMemo)((function(){return t.getState()}),[t]);m((function(){var e=o.subscription;return e.trySubscribe(),a!==t.getState()&&e.notifyNestedSubs(),function(){e.tryUnsubscribe(),e.onStateChange=null}}),[o,a]);var i=r||c;return s.createElement(i.Provider,{value:o},n)};var v=r(87462),y=r(63366),b=r(8679),w=r.n(b),E=r(72973),x=[],_=[null,null];function S(e,t){var r=e[1];return[t.payload,r+1]}function A(e,t,r){m((function(){return e.apply(void 0,t)}),r)}function k(e,t,r,n,o,a,i){e.current=n,t.current=o,r.current=!1,a.current&&(a.current=null,i())}function C(e,t,r,n,o,a,i,s,l,u){if(e){var c=!1,p=null,f=function(){if(!c){var e,r,f=t.getState();try{e=n(f,o.current)}catch(e){r=e,p=e}r||(p=null),e===a.current?i.current||l():(a.current=e,s.current=e,i.current=!0,u({type:"STORE_UPDATED",payload:{error:r}}))}};r.onStateChange=f,r.trySubscribe(),f();return function(){if(c=!0,r.tryUnsubscribe(),r.onStateChange=null,p)throw p}}}var O=function(){return[null,0]};function j(e,t){void 0===t&&(t={});var r=t,n=r.getDisplayName,o=void 0===n?function(e){return"ConnectAdvanced("+e+")"}:n,a=r.methodName,i=void 0===a?"connectAdvanced":a,l=r.renderCountProp,u=void 0===l?void 0:l,p=r.shouldHandleStateChanges,f=void 0===p||p,h=r.storeKey,m=void 
0===h?"store":h,g=(r.withRef,r.forwardRef),b=void 0!==g&&g,j=r.context,I=void 0===j?c:j,N=(0,y.Z)(r,["getDisplayName","methodName","renderCountProp","shouldHandleStateChanges","storeKey","withRef","forwardRef","context"]),T=I;return function(t){var r=t.displayName||t.name||"Component",n=o(r),a=(0,v.Z)({},N,{getDisplayName:o,methodName:i,renderCountProp:u,shouldHandleStateChanges:f,storeKey:m,displayName:n,wrappedComponentName:r,WrappedComponent:t}),l=N.pure;var c=l?s.useMemo:function(e){return e()};function p(r){var n=(0,s.useMemo)((function(){var e=r.reactReduxForwardedRef,t=(0,y.Z)(r,["reactReduxForwardedRef"]);return[r.context,e,t]}),[r]),o=n[0],i=n[1],l=n[2],u=(0,s.useMemo)((function(){return o&&o.Consumer&&(0,E.isContextConsumer)(s.createElement(o.Consumer,null))?o:T}),[o,T]),p=(0,s.useContext)(u),h=Boolean(r.store)&&Boolean(r.store.getState)&&Boolean(r.store.dispatch);Boolean(p)&&Boolean(p.store);var m=h?r.store:p.store,g=(0,s.useMemo)((function(){return function(t){return e(t.dispatch,a)}(m)}),[m]),b=(0,s.useMemo)((function(){if(!f)return _;var e=new d(m,h?null:p.subscription),t=e.notifyNestedSubs.bind(e);return[e,t]}),[m,h,p]),w=b[0],j=b[1],I=(0,s.useMemo)((function(){return h?p:(0,v.Z)({},p,{subscription:w})}),[h,p,w]),N=(0,s.useReducer)(S,x,O),P=N[0][0],R=N[1];if(P&&P.error)throw P.error;var M=(0,s.useRef)(),D=(0,s.useRef)(l),L=(0,s.useRef)(),B=(0,s.useRef)(!1),F=c((function(){return L.current&&l===D.current?L.current:g(m.getState(),l)}),[m,P,l]);A(k,[D,M,B,l,F,L,j]),A(C,[f,m,w,g,D,M,B,L,j,R],[m,w,g]);var z=(0,s.useMemo)((function(){return s.createElement(t,(0,v.Z)({},F,{ref:i}))}),[i,t,F]);return(0,s.useMemo)((function(){return f?s.createElement(u.Provider,{value:I},z):z}),[u,z,I])}var h=l?s.memo(p):p;if(h.WrappedComponent=t,h.displayName=p.displayName=n,b){var g=s.forwardRef((function(e,t){return s.createElement(h,(0,v.Z)({},e,{reactReduxForwardedRef:t}))}));return g.displayName=n,g.WrappedComponent=t,w()(g,t)}return w()(h,t)}}function I(e,t){return 
e===t?0!==e||0!==t||1/e==1/t:e!=e&&t!=t}function N(e,t){if(I(e,t))return!0;if("object"!=typeof e||null===e||"object"!=typeof t||null===t)return!1;var r=Object.keys(e),n=Object.keys(t);if(r.length!==n.length)return!1;for(var o=0;o=0;n--){var o=t[n](e);if(o)return o}return function(t,n){throw new Error("Invalid value of type "+typeof e+" for "+r+" argument when connecting component "+n.wrappedComponentName+".")}}function V(e,t){return e===t}function $(e){var t=void 0===e?{}:e,r=t.connectHOC,n=void 0===r?j:r,o=t.mapStateToPropsFactories,a=void 0===o?D:o,i=t.mapDispatchToPropsFactories,s=void 0===i?M:i,l=t.mergePropsFactories,u=void 0===l?B:l,c=t.selectorFactory,p=void 0===c?U:c;return function(e,t,r,o){void 0===o&&(o={});var i=o,l=i.pure,c=void 0===l||l,f=i.areStatesEqual,h=void 0===f?V:f,d=i.areOwnPropsEqual,m=void 0===d?N:d,g=i.areStatePropsEqual,b=void 0===g?N:g,w=i.areMergedPropsEqual,E=void 0===w?N:w,x=(0,y.Z)(i,["pure","areStatesEqual","areOwnPropsEqual","areStatePropsEqual","areMergedPropsEqual"]),_=q(e,a,"mapStateToProps"),S=q(t,s,"mapDispatchToProps"),A=q(r,u,"mergeProps");return n(p,(0,v.Z)({methodName:"connect",getDisplayName:function(e){return"Connect("+e+")"},shouldHandleStateChanges:Boolean(e),initMapStateToProps:_,initMapDispatchToProps:S,initMergeProps:A,pure:c,areStatesEqual:h,areOwnPropsEqual:m,areStatePropsEqual:b,areMergedPropsEqual:E},x))}}const W=$();var H;H=l.unstable_batchedUpdates,p=H;var J=r(57557),K=r.n(J),G=r(6557),Z=r.n(G);const Y=e=>t=>{const{fn:r}=e();class n extends s.Component{render(){return s.createElement(t,o()({},e(),this.props,this.context))}}return n.displayName=`WithSystem(${r.getDisplayName(t)})`,n},Q=(e,t)=>r=>{const{fn:n}=e();class a extends s.Component{render(){return s.createElement(g,{store:t},s.createElement(r,o()({},this.props,this.context)))}}return a.displayName=`WithRoot(${n.getDisplayName(r)})`,a},X=(e,t,r)=>(0,u.qC)(r?Q(e,r):Z(),W(((r,n)=>{var o;const a={...n,...e()},i=(null===(o=t.prototype)||void 0===o?void 
0:o.mapStateToProps)||(e=>({state:e}));return i(r,a)})),Y(e))(t),ee=(e,t,r,n)=>{for(const o in t){const a=t[o];"function"==typeof a&&a(r[o],n[o],e())}},te=(e,t,r)=>(t,n)=>{const{fn:o}=e(),a=r(t,"root");class l extends s.Component{constructor(t,r){super(t,r),ee(e,n,t,{})}UNSAFE_componentWillReceiveProps(t){ee(e,n,t,this.props)}render(){const e=K()(this.props,n?i()(n):[]);return s.createElement(a,e)}}return l.displayName=`WithMappedContainer(${o.getDisplayName(a)})`,l},re=(e,t,r,n)=>o=>{const a=r(e,t,n)("App","root");l.render(s.createElement(a,null),o)},ne=(e,t,r)=>function(n,o){let a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};if("string"!=typeof n)throw new TypeError("Need a string, to fetch a component. Was given a "+typeof n);const i=r(n);return i?o?"root"===o?X(e,i,t()):X(e,i):i:(a.failSilently||e().log.warn("Could not find component:",n),null)}},33424:(e,t,r)=>{"use strict";r.d(t,{d3:()=>D,C2:()=>ee});var n=r(28222),o=r.n(n),a=r(58118),i=r.n(a),s=r(63366);function l(e,t){(null==t||t>e.length)&&(t=e.length);for(var r=0,n=new 
Array(t);r=4?[t[0],t[1],t[2],t[3],"".concat(t[0],".").concat(t[1]),"".concat(t[0],".").concat(t[2]),"".concat(t[0],".").concat(t[3]),"".concat(t[1],".").concat(t[0]),"".concat(t[1],".").concat(t[2]),"".concat(t[1],".").concat(t[3]),"".concat(t[2],".").concat(t[0]),"".concat(t[2],".").concat(t[1]),"".concat(t[2],".").concat(t[3]),"".concat(t[3],".").concat(t[0]),"".concat(t[3],".").concat(t[1]),"".concat(t[3],".").concat(t[2]),"".concat(t[0],".").concat(t[1],".").concat(t[2]),"".concat(t[0],".").concat(t[1],".").concat(t[3]),"".concat(t[0],".").concat(t[2],".").concat(t[1]),"".concat(t[0],".").concat(t[2],".").concat(t[3]),"".concat(t[0],".").concat(t[3],".").concat(t[1]),"".concat(t[0],".").concat(t[3],".").concat(t[2]),"".concat(t[1],".").concat(t[0],".").concat(t[2]),"".concat(t[1],".").concat(t[0],".").concat(t[3]),"".concat(t[1],".").concat(t[2],".").concat(t[0]),"".concat(t[1],".").concat(t[2],".").concat(t[3]),"".concat(t[1],".").concat(t[3],".").concat(t[0]),"".concat(t[1],".").concat(t[3],".").concat(t[2]),"".concat(t[2],".").concat(t[0],".").concat(t[1]),"".concat(t[2],".").concat(t[0],".").concat(t[3]),"".concat(t[2],".").concat(t[1],".").concat(t[0]),"".concat(t[2],".").concat(t[1],".").concat(t[3]),"".concat(t[2],".").concat(t[3],".").concat(t[0]),"".concat(t[2],".").concat(t[3],".").concat(t[1]),"".concat(t[3],".").concat(t[0],".").concat(t[1]),"".concat(t[3],".").concat(t[0],".").concat(t[2]),"".concat(t[3],".").concat(t[1],".").concat(t[0]),"".concat(t[3],".").concat(t[1],".").concat(t[2]),"".concat(t[3],".").concat(t[2],".").concat(t[0]),"".concat(t[3],".").concat(t[2],".").concat(t[1]),"".concat(t[0],".").concat(t[1],".").concat(t[2],".").concat(t[3]),"".concat(t[0],".").concat(t[1],".").concat(t[3],".").concat(t[2]),"".concat(t[0],".").concat(t[2],".").concat(t[1],".").concat(t[3]),"".concat(t[0],".").concat(t[2],".").concat(t[3],".").concat(t[1]),"".concat(t[0],".").concat(t[3],".").concat(t[1],".").concat(t[2]),"".concat(t[0],".").concat(t[3],"."
).concat(t[2],".").concat(t[1]),"".concat(t[1],".").concat(t[0],".").concat(t[2],".").concat(t[3]),"".concat(t[1],".").concat(t[0],".").concat(t[3],".").concat(t[2]),"".concat(t[1],".").concat(t[2],".").concat(t[0],".").concat(t[3]),"".concat(t[1],".").concat(t[2],".").concat(t[3],".").concat(t[0]),"".concat(t[1],".").concat(t[3],".").concat(t[0],".").concat(t[2]),"".concat(t[1],".").concat(t[3],".").concat(t[2],".").concat(t[0]),"".concat(t[2],".").concat(t[0],".").concat(t[1],".").concat(t[3]),"".concat(t[2],".").concat(t[0],".").concat(t[3],".").concat(t[1]),"".concat(t[2],".").concat(t[1],".").concat(t[0],".").concat(t[3]),"".concat(t[2],".").concat(t[1],".").concat(t[3],".").concat(t[0]),"".concat(t[2],".").concat(t[3],".").concat(t[0],".").concat(t[1]),"".concat(t[2],".").concat(t[3],".").concat(t[1],".").concat(t[0]),"".concat(t[3],".").concat(t[0],".").concat(t[1],".").concat(t[2]),"".concat(t[3],".").concat(t[0],".").concat(t[2],".").concat(t[1]),"".concat(t[3],".").concat(t[1],".").concat(t[0],".").concat(t[2]),"".concat(t[3],".").concat(t[1],".").concat(t[2],".").concat(t[0]),"".concat(t[3],".").concat(t[2],".").concat(t[0],".").concat(t[1]),"".concat(t[3],".").concat(t[2],".").concat(t[1],".").concat(t[0])]:void 0),m[n]}function v(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=arguments.length>2?arguments[2]:void 0,n=e.filter((function(e){return"token"!==e})),o=g(n);return o.reduce((function(e,t){return d(d({},e),r[t])}),t)}function y(e){return e.join(" ")}function b(e){var t=e.node,r=e.stylesheet,n=e.style,o=void 0===n?{}:n,a=e.useInlineStyles,i=e.key,s=t.properties,l=t.type,u=t.tagName,c=t.value;if("text"===l)return c;if(u){var h,m=function(e,t){var r=0;return function(n){return r+=1,n.map((function(n,o){return b({node:n,stylesheet:e,useInlineStyles:t,key:"code-segment-".concat(r,"-").concat(o)})}))}}(r,a);if(a){var g=Object.keys(r).reduce((function(e,t){return 
t.split(".").forEach((function(t){e.includes(t)||e.push(t)})),e}),[]),w=s.className&&s.className.includes("token")?["token"]:[],E=s.className&&w.concat(s.className.filter((function(e){return!g.includes(e)})));h=d(d({},s),{},{className:y(E)||void 0,style:v(s.className,Object.assign({},s.style,o),r)})}else h=d(d({},s),{},{className:y(s.className)});var x=m(t.children);return p.createElement(u,(0,f.Z)({key:i},h),x)}}var w=["language","children","style","customStyle","codeTagProps","useInlineStyles","showLineNumbers","showInlineLineNumbers","startingLineNumber","lineNumberContainerStyle","lineNumberStyle","wrapLines","wrapLongLines","lineProps","renderer","PreTag","CodeTag","code","astGenerator"];function E(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function x(e){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:[],r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[],n=0;n2&&void 0!==arguments[2]?arguments[2]:[];return C({children:e,lineNumber:t,lineNumberStyle:s,largestLineNumber:i,showInlineLineNumbers:o,lineProps:r,className:a,showLineNumbers:n,wrapLongLines:l})}function m(e,t){if(n&&t&&o){var r=k(s,t,i);e.unshift(A(t,r))}return e}function g(e,r){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];return t||n.length>0?d(e,r,n):m(e,r)}for(var v=function(){var e=c[h],t=e.children[0].value;if(t.match(_)){var r=t.split("\n");r.forEach((function(t,o){var i=n&&p.length+a,s={type:"text",value:"".concat(t,"\n")};if(0===o){var l=g(c.slice(f+1,h).concat(C({children:[s],className:e.properties.className})),i);p.push(l)}else if(o===r.length-1){var u=c[h+1]&&c[h+1].children&&c[h+1].children[0],d={type:"text",value:"".concat(t)};if(u){var m=C({children:[d],className:e.properties.className});c.splice(h+1,0,m)}else{var v=g([d],i,e.properties.className);p.push(v)}}else{var 
y=g([s],i,e.properties.className);p.push(y)}})),f=h}h++};h=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}(e,w);q=q||T;var $=d?p.createElement(S,{containerStyle:b,codeStyle:u.style||{},numberStyle:_,startingLineNumber:y,codeString:U}):null,W=o.hljs||o['pre[class*="language-"]']||{backgroundColor:"#fff"},H=N(q)?"hljs":"prismjs",J=f?Object.assign({},V,{style:Object.assign({},W,i)}):Object.assign({},V,{className:V.className?"".concat(H," ").concat(V.className):H,style:Object.assign({},i)});if(u.style=x(x({},u.style),{},C?{whiteSpace:"pre-wrap"}:{whiteSpace:"pre"}),!q)return p.createElement(L,J,$,p.createElement(F,u,U));(void 0===A&&M||C)&&(A=!0),M=M||I;var K=[{type:"text",value:U}],G=function(e){var t=e.astGenerator,r=e.language,n=e.code,o=e.defaultCodeValue;if(N(t)){var a=function(e,t){return-1!==e.listLanguages().indexOf(t)}(t,r);return"text"===r?{value:o,language:"text"}:a?t.highlight(r,n):t.highlightAuto(n)}try{return r&&"text"!==r?{value:t.highlight(n,r)}:{value:o}}catch(e){return{value:o}}}({astGenerator:q,language:t,code:U,defaultCodeValue:K});null===G.language&&(G.value=K);var Z=j(G,A,R,d,g,y,G.value.length+y,_,C);return p.createElement(L,J,p.createElement(F,u,!g&&$,M({rows:Z,stylesheet:o,useInlineStyles:f})))});M.registerLanguage=R.registerLanguage;const D=M;var L=r(96344);const B=r.n(L)();var F=r(82026);const z=r.n(F)();var U=r(42157);const q=r.n(U)();var V=r(61519);const $=r.n(V)();var W=r(54587);const H=r.n(W)();var J=r(30786);const K=r.n(J)();var G=r(66336);const 
Z=r.n(G)(),Y={hljs:{display:"block",overflowX:"auto",padding:"0.5em",background:"#333",color:"white"},"hljs-name":{fontWeight:"bold"},"hljs-strong":{fontWeight:"bold"},"hljs-code":{fontStyle:"italic",color:"#888"},"hljs-emphasis":{fontStyle:"italic"},"hljs-tag":{color:"#62c8f3"},"hljs-variable":{color:"#ade5fc"},"hljs-template-variable":{color:"#ade5fc"},"hljs-selector-id":{color:"#ade5fc"},"hljs-selector-class":{color:"#ade5fc"},"hljs-string":{color:"#a2fca2"},"hljs-bullet":{color:"#d36363"},"hljs-type":{color:"#ffa"},"hljs-title":{color:"#ffa"},"hljs-section":{color:"#ffa"},"hljs-attribute":{color:"#ffa"},"hljs-quote":{color:"#ffa"},"hljs-built_in":{color:"#ffa"},"hljs-builtin-name":{color:"#ffa"},"hljs-number":{color:"#d36363"},"hljs-symbol":{color:"#d36363"},"hljs-keyword":{color:"#fcc28c"},"hljs-selector-tag":{color:"#fcc28c"},"hljs-literal":{color:"#fcc28c"},"hljs-comment":{color:"#888"},"hljs-deletion":{color:"#333",backgroundColor:"#fc9b9b"},"hljs-regexp":{color:"#c6b4f0"},"hljs-link":{color:"#c6b4f0"},"hljs-meta":{color:"#fc9b9b"},"hljs-addition":{backgroundColor:"#a2fca2",color:"#333"}};D.registerLanguage("json",z),D.registerLanguage("js",B),D.registerLanguage("xml",q),D.registerLanguage("yaml",H),D.registerLanguage("http",K),D.registerLanguage("bash",$),D.registerLanguage("powershell",Z),D.registerLanguage("javascript",B);const 
Q={agate:Y,arta:{hljs:{display:"block",overflowX:"auto",padding:"0.5em",background:"#222",color:"#aaa"},"hljs-subst":{color:"#aaa"},"hljs-section":{color:"#fff",fontWeight:"bold"},"hljs-comment":{color:"#444"},"hljs-quote":{color:"#444"},"hljs-meta":{color:"#444"},"hljs-string":{color:"#ffcc33"},"hljs-symbol":{color:"#ffcc33"},"hljs-bullet":{color:"#ffcc33"},"hljs-regexp":{color:"#ffcc33"},"hljs-number":{color:"#00cc66"},"hljs-addition":{color:"#00cc66"},"hljs-built_in":{color:"#32aaee"},"hljs-builtin-name":{color:"#32aaee"},"hljs-literal":{color:"#32aaee"},"hljs-type":{color:"#32aaee"},"hljs-template-variable":{color:"#32aaee"},"hljs-attribute":{color:"#32aaee"},"hljs-link":{color:"#32aaee"},"hljs-keyword":{color:"#6644aa"},"hljs-selector-tag":{color:"#6644aa"},"hljs-name":{color:"#6644aa"},"hljs-selector-id":{color:"#6644aa"},"hljs-selector-class":{color:"#6644aa"},"hljs-title":{color:"#bb1166"},"hljs-variable":{color:"#bb1166"},"hljs-deletion":{color:"#bb1166"},"hljs-template-tag":{color:"#bb1166"},"hljs-doctag":{fontWeight:"bold"},"hljs-strong":{fontWeight:"bold"},"hljs-emphasis":{fontStyle:"italic"}},monokai:{hljs:{display:"block",overflowX:"auto",padding:"0.5em",background:"#272822",color:"#ddd"},"hljs-tag":{color:"#f92672"},"hljs-keyword":{color:"#f92672",fontWeight:"bold"},"hljs-selector-tag":{color:"#f92672",fontWeight:"bold"},"hljs-literal":{color:"#f92672",fontWeight:"bold"},"hljs-strong":{color:"#f92672"},"hljs-name":{color:"#f92672"},"hljs-code":{color:"#66d9ef"},"hljs-class 
.hljs-title":{color:"white"},"hljs-attribute":{color:"#bf79db"},"hljs-symbol":{color:"#bf79db"},"hljs-regexp":{color:"#bf79db"},"hljs-link":{color:"#bf79db"},"hljs-string":{color:"#a6e22e"},"hljs-bullet":{color:"#a6e22e"},"hljs-subst":{color:"#a6e22e"},"hljs-title":{color:"#a6e22e",fontWeight:"bold"},"hljs-section":{color:"#a6e22e",fontWeight:"bold"},"hljs-emphasis":{color:"#a6e22e"},"hljs-type":{color:"#a6e22e",fontWeight:"bold"},"hljs-built_in":{color:"#a6e22e"},"hljs-builtin-name":{color:"#a6e22e"},"hljs-selector-attr":{color:"#a6e22e"},"hljs-selector-pseudo":{color:"#a6e22e"},"hljs-addition":{color:"#a6e22e"},"hljs-variable":{color:"#a6e22e"},"hljs-template-tag":{color:"#a6e22e"},"hljs-template-variable":{color:"#a6e22e"},"hljs-comment":{color:"#75715e"},"hljs-quote":{color:"#75715e"},"hljs-deletion":{color:"#75715e"},"hljs-meta":{color:"#75715e"},"hljs-doctag":{fontWeight:"bold"},"hljs-selector-id":{fontWeight:"bold"}},nord:{hljs:{display:"block",overflowX:"auto",padding:"0.5em",background:"#2E3440",color:"#D8DEE9"},"hljs-subst":{color:"#D8DEE9"},"hljs-selector-tag":{color:"#81A1C1"},"hljs-selector-id":{color:"#8FBCBB",fontWeight:"bold"},"hljs-selector-class":{color:"#8FBCBB"},"hljs-selector-attr":{color:"#8FBCBB"},"hljs-selector-pseudo":{color:"#88C0D0"},"hljs-addition":{backgroundColor:"rgba(163, 190, 140, 0.5)"},"hljs-deletion":{backgroundColor:"rgba(191, 97, 106, 0.5)"},"hljs-built_in":{color:"#8FBCBB"},"hljs-type":{color:"#8FBCBB"},"hljs-class":{color:"#8FBCBB"},"hljs-function":{color:"#88C0D0"},"hljs-function > 
.hljs-title":{color:"#88C0D0"},"hljs-keyword":{color:"#81A1C1"},"hljs-literal":{color:"#81A1C1"},"hljs-symbol":{color:"#81A1C1"},"hljs-number":{color:"#B48EAD"},"hljs-regexp":{color:"#EBCB8B"},"hljs-string":{color:"#A3BE8C"},"hljs-title":{color:"#8FBCBB"},"hljs-params":{color:"#D8DEE9"},"hljs-bullet":{color:"#81A1C1"},"hljs-code":{color:"#8FBCBB"},"hljs-emphasis":{fontStyle:"italic"},"hljs-formula":{color:"#8FBCBB"},"hljs-strong":{fontWeight:"bold"},"hljs-link:hover":{textDecoration:"underline"},"hljs-quote":{color:"#4C566A"},"hljs-comment":{color:"#4C566A"},"hljs-doctag":{color:"#8FBCBB"},"hljs-meta":{color:"#5E81AC"},"hljs-meta-keyword":{color:"#5E81AC"},"hljs-meta-string":{color:"#A3BE8C"},"hljs-attr":{color:"#8FBCBB"},"hljs-attribute":{color:"#D8DEE9"},"hljs-builtin-name":{color:"#81A1C1"},"hljs-name":{color:"#81A1C1"},"hljs-section":{color:"#88C0D0"},"hljs-tag":{color:"#81A1C1"},"hljs-variable":{color:"#D8DEE9"},"hljs-template-variable":{color:"#D8DEE9"},"hljs-template-tag":{color:"#5E81AC"},"abnf .hljs-attribute":{color:"#88C0D0"},"abnf .hljs-symbol":{color:"#EBCB8B"},"apache .hljs-attribute":{color:"#88C0D0"},"apache .hljs-section":{color:"#81A1C1"},"arduino .hljs-built_in":{color:"#88C0D0"},"aspectj .hljs-meta":{color:"#D08770"},"aspectj > .hljs-title":{color:"#88C0D0"},"bnf .hljs-attribute":{color:"#8FBCBB"},"clojure .hljs-name":{color:"#88C0D0"},"clojure .hljs-symbol":{color:"#EBCB8B"},"coq .hljs-built_in":{color:"#88C0D0"},"cpp .hljs-meta-string":{color:"#8FBCBB"},"css .hljs-built_in":{color:"#88C0D0"},"css .hljs-keyword":{color:"#D08770"},"diff .hljs-meta":{color:"#8FBCBB"},"ebnf .hljs-attribute":{color:"#8FBCBB"},"glsl .hljs-built_in":{color:"#88C0D0"},"groovy .hljs-meta:not(:first-child)":{color:"#D08770"},"haxe .hljs-meta":{color:"#D08770"},"java .hljs-meta":{color:"#D08770"},"ldif .hljs-attribute":{color:"#8FBCBB"},"lisp .hljs-name":{color:"#88C0D0"},"lua .hljs-built_in":{color:"#88C0D0"},"moonscript .hljs-built_in":{color:"#88C0D0"},"nginx 
.hljs-attribute":{color:"#88C0D0"},"nginx .hljs-section":{color:"#5E81AC"},"pf .hljs-built_in":{color:"#88C0D0"},"processing .hljs-built_in":{color:"#88C0D0"},"scss .hljs-keyword":{color:"#81A1C1"},"stylus .hljs-keyword":{color:"#81A1C1"},"swift .hljs-meta":{color:"#D08770"},"vim .hljs-built_in":{color:"#88C0D0",fontStyle:"italic"},"yaml .hljs-meta":{color:"#D08770"}},obsidian:{hljs:{display:"block",overflowX:"auto",padding:"0.5em",background:"#282b2e",color:"#e0e2e4"},"hljs-keyword":{color:"#93c763",fontWeight:"bold"},"hljs-selector-tag":{color:"#93c763",fontWeight:"bold"},"hljs-literal":{color:"#93c763",fontWeight:"bold"},"hljs-selector-id":{color:"#93c763"},"hljs-number":{color:"#ffcd22"},"hljs-attribute":{color:"#668bb0"},"hljs-code":{color:"white"},"hljs-class .hljs-title":{color:"white"},"hljs-section":{color:"white",fontWeight:"bold"},"hljs-regexp":{color:"#d39745"},"hljs-link":{color:"#d39745"},"hljs-meta":{color:"#557182"},"hljs-tag":{color:"#8cbbad"},"hljs-name":{color:"#8cbbad",fontWeight:"bold"},"hljs-bullet":{color:"#8cbbad"},"hljs-subst":{color:"#8cbbad"},"hljs-emphasis":{color:"#8cbbad"},"hljs-type":{color:"#8cbbad",fontWeight:"bold"},"hljs-built_in":{color:"#8cbbad"},"hljs-selector-attr":{color:"#8cbbad"},"hljs-selector-pseudo":{color:"#8cbbad"},"hljs-addition":{color:"#8cbbad"},"hljs-variable":{color:"#8cbbad"},"hljs-template-tag":{color:"#8cbbad"},"hljs-template-variable":{color:"#8cbbad"},"hljs-string":{color:"#ec7600"},"hljs-symbol":{color:"#ec7600"},"hljs-comment":{color:"#818e96"},"hljs-quote":{color:"#818e96"},"hljs-deletion":{color:"#818e96"},"hljs-selector-class":{color:"#A082BD"},"hljs-doctag":{fontWeight:"bold"},"hljs-title":{fontWeight:"bold"},"hljs-strong":{fontWeight:"bold"}},"tomorrow-night":{"hljs-comment":{color:"#969896"},"hljs-quote":{color:"#969896"},"hljs-variable":{color:"#cc6666"},"hljs-template-variable":{color:"#cc6666"},"hljs-tag":{color:"#cc6666"},"hljs-name":{color:"#cc6666"},"hljs-selector-id":{color:"#cc6666"},"hljs-sele
ctor-class":{color:"#cc6666"},"hljs-regexp":{color:"#cc6666"},"hljs-deletion":{color:"#cc6666"},"hljs-number":{color:"#de935f"},"hljs-built_in":{color:"#de935f"},"hljs-builtin-name":{color:"#de935f"},"hljs-literal":{color:"#de935f"},"hljs-type":{color:"#de935f"},"hljs-params":{color:"#de935f"},"hljs-meta":{color:"#de935f"},"hljs-link":{color:"#de935f"},"hljs-attribute":{color:"#f0c674"},"hljs-string":{color:"#b5bd68"},"hljs-symbol":{color:"#b5bd68"},"hljs-bullet":{color:"#b5bd68"},"hljs-addition":{color:"#b5bd68"},"hljs-title":{color:"#81a2be"},"hljs-section":{color:"#81a2be"},"hljs-keyword":{color:"#b294bb"},"hljs-selector-tag":{color:"#b294bb"},hljs:{display:"block",overflowX:"auto",background:"#1d1f21",color:"#c5c8c6",padding:"0.5em"},"hljs-emphasis":{fontStyle:"italic"},"hljs-strong":{fontWeight:"bold"}}},X=o()(Q),ee=e=>i()(X).call(X,e)?Q[e]:(console.warn(`Request style '${e}' is not available, returning default instead`),Y)},90242:(e,t,r)=>{"use strict";r.d(t,{mz:()=>pe,oG:()=>fe,AF:()=>he,LQ:()=>de,Kn:()=>me,Wl:()=>ge,kJ:()=>ve,HP:()=>ye,Ay:()=>be,Q2:()=>we,_5:()=>Ee,iQ:()=>xe,gp:()=>_e,DR:()=>Se,Zl:()=>Ae,Ik:()=>Ce,xi:()=>Pe,UG:()=>Re,r3:()=>Me,wh:()=>De,GZ:()=>Le,be:()=>Be,Nm:()=>Fe,hW:()=>ze,QG:()=>Ue,oJ:()=>qe,J6:()=>Ve,nX:()=>$e,po:()=>We,XV:()=>He,Pz:()=>Je,D$:()=>Ke,V9:()=>Ge,cz:()=>Ze,Uj:()=>Ye,Xb:()=>Qe,O2:()=>et});var 
n=r(58309),o=r.n(n),a=r(97606),i=r.n(a),s=r(74386),l=r.n(s),u=r(86),c=r.n(u),p=r(14418),f=r.n(p),h=r(28222),d=r.n(h),m=(r(11189),r(24282)),g=r.n(m),v=r(76986),y=r.n(v),b=r(2578),w=r.n(b),E=r(24278),x=r.n(E),_=(r(39022),r(92039)),S=r.n(_),A=(r(58118),r(35627)),k=r.n(A),C=r(11882),O=r.n(C),j=r(51679),I=r.n(j),N=r(27043),T=r.n(N),P=r(81607),R=r.n(P),M=r(43393),D=r.n(M),L=r(17967),B=r(68929),F=r.n(B),z=r(11700),U=r.n(z),q=r(88306),V=r.n(q),$=r(13311),W=r.n($),H=r(59704),J=r.n(H),K=r(77813),G=r.n(K),Z=r(23560),Y=r.n(Z),Q=r(57050),X=r(27504),ee=r(8269),te=r.n(ee),re=r(19069),ne=r(92282),oe=r.n(ne),ae=r(89072),ie=r.n(ae),se=r(1272),le=r(48764).Buffer;const ue="default",ce=e=>D().Iterable.isIterable(e);function pe(e){return me(e)?ce(e)?e.toJS():e:{}}function fe(e){var t,r;if(ce(e))return e;if(e instanceof X.Z.File)return e;if(!me(e))return e;if(o()(e))return i()(r=D().Seq(e)).call(r,fe).toList();if(Y()(l()(e))){var n;const t=function(e){if(!Y()(l()(e)))return e;const t={},r="_**[]",n={};for(let o of l()(e).call(e))if(t[o[0]]||n[o[0]]&&n[o[0]].containsMultiple){if(!n[o[0]]){n[o[0]]={containsMultiple:!0,length:1},t[`${o[0]}${r}${n[o[0]].length}`]=t[o[0]],delete t[o[0]]}n[o[0]].length+=1,t[`${o[0]}${r}${n[o[0]].length}`]=o[1]}else t[o[0]]=o[1];return t}(e);return i()(n=D().OrderedMap(t)).call(n,fe)}return i()(t=D().OrderedMap(e)).call(t,fe)}function he(e){return o()(e)?e:[e]}function de(e){return"function"==typeof e}function me(e){return!!e&&"object"==typeof e}function ge(e){return"function"==typeof e}function ve(e){return o()(e)}const ye=V();function be(e,t){var r;return g()(r=d()(e)).call(r,((r,n)=>(r[n]=t(e[n],n),r)),{})}function we(e,t){var r;return g()(r=d()(e)).call(r,((r,n)=>{let o=t(e[n],n);return o&&"object"==typeof o&&y()(r,o),r}),{})}function Ee(e){return t=>{let{dispatch:r,getState:n}=t;return t=>r=>"function"==typeof r?r(e()):t(r)}}function xe(e){var t;let r=e.keySeq();return r.contains(ue)?ue:w()(t=f()(r).call(r,(e=>"2"===(e+"")[0]))).call(t).first()}function 
_e(e,t){if(!D().Iterable.isIterable(e))return D().List();let r=e.getIn(o()(t)?t:[t]);return D().List.isList(r)?r:D().List()}function Se(e){let t,r=[/filename\*=[^']+'\w*'"([^"]+)";?/i,/filename\*=[^']+'\w*'([^;]+);?/i,/filename="([^;]*);?"/i,/filename=([^;]*);?/i];if(S()(r).call(r,(r=>(t=r.exec(e),null!==t))),null!==t&&t.length>1)try{return decodeURIComponent(t[1])}catch(e){console.error(e)}return null}function Ae(e){return t=e.replace(/\.[^./]*$/,""),U()(F()(t));var t}function ke(e,t,r,n,a){if(!t)return[];let s=[],l=t.get("nullable"),u=t.get("required"),p=t.get("maximum"),h=t.get("minimum"),d=t.get("type"),m=t.get("format"),g=t.get("maxLength"),v=t.get("minLength"),y=t.get("uniqueItems"),b=t.get("maxItems"),w=t.get("minItems"),E=t.get("pattern");const x=r||!0===u,_=null!=e;if(l&&null===e||!d||!(x||_&&"array"===d||!(!x&&!_)))return[];let A="string"===d&&e,k="array"===d&&o()(e)&&e.length,C="array"===d&&D().List.isList(e)&&e.count();const O=[A,k,C,"array"===d&&"string"==typeof e&&e,"file"===d&&e instanceof X.Z.File,"boolean"===d&&(e||!1===e),"number"===d&&(e||0===e),"integer"===d&&(e||0===e),"object"===d&&"object"==typeof e&&null!==e,"object"===d&&"string"==typeof e&&e],j=S()(O).call(O,(e=>!!e));if(x&&!j&&!n)return s.push("Required field is not provided"),s;if("object"===d&&(null===a||"application/json"===a)){let r=e;if("string"==typeof e)try{r=JSON.parse(e)}catch(e){return s.push("Parameter string value must be valid JSON"),s}var I;if(t&&t.has("required")&&ge(u.isList)&&u.isList()&&c()(u).call(u,(e=>{void 0===r[e]&&s.push({propKey:e,error:"Required property not found"})})),t&&t.has("properties"))c()(I=t.get("properties")).call(I,((e,t)=>{const o=ke(r[t],e,!1,n,a);s.push(...i()(o).call(o,(e=>({propKey:t,error:e}))))}))}if(E){let t=((e,t)=>{if(!new RegExp(t).test(e))return"Value must follow pattern "+t})(e,E);t&&s.push(t)}if(w&&"array"===d){let t=((e,t)=>{if(!e&&t>=1||e&&e.length{if(e&&e.length>t)return`Array must not contain more then ${t} 
item${1===t?"":"s"}`})(e,b);t&&s.push({needRemove:!0,error:t})}if(y&&"array"===d){let t=((e,t)=>{if(e&&("true"===t||!0===t)){const t=(0,M.fromJS)(e),r=t.toSet();if(e.length>r.size){let e=(0,M.Set)();if(c()(t).call(t,((r,n)=>{f()(t).call(t,(e=>ge(e.equals)?e.equals(r):e===r)).size>1&&(e=e.add(n))})),0!==e.size)return i()(e).call(e,(e=>({index:e,error:"No duplicates allowed."}))).toArray()}}})(e,y);t&&s.push(...t)}if(g||0===g){let t=((e,t)=>{if(e.length>t)return`Value must be no longer than ${t} character${1!==t?"s":""}`})(e,g);t&&s.push(t)}if(v){let t=((e,t)=>{if(e.length{if(e>t)return`Value must be less than ${t}`})(e,p);t&&s.push(t)}if(h||0===h){let t=((e,t)=>{if(e{if(isNaN(Date.parse(e)))return"Value must be a DateTime"})(e):"uuid"===m?(e=>{if(e=e.toString().toLowerCase(),!/^[{(]?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}[)}]?$/.test(e))return"Value must be a Guid"})(e):(e=>{if(e&&"string"!=typeof e)return"Value must be a string"})(e),!t)return s;s.push(t)}else if("boolean"===d){let t=(e=>{if("true"!==e&&"false"!==e&&!0!==e&&!1!==e)return"Value must be a boolean"})(e);if(!t)return s;s.push(t)}else if("number"===d){let t=(e=>{if(!/^-?\d+(\.?\d+)?$/.test(e))return"Value must be a number"})(e);if(!t)return s;s.push(t)}else if("integer"===d){let t=(e=>{if(!/^-?\d+$/.test(e))return"Value must be an integer"})(e);if(!t)return s;s.push(t)}else if("array"===d){if(!k&&!C)return s;e&&c()(e).call(e,((e,r)=>{const o=ke(e,t.get("items"),!1,n,a);s.push(...i()(o).call(o,(e=>({index:r,error:e}))))}))}else if("file"===d){let t=(e=>{if(e&&!(e instanceof X.Z.File))return"Value must be a file"})(e);if(!t)return s;s.push(t)}return s}const Ce=function(e,t){let{isOAS3:r=!1,bypassRequiredCheck:n=!1}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o=e.get("required"),{schema:a,parameterContentMediaType:i}=(0,re.Z)(e,{isOAS3:r});return 
ke(t,a,o,n,i)},Oe=(e,t,r)=>{if(e&&!e.xml&&(e.xml={}),e&&!e.xml.name){if(!e.$$ref&&(e.type||e.items||e.properties||e.additionalProperties))return'\n\x3c!-- XML example cannot be generated; root element name is undefined --\x3e';if(e.$$ref){let t=e.$$ref.match(/\S*\/(\S+)$/);e.xml.name=t[1]}}return(0,Q.memoizedCreateXMLExample)(e,t,r)},je=[{when:/json/,shouldStringifyTypes:["string"]}],Ie=["object"],Ne=(e,t,r,n)=>{const o=(0,Q.memoizedSampleFromSchema)(e,t,n),a=typeof o,i=g()(je).call(je,((e,t)=>t.when.test(r)?[...e,...t.shouldStringifyTypes]:e),Ie);return J()(i,(e=>e===a))?k()(o,null,2):o},Te=(e,t,r,n)=>{const o=Ne(e,t,r,n);let a;try{a=se.ZP.dump(se.ZP.load(o),{lineWidth:-1},{schema:se.A8}),"\n"===a[a.length-1]&&(a=x()(a).call(a,0,a.length-1))}catch(e){return console.error(e),"error: could not generate yaml example"}return a.replace(/\t/g," ")},Pe=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},n=arguments.length>3&&void 0!==arguments[3]?arguments[3]:void 0;return e&&ge(e.toJS)&&(e=e.toJS()),n&&ge(n.toJS)&&(n=n.toJS()),/xml/.test(t)?Oe(e,r,n):/(yaml|yml)/.test(t)?Te(e,r,t,n):Ne(e,r,t,n)},Re=()=>{let e={},t=X.Z.location.search;if(!t)return{};if(""!=t){let r=t.substr(1).split("&");for(let t in r)Object.prototype.hasOwnProperty.call(r,t)&&(t=r[t].split("="),e[decodeURIComponent(t[0])]=t[1]&&decodeURIComponent(t[1])||"")}return e},Me=e=>{let t;return t=e instanceof le?e:le.from(e.toString(),"utf-8"),t.toString("base64")},De={operationsSorter:{alpha:(e,t)=>e.get("path").localeCompare(t.get("path")),method:(e,t)=>e.get("method").localeCompare(t.get("method"))},tagsSorter:{alpha:(e,t)=>e.localeCompare(t)}},Le=e=>{let t=[];for(let r in e){let n=e[r];void 0!==n&&""!==n&&t.push([r,"=",encodeURIComponent(n).replace(/%20/g,"+")].join(""))}return t.join("&")},Be=(e,t,r)=>!!W()(r,(r=>G()(e[r],t[r])));function Fe(e){return"string"!=typeof e||""===e?"":(0,L.N)(e)}function 
ze(e){return!(!e||O()(e).call(e,"localhost")>=0||O()(e).call(e,"127.0.0.1")>=0||"none"===e)}function Ue(e){if(!D().OrderedMap.isOrderedMap(e))return null;if(!e.size)return null;const t=I()(e).call(e,((e,t)=>T()(t).call(t,"2")&&d()(e.get("content")||{}).length>0)),r=e.get("default")||D().OrderedMap(),n=(r.get("content")||D().OrderedMap()).keySeq().toJS().length?r:null;return t||n}const qe=e=>"string"==typeof e||e instanceof String?R()(e).call(e).replace(/\s/g,"%20"):"",Ve=e=>te()(qe(e).replace(/%20/g,"_")),$e=e=>f()(e).call(e,((e,t)=>/^x-/.test(t))),We=e=>f()(e).call(e,((e,t)=>/^pattern|maxLength|minLength|maximum|minimum/.test(t)));function He(e,t){var r;let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:()=>!0;if("object"!=typeof e||o()(e)||null===e||!t)return e;const a=y()({},e);return c()(r=d()(a)).call(r,(e=>{e===t&&n(a[e],e)?delete a[e]:a[e]=He(a[e],t,n)})),a}function Je(e){if("string"==typeof e)return e;if(e&&e.toJS&&(e=e.toJS()),"object"==typeof e&&null!==e)try{return k()(e,null,2)}catch(t){return String(e)}return null==e?"":e.toString()}function Ke(e){return"number"==typeof e?e.toString():e}function Ge(e){let{returnAll:t=!1,allowHashes:r=!0}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(!D().Map.isMap(e))throw new Error("paramToIdentifier: received a non-Im.Map parameter as input");const n=e.get("name"),o=e.get("in");let a=[];return e&&e.hashCode&&o&&n&&r&&a.push(`${o}.${n}.hash-${e.hashCode()}`),o&&n&&a.push(`${o}.${n}`),a.push(n),t?a:a[0]||""}function Ze(e,t){var r;const n=Ge(e,{returnAll:!0});return f()(r=i()(n).call(n,(e=>t[e]))).call(r,(e=>void 0!==e))[0]}function Ye(){return Xe(oe()(32).toString("base64"))}function Qe(e){return Xe(ie()("sha256").update(e).digest("base64"))}function Xe(e){return e.replace(/\+/g,"-").replace(/\//g,"_").replace(/=/g,"")}const et=e=>!e||!(!ce(e)||!e.isEmpty())},2518:(e,t,r)=>{"use strict";function n(e){return function(e){try{return!!JSON.parse(e)}catch(e){return 
null}}(e)?"json":null}r.d(t,{O:()=>n})},27504:(e,t,r)=>{"use strict";r.d(t,{Z:()=>n});const n=function(){var e={location:{},history:{},open:()=>{},close:()=>{},File:function(){}};if("undefined"==typeof window)return e;try{e=window;for(var t of["File","Blob","FormData"])t in window&&(e[t]=window[t])}catch(e){console.error(e)}return e}()},19069:(e,t,r)=>{"use strict";r.d(t,{Z:()=>c});var n=r(14418),o=r.n(n),a=r(58118),i=r.n(a),s=r(43393),l=r.n(s);const u=l().Set.of("type","format","items","default","maximum","exclusiveMaximum","minimum","exclusiveMinimum","maxLength","minLength","pattern","maxItems","minItems","uniqueItems","enum","multipleOf");function c(e){let{isOAS3:t}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(!l().Map.isMap(e))return{schema:l().Map(),parameterContentMediaType:null};if(!t)return"body"===e.get("in")?{schema:e.get("schema",l().Map()),parameterContentMediaType:null}:{schema:o()(e).call(e,((e,t)=>i()(u).call(u,t))),parameterContentMediaType:null};if(e.get("content")){const t=e.get("content",l().Map({})).keySeq().first();return{schema:e.getIn(["content",t,"schema"],l().Map()),parameterContentMediaType:t}}return{schema:e.get("schema")?e.get("schema",l().Map()):l().Map(),parameterContentMediaType:null}}},60314:(e,t,r)=>{"use strict";r.d(t,{Z:()=>x});var n=r(58309),o=r.n(n),a=r(2250),i=r.n(a),s=r(25110),l=r.n(s),u=r(8712),c=r.n(u),p=r(51679),f=r.n(p),h=r(12373),d=r.n(h),m=r(18492),g=r.n(m),v=r(88306),y=r.n(v);const b=e=>t=>o()(e)&&o()(t)&&e.length===t.length&&i()(e).call(e,((e,r)=>e===t[r])),w=function(){for(var e=arguments.length,t=new Array(e),r=0;r1&&void 0!==arguments[1]?arguments[1]:w;const{Cache:r}=y();y().Cache=E;const n=y()(e,t);return y().Cache=r,n}},79742:(e,t)=>{"use strict";t.byteLength=function(e){var t=l(e),r=t[0],n=t[1];return 3*(r+n)/4-n},t.toByteArray=function(e){var t,r,a=l(e),i=a[0],s=a[1],u=new o(function(e,t,r){return 
3*(t+r)/4-r}(0,i,s)),c=0,p=s>0?i-4:i;for(r=0;r>16&255,u[c++]=t>>8&255,u[c++]=255&t;2===s&&(t=n[e.charCodeAt(r)]<<2|n[e.charCodeAt(r+1)]>>4,u[c++]=255&t);1===s&&(t=n[e.charCodeAt(r)]<<10|n[e.charCodeAt(r+1)]<<4|n[e.charCodeAt(r+2)]>>2,u[c++]=t>>8&255,u[c++]=255&t);return u},t.fromByteArray=function(e){for(var t,n=e.length,o=n%3,a=[],i=16383,s=0,l=n-o;sl?l:s+i));1===o?(t=e[n-1],a.push(r[t>>2]+r[t<<4&63]+"==")):2===o&&(t=(e[n-2]<<8)+e[n-1],a.push(r[t>>10]+r[t>>4&63]+r[t<<2&63]+"="));return a.join("")};for(var r=[],n=[],o="undefined"!=typeof Uint8Array?Uint8Array:Array,a="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",i=0,s=a.length;i0)throw new Error("Invalid string. Length must be a multiple of 4");var r=e.indexOf("=");return-1===r&&(r=t),[r,r===t?0:4-r%4]}function u(e,t,n){for(var o,a,i=[],s=t;s>18&63]+r[a>>12&63]+r[a>>6&63]+r[63&a]);return i.join("")}n["-".charCodeAt(0)]=62,n["_".charCodeAt(0)]=63},48764:(e,t,r)=>{"use strict";const n=r(79742),o=r(80645),a="function"==typeof Symbol&&"function"==typeof Symbol.for?Symbol.for("nodejs.util.inspect.custom"):null;t.Buffer=l,t.SlowBuffer=function(e){+e!=e&&(e=0);return l.alloc(+e)},t.INSPECT_MAX_BYTES=50;const i=2147483647;function s(e){if(e>i)throw new RangeError('The value "'+e+'" is invalid for option "size"');const t=new Uint8Array(e);return Object.setPrototypeOf(t,l.prototype),t}function l(e,t,r){if("number"==typeof e){if("string"==typeof t)throw new TypeError('The "string" argument must be of type string. 
Received type number');return p(e)}return u(e,t,r)}function u(e,t,r){if("string"==typeof e)return function(e,t){"string"==typeof t&&""!==t||(t="utf8");if(!l.isEncoding(t))throw new TypeError("Unknown encoding: "+t);const r=0|m(e,t);let n=s(r);const o=n.write(e,t);o!==r&&(n=n.slice(0,o));return n}(e,t);if(ArrayBuffer.isView(e))return function(e){if(G(e,Uint8Array)){const t=new Uint8Array(e);return h(t.buffer,t.byteOffset,t.byteLength)}return f(e)}(e);if(null==e)throw new TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type "+typeof e);if(G(e,ArrayBuffer)||e&&G(e.buffer,ArrayBuffer))return h(e,t,r);if("undefined"!=typeof SharedArrayBuffer&&(G(e,SharedArrayBuffer)||e&&G(e.buffer,SharedArrayBuffer)))return h(e,t,r);if("number"==typeof e)throw new TypeError('The "value" argument must not be of type number. Received type number');const n=e.valueOf&&e.valueOf();if(null!=n&&n!==e)return l.from(n,t,r);const o=function(e){if(l.isBuffer(e)){const t=0|d(e.length),r=s(t);return 0===r.length||e.copy(r,0,0,t),r}if(void 0!==e.length)return"number"!=typeof e.length||Z(e.length)?s(0):f(e);if("Buffer"===e.type&&Array.isArray(e.data))return f(e.data)}(e);if(o)return o;if("undefined"!=typeof Symbol&&null!=Symbol.toPrimitive&&"function"==typeof e[Symbol.toPrimitive])return l.from(e[Symbol.toPrimitive]("string"),t,r);throw new TypeError("The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. 
Received type "+typeof e)}function c(e){if("number"!=typeof e)throw new TypeError('"size" argument must be of type number');if(e<0)throw new RangeError('The value "'+e+'" is invalid for option "size"')}function p(e){return c(e),s(e<0?0:0|d(e))}function f(e){const t=e.length<0?0:0|d(e.length),r=s(t);for(let n=0;n=i)throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+i.toString(16)+" bytes");return 0|e}function m(e,t){if(l.isBuffer(e))return e.length;if(ArrayBuffer.isView(e)||G(e,ArrayBuffer))return e.byteLength;if("string"!=typeof e)throw new TypeError('The "string" argument must be one of type string, Buffer, or ArrayBuffer. Received type '+typeof e);const r=e.length,n=arguments.length>2&&!0===arguments[2];if(!n&&0===r)return 0;let o=!1;for(;;)switch(t){case"ascii":case"latin1":case"binary":return r;case"utf8":case"utf-8":return H(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*r;case"hex":return r>>>1;case"base64":return J(e).length;default:if(o)return n?-1:H(e).length;t=(""+t).toLowerCase(),o=!0}}function g(e,t,r){let n=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===r||r>this.length)&&(r=this.length),r<=0)return"";if((r>>>=0)<=(t>>>=0))return"";for(e||(e="utf8");;)switch(e){case"hex":return I(this,t,r);case"utf8":case"utf-8":return k(this,t,r);case"ascii":return O(this,t,r);case"latin1":case"binary":return j(this,t,r);case"base64":return A(this,t,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return N(this,t,r);default:if(n)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),n=!0}}function v(e,t,r){const n=e[t];e[t]=e[r],e[r]=n}function y(e,t,r,n,o){if(0===e.length)return-1;if("string"==typeof r?(n=r,r=0):r>2147483647?r=2147483647:r<-2147483648&&(r=-2147483648),Z(r=+r)&&(r=o?0:e.length-1),r<0&&(r=e.length+r),r>=e.length){if(o)return-1;r=e.length-1}else if(r<0){if(!o)return-1;r=0}if("string"==typeof t&&(t=l.from(t,n)),l.isBuffer(t))return 
0===t.length?-1:b(e,t,r,n,o);if("number"==typeof t)return t&=255,"function"==typeof Uint8Array.prototype.indexOf?o?Uint8Array.prototype.indexOf.call(e,t,r):Uint8Array.prototype.lastIndexOf.call(e,t,r):b(e,[t],r,n,o);throw new TypeError("val must be string, number or Buffer")}function b(e,t,r,n,o){let a,i=1,s=e.length,l=t.length;if(void 0!==n&&("ucs2"===(n=String(n).toLowerCase())||"ucs-2"===n||"utf16le"===n||"utf-16le"===n)){if(e.length<2||t.length<2)return-1;i=2,s/=2,l/=2,r/=2}function u(e,t){return 1===i?e[t]:e.readUInt16BE(t*i)}if(o){let n=-1;for(a=r;as&&(r=s-l),a=r;a>=0;a--){let r=!0;for(let n=0;no&&(n=o):n=o;const a=t.length;let i;for(n>a/2&&(n=a/2),i=0;i>8,o=r%256,a.push(o),a.push(n);return a}(t,e.length-r),e,r,n)}function A(e,t,r){return 0===t&&r===e.length?n.fromByteArray(e):n.fromByteArray(e.slice(t,r))}function k(e,t,r){r=Math.min(e.length,r);const n=[];let o=t;for(;o239?4:t>223?3:t>191?2:1;if(o+i<=r){let r,n,s,l;switch(i){case 1:t<128&&(a=t);break;case 2:r=e[o+1],128==(192&r)&&(l=(31&t)<<6|63&r,l>127&&(a=l));break;case 3:r=e[o+1],n=e[o+2],128==(192&r)&&128==(192&n)&&(l=(15&t)<<12|(63&r)<<6|63&n,l>2047&&(l<55296||l>57343)&&(a=l));break;case 4:r=e[o+1],n=e[o+2],s=e[o+3],128==(192&r)&&128==(192&n)&&128==(192&s)&&(l=(15&t)<<18|(63&r)<<12|(63&n)<<6|63&s,l>65535&&l<1114112&&(a=l))}}null===a?(a=65533,i=1):a>65535&&(a-=65536,n.push(a>>>10&1023|55296),a=56320|1023&a),n.push(a),o+=i}return function(e){const t=e.length;if(t<=C)return String.fromCharCode.apply(String,e);let r="",n=0;for(;nn.length?(l.isBuffer(t)||(t=l.from(t)),t.copy(n,o)):Uint8Array.prototype.set.call(n,t,o);else{if(!l.isBuffer(t))throw new TypeError('"list" argument must be an Array of Buffers');t.copy(n,o)}o+=t.length}return n},l.byteLength=m,l.prototype._isBuffer=!0,l.prototype.swap16=function(){const e=this.length;if(e%2!=0)throw new RangeError("Buffer size must be a multiple of 16-bits");for(let t=0;tr&&(e+=" ... 
"),""},a&&(l.prototype[a]=l.prototype.inspect),l.prototype.compare=function(e,t,r,n,o){if(G(e,Uint8Array)&&(e=l.from(e,e.offset,e.byteLength)),!l.isBuffer(e))throw new TypeError('The "target" argument must be one of type Buffer or Uint8Array. Received type '+typeof e);if(void 0===t&&(t=0),void 0===r&&(r=e?e.length:0),void 0===n&&(n=0),void 0===o&&(o=this.length),t<0||r>e.length||n<0||o>this.length)throw new RangeError("out of range index");if(n>=o&&t>=r)return 0;if(n>=o)return-1;if(t>=r)return 1;if(this===e)return 0;let a=(o>>>=0)-(n>>>=0),i=(r>>>=0)-(t>>>=0);const s=Math.min(a,i),u=this.slice(n,o),c=e.slice(t,r);for(let e=0;e>>=0,isFinite(r)?(r>>>=0,void 0===n&&(n="utf8")):(n=r,r=void 0)}const o=this.length-t;if((void 0===r||r>o)&&(r=o),e.length>0&&(r<0||t<0)||t>this.length)throw new RangeError("Attempt to write outside buffer bounds");n||(n="utf8");let a=!1;for(;;)switch(n){case"hex":return w(this,e,t,r);case"utf8":case"utf-8":return E(this,e,t,r);case"ascii":case"latin1":case"binary":return x(this,e,t,r);case"base64":return _(this,e,t,r);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return S(this,e,t,r);default:if(a)throw new TypeError("Unknown encoding: "+n);n=(""+n).toLowerCase(),a=!0}},l.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};const C=4096;function O(e,t,r){let n="";r=Math.min(e.length,r);for(let o=t;on)&&(r=n);let o="";for(let n=t;nr)throw new RangeError("Trying to access beyond buffer length")}function P(e,t,r,n,o,a){if(!l.isBuffer(e))throw new TypeError('"buffer" argument must be a Buffer instance');if(t>o||te.length)throw new RangeError("Index out of range")}function R(e,t,r,n,o){q(t,n,o,e,r,7);let a=Number(t&BigInt(4294967295));e[r++]=a,a>>=8,e[r++]=a,a>>=8,e[r++]=a,a>>=8,e[r++]=a;let i=Number(t>>BigInt(32)&BigInt(4294967295));return e[r++]=i,i>>=8,e[r++]=i,i>>=8,e[r++]=i,i>>=8,e[r++]=i,r}function M(e,t,r,n,o){q(t,n,o,e,r,7);let 
a=Number(t&BigInt(4294967295));e[r+7]=a,a>>=8,e[r+6]=a,a>>=8,e[r+5]=a,a>>=8,e[r+4]=a;let i=Number(t>>BigInt(32)&BigInt(4294967295));return e[r+3]=i,i>>=8,e[r+2]=i,i>>=8,e[r+1]=i,i>>=8,e[r]=i,r+8}function D(e,t,r,n,o,a){if(r+n>e.length)throw new RangeError("Index out of range");if(r<0)throw new RangeError("Index out of range")}function L(e,t,r,n,a){return t=+t,r>>>=0,a||D(e,0,r,4),o.write(e,t,r,n,23,4),r+4}function B(e,t,r,n,a){return t=+t,r>>>=0,a||D(e,0,r,8),o.write(e,t,r,n,52,8),r+8}l.prototype.slice=function(e,t){const r=this.length;(e=~~e)<0?(e+=r)<0&&(e=0):e>r&&(e=r),(t=void 0===t?r:~~t)<0?(t+=r)<0&&(t=0):t>r&&(t=r),t>>=0,t>>>=0,r||T(e,t,this.length);let n=this[e],o=1,a=0;for(;++a>>=0,t>>>=0,r||T(e,t,this.length);let n=this[e+--t],o=1;for(;t>0&&(o*=256);)n+=this[e+--t]*o;return n},l.prototype.readUint8=l.prototype.readUInt8=function(e,t){return e>>>=0,t||T(e,1,this.length),this[e]},l.prototype.readUint16LE=l.prototype.readUInt16LE=function(e,t){return e>>>=0,t||T(e,2,this.length),this[e]|this[e+1]<<8},l.prototype.readUint16BE=l.prototype.readUInt16BE=function(e,t){return e>>>=0,t||T(e,2,this.length),this[e]<<8|this[e+1]},l.prototype.readUint32LE=l.prototype.readUInt32LE=function(e,t){return e>>>=0,t||T(e,4,this.length),(this[e]|this[e+1]<<8|this[e+2]<<16)+16777216*this[e+3]},l.prototype.readUint32BE=l.prototype.readUInt32BE=function(e,t){return e>>>=0,t||T(e,4,this.length),16777216*this[e]+(this[e+1]<<16|this[e+2]<<8|this[e+3])},l.prototype.readBigUInt64LE=Q((function(e){V(e>>>=0,"offset");const t=this[e],r=this[e+7];void 0!==t&&void 0!==r||$(e,this.length-8);const n=t+256*this[++e]+65536*this[++e]+this[++e]*2**24,o=this[++e]+256*this[++e]+65536*this[++e]+r*2**24;return BigInt(n)+(BigInt(o)<>>=0,"offset");const t=this[e],r=this[e+7];void 0!==t&&void 0!==r||$(e,this.length-8);const n=t*2**24+65536*this[++e]+256*this[++e]+this[++e],o=this[++e]*2**24+65536*this[++e]+256*this[++e]+r;return(BigInt(n)<>>=0,t>>>=0,r||T(e,t,this.length);let 
n=this[e],o=1,a=0;for(;++a=o&&(n-=Math.pow(2,8*t)),n},l.prototype.readIntBE=function(e,t,r){e>>>=0,t>>>=0,r||T(e,t,this.length);let n=t,o=1,a=this[e+--n];for(;n>0&&(o*=256);)a+=this[e+--n]*o;return o*=128,a>=o&&(a-=Math.pow(2,8*t)),a},l.prototype.readInt8=function(e,t){return e>>>=0,t||T(e,1,this.length),128&this[e]?-1*(255-this[e]+1):this[e]},l.prototype.readInt16LE=function(e,t){e>>>=0,t||T(e,2,this.length);const r=this[e]|this[e+1]<<8;return 32768&r?4294901760|r:r},l.prototype.readInt16BE=function(e,t){e>>>=0,t||T(e,2,this.length);const r=this[e+1]|this[e]<<8;return 32768&r?4294901760|r:r},l.prototype.readInt32LE=function(e,t){return e>>>=0,t||T(e,4,this.length),this[e]|this[e+1]<<8|this[e+2]<<16|this[e+3]<<24},l.prototype.readInt32BE=function(e,t){return e>>>=0,t||T(e,4,this.length),this[e]<<24|this[e+1]<<16|this[e+2]<<8|this[e+3]},l.prototype.readBigInt64LE=Q((function(e){V(e>>>=0,"offset");const t=this[e],r=this[e+7];void 0!==t&&void 0!==r||$(e,this.length-8);const n=this[e+4]+256*this[e+5]+65536*this[e+6]+(r<<24);return(BigInt(n)<>>=0,"offset");const t=this[e],r=this[e+7];void 0!==t&&void 0!==r||$(e,this.length-8);const n=(t<<24)+65536*this[++e]+256*this[++e]+this[++e];return(BigInt(n)<>>=0,t||T(e,4,this.length),o.read(this,e,!0,23,4)},l.prototype.readFloatBE=function(e,t){return e>>>=0,t||T(e,4,this.length),o.read(this,e,!1,23,4)},l.prototype.readDoubleLE=function(e,t){return e>>>=0,t||T(e,8,this.length),o.read(this,e,!0,52,8)},l.prototype.readDoubleBE=function(e,t){return e>>>=0,t||T(e,8,this.length),o.read(this,e,!1,52,8)},l.prototype.writeUintLE=l.prototype.writeUIntLE=function(e,t,r,n){if(e=+e,t>>>=0,r>>>=0,!n){P(this,e,t,r,Math.pow(2,8*r)-1,0)}let o=1,a=0;for(this[t]=255&e;++a>>=0,r>>>=0,!n){P(this,e,t,r,Math.pow(2,8*r)-1,0)}let o=r-1,a=1;for(this[t+o]=255&e;--o>=0&&(a*=256);)this[t+o]=e/a&255;return t+r},l.prototype.writeUint8=l.prototype.writeUInt8=function(e,t,r){return 
e=+e,t>>>=0,r||P(this,e,t,1,255,0),this[t]=255&e,t+1},l.prototype.writeUint16LE=l.prototype.writeUInt16LE=function(e,t,r){return e=+e,t>>>=0,r||P(this,e,t,2,65535,0),this[t]=255&e,this[t+1]=e>>>8,t+2},l.prototype.writeUint16BE=l.prototype.writeUInt16BE=function(e,t,r){return e=+e,t>>>=0,r||P(this,e,t,2,65535,0),this[t]=e>>>8,this[t+1]=255&e,t+2},l.prototype.writeUint32LE=l.prototype.writeUInt32LE=function(e,t,r){return e=+e,t>>>=0,r||P(this,e,t,4,4294967295,0),this[t+3]=e>>>24,this[t+2]=e>>>16,this[t+1]=e>>>8,this[t]=255&e,t+4},l.prototype.writeUint32BE=l.prototype.writeUInt32BE=function(e,t,r){return e=+e,t>>>=0,r||P(this,e,t,4,4294967295,0),this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e,t+4},l.prototype.writeBigUInt64LE=Q((function(e,t=0){return R(this,e,t,BigInt(0),BigInt("0xffffffffffffffff"))})),l.prototype.writeBigUInt64BE=Q((function(e,t=0){return M(this,e,t,BigInt(0),BigInt("0xffffffffffffffff"))})),l.prototype.writeIntLE=function(e,t,r,n){if(e=+e,t>>>=0,!n){const n=Math.pow(2,8*r-1);P(this,e,t,r,n-1,-n)}let o=0,a=1,i=0;for(this[t]=255&e;++o>0)-i&255;return t+r},l.prototype.writeIntBE=function(e,t,r,n){if(e=+e,t>>>=0,!n){const n=Math.pow(2,8*r-1);P(this,e,t,r,n-1,-n)}let o=r-1,a=1,i=0;for(this[t+o]=255&e;--o>=0&&(a*=256);)e<0&&0===i&&0!==this[t+o+1]&&(i=1),this[t+o]=(e/a>>0)-i&255;return t+r},l.prototype.writeInt8=function(e,t,r){return e=+e,t>>>=0,r||P(this,e,t,1,127,-128),e<0&&(e=255+e+1),this[t]=255&e,t+1},l.prototype.writeInt16LE=function(e,t,r){return e=+e,t>>>=0,r||P(this,e,t,2,32767,-32768),this[t]=255&e,this[t+1]=e>>>8,t+2},l.prototype.writeInt16BE=function(e,t,r){return e=+e,t>>>=0,r||P(this,e,t,2,32767,-32768),this[t]=e>>>8,this[t+1]=255&e,t+2},l.prototype.writeInt32LE=function(e,t,r){return e=+e,t>>>=0,r||P(this,e,t,4,2147483647,-2147483648),this[t]=255&e,this[t+1]=e>>>8,this[t+2]=e>>>16,this[t+3]=e>>>24,t+4},l.prototype.writeInt32BE=function(e,t,r){return 
e=+e,t>>>=0,r||P(this,e,t,4,2147483647,-2147483648),e<0&&(e=4294967295+e+1),this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e,t+4},l.prototype.writeBigInt64LE=Q((function(e,t=0){return R(this,e,t,-BigInt("0x8000000000000000"),BigInt("0x7fffffffffffffff"))})),l.prototype.writeBigInt64BE=Q((function(e,t=0){return M(this,e,t,-BigInt("0x8000000000000000"),BigInt("0x7fffffffffffffff"))})),l.prototype.writeFloatLE=function(e,t,r){return L(this,e,t,!0,r)},l.prototype.writeFloatBE=function(e,t,r){return L(this,e,t,!1,r)},l.prototype.writeDoubleLE=function(e,t,r){return B(this,e,t,!0,r)},l.prototype.writeDoubleBE=function(e,t,r){return B(this,e,t,!1,r)},l.prototype.copy=function(e,t,r,n){if(!l.isBuffer(e))throw new TypeError("argument should be a Buffer");if(r||(r=0),n||0===n||(n=this.length),t>=e.length&&(t=e.length),t||(t=0),n>0&&n=this.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("sourceEnd out of bounds");n>this.length&&(n=this.length),e.length-t>>=0,r=void 0===r?this.length:r>>>0,e||(e=0),"number"==typeof e)for(o=t;o=n+4;r-=3)t=`_${e.slice(r-3,r)}${t}`;return`${e.slice(0,r)}${t}`}function q(e,t,r,n,o,a){if(e>r||e3?0===t||t===BigInt(0)?`>= 0${n} and < 2${n} ** ${8*(a+1)}${n}`:`>= -(2${n} ** ${8*(a+1)-1}${n}) and < 2 ** ${8*(a+1)-1}${n}`:`>= ${t}${n} and <= ${r}${n}`,new F.ERR_OUT_OF_RANGE("value",o,e)}!function(e,t,r){V(t,"offset"),void 0!==e[t]&&void 0!==e[t+r]||$(t,e.length-(r+1))}(n,o,a)}function V(e,t){if("number"!=typeof e)throw new F.ERR_INVALID_ARG_TYPE(t,"number",e)}function $(e,t,r){if(Math.floor(e)!==e)throw V(e,r),new F.ERR_OUT_OF_RANGE(r||"offset","an integer",e);if(t<0)throw new F.ERR_BUFFER_OUT_OF_BOUNDS;throw new F.ERR_OUT_OF_RANGE(r||"offset",`>= ${r?1:0} and <= ${t}`,e)}z("ERR_BUFFER_OUT_OF_BOUNDS",(function(e){return e?`${e} is outside of buffer bounds`:"Attempt to access memory outside buffer bounds"}),RangeError),z("ERR_INVALID_ARG_TYPE",(function(e,t){return`The "${e}" argument must be of type number. 
Received type ${typeof t}`}),TypeError),z("ERR_OUT_OF_RANGE",(function(e,t,r){let n=`The value of "${e}" is out of range.`,o=r;return Number.isInteger(r)&&Math.abs(r)>2**32?o=U(String(r)):"bigint"==typeof r&&(o=String(r),(r>BigInt(2)**BigInt(32)||r<-(BigInt(2)**BigInt(32)))&&(o=U(o)),o+="n"),n+=` It must be ${t}. Received ${o}`,n}),RangeError);const W=/[^+/0-9A-Za-z-_]/g;function H(e,t){let r;t=t||1/0;const n=e.length;let o=null;const a=[];for(let i=0;i55295&&r<57344){if(!o){if(r>56319){(t-=3)>-1&&a.push(239,191,189);continue}if(i+1===n){(t-=3)>-1&&a.push(239,191,189);continue}o=r;continue}if(r<56320){(t-=3)>-1&&a.push(239,191,189),o=r;continue}r=65536+(o-55296<<10|r-56320)}else o&&(t-=3)>-1&&a.push(239,191,189);if(o=null,r<128){if((t-=1)<0)break;a.push(r)}else if(r<2048){if((t-=2)<0)break;a.push(r>>6|192,63&r|128)}else if(r<65536){if((t-=3)<0)break;a.push(r>>12|224,r>>6&63|128,63&r|128)}else{if(!(r<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;a.push(r>>18|240,r>>12&63|128,r>>6&63|128,63&r|128)}}return a}function J(e){return n.toByteArray(function(e){if((e=(e=e.split("=")[0]).trim().replace(W,"")).length<2)return"";for(;e.length%4!=0;)e+="=";return e}(e))}function K(e,t,r,n){let o;for(o=0;o=t.length||o>=e.length);++o)t[o+r]=e[o];return o}function G(e,t){return e instanceof t||null!=e&&null!=e.constructor&&null!=e.constructor.name&&e.constructor.name===t.name}function Z(e){return e!=e}const Y=function(){const e="0123456789abcdef",t=new Array(256);for(let r=0;r<16;++r){const n=16*r;for(let o=0;o<16;++o)t[n+o]=e[r]+e[o]}return t}();function Q(e){return"undefined"==typeof BigInt?X:e}function X(){throw new Error("BigInt not supported")}},21924:(e,t,r)=>{"use strict";var n=r(40210),o=r(55559),a=o(n("String.prototype.indexOf"));e.exports=function(e,t){var r=n(e,!!t);return"function"==typeof r&&a(e,".prototype.")>-1?o(r):r}},55559:(e,t,r)=>{"use strict";var 
n=r(58612),o=r(40210),a=o("%Function.prototype.apply%"),i=o("%Function.prototype.call%"),s=o("%Reflect.apply%",!0)||n.call(i,a),l=o("%Object.getOwnPropertyDescriptor%",!0),u=o("%Object.defineProperty%",!0),c=o("%Math.max%");if(u)try{u({},"a",{value:1})}catch(e){u=null}e.exports=function(e){var t=s(n,i,arguments);if(l&&u){var r=l(t,"length");r.configurable&&u(t,"length",{value:1+c(0,e.length-(arguments.length-1))})}return t};var p=function(){return s(n,a,arguments)};u?u(e.exports,"apply",{value:p}):e.exports.apply=p},94184:(e,t)=>{var r;!function(){"use strict";var n={}.hasOwnProperty;function o(){for(var e=[],t=0;t{"use strict";t.parse=function(e,t){if("string"!=typeof e)throw new TypeError("argument str must be a string");var r={},n=(t||{}).decode||o,a=0;for(;a{"use strict";var n=r(11742),o={"text/plain":"Text","text/html":"Url",default:"Text"};e.exports=function(e,t){var r,a,i,s,l,u,c=!1;t||(t={}),r=t.debug||!1;try{if(i=n(),s=document.createRange(),l=document.getSelection(),(u=document.createElement("span")).textContent=e,u.style.all="unset",u.style.position="fixed",u.style.top=0,u.style.clip="rect(0, 0, 0, 0)",u.style.whiteSpace="pre",u.style.webkitUserSelect="text",u.style.MozUserSelect="text",u.style.msUserSelect="text",u.style.userSelect="text",u.addEventListener("copy",(function(n){if(n.stopPropagation(),t.format)if(n.preventDefault(),void 0===n.clipboardData){r&&console.warn("unable to use e.clipboardData"),r&&console.warn("trying IE specific stuff"),window.clipboardData.clearData();var a=o[t.format]||o.default;window.clipboardData.setData(a,e)}else n.clipboardData.clearData(),n.clipboardData.setData(t.format,e);t.onCopy&&(n.preventDefault(),t.onCopy(n.clipboardData))})),document.body.appendChild(u),s.selectNodeContents(u),l.addRange(s),!document.execCommand("copy"))throw new Error("copy command was unsuccessful");c=!0}catch(n){r&&console.error("unable to copy using execCommand: ",n),r&&console.warn("trying IE specific 
stuff");try{window.clipboardData.setData(t.format||"text",e),t.onCopy&&t.onCopy(window.clipboardData),c=!0}catch(n){r&&console.error("unable to copy using clipboardData: ",n),r&&console.error("falling back to prompt"),a=function(e){var t=(/mac os x/i.test(navigator.userAgent)?"⌘":"Ctrl")+"+C";return e.replace(/#{\s*key\s*}/g,t)}("message"in t?t.message:"Copy to clipboard: #{key}, Enter"),window.prompt(a,e)}}finally{l&&("function"==typeof l.removeRange?l.removeRange(s):l.removeAllRanges()),u&&document.body.removeChild(u),i()}return c}},95299:(e,t,r)=>{var n=r(24848);e.exports=n},83450:(e,t,r)=>{var n=r(83363);e.exports=n},66820:(e,t,r)=>{var n=r(56243);e.exports=n},5023:(e,t,r)=>{var n=r(72369);e.exports=n},90093:(e,t,r)=>{var n=r(28196);e.exports=n},3688:(e,t,r)=>{var n=r(11955);e.exports=n},83838:(e,t,r)=>{var n=r(46279);e.exports=n},15684:(e,t,r)=>{var n=r(19373);e.exports=n},99826:(e,t,r)=>{var n=r(28427);e.exports=n},84234:(e,t,r)=>{var n=r(82073);e.exports=n},65362:(e,t,r)=>{var n=r(63383);e.exports=n},32271:(e,t,r)=>{var n=r(14471);e.exports=n},91254:(e,t,r)=>{var n=r(57396);e.exports=n},43536:(e,t,r)=>{var n=r(41910);e.exports=n},37331:(e,t,r)=>{var n=r(79427);e.exports=n},68522:(e,t,r)=>{var n=r(62857);e.exports=n},73151:(e,t,r)=>{var n=r(9534);e.exports=n},99565:(e,t,r)=>{var n=r(96507);e.exports=n},45012:(e,t,r)=>{var n=r(23059);e.exports=n},78690:(e,t,r)=>{var n=r(16670);e.exports=n},25626:(e,t,r)=>{var n=r(27460);e.exports=n},80281:(e,t,r)=>{var n=r(92547);e.exports=n},40031:(e,t,r)=>{var n=r(46509);e.exports=n},54493:(e,t,r)=>{r(77971),r(53242);var n=r(54058);e.exports=n.Array.from},24034:(e,t,r)=>{r(92737);var n=r(54058);e.exports=n.Array.isArray},15367:(e,t,r)=>{r(85906);var n=r(35703);e.exports=n("Array").concat},12710:(e,t,r)=>{r(66274),r(55967);var n=r(35703);e.exports=n("Array").entries},51459:(e,t,r)=>{r(48851);var n=r(35703);e.exports=n("Array").every},6172:(e,t,r)=>{r(80290);var 
n=r(35703);e.exports=n("Array").fill},62383:(e,t,r)=>{r(21501);var n=r(35703);e.exports=n("Array").filter},60009:(e,t,r)=>{r(44929);var n=r(35703);e.exports=n("Array").findIndex},17671:(e,t,r)=>{r(80833);var n=r(35703);e.exports=n("Array").find},99324:(e,t,r)=>{r(2437);var n=r(35703);e.exports=n("Array").forEach},80991:(e,t,r)=>{r(97690);var n=r(35703);e.exports=n("Array").includes},8700:(e,t,r)=>{r(99076);var n=r(35703);e.exports=n("Array").indexOf},95909:(e,t,r)=>{r(66274),r(55967);var n=r(35703);e.exports=n("Array").keys},6442:(e,t,r)=>{r(75915);var n=r(35703);e.exports=n("Array").lastIndexOf},23866:(e,t,r)=>{r(68787);var n=r(35703);e.exports=n("Array").map},52999:(e,t,r)=>{r(81876);var n=r(35703);e.exports=n("Array").reduce},91876:(e,t,r)=>{r(11490);var n=r(35703);e.exports=n("Array").reverse},24900:(e,t,r)=>{r(60186);var n=r(35703);e.exports=n("Array").slice},3824:(e,t,r)=>{r(36026);var n=r(35703);e.exports=n("Array").some},2948:(e,t,r)=>{r(4115);var n=r(35703);e.exports=n("Array").sort},78209:(e,t,r)=>{r(98611);var n=r(35703);e.exports=n("Array").splice},14423:(e,t,r)=>{r(66274),r(55967);var n=r(35703);e.exports=n("Array").values},81103:(e,t,r)=>{r(95160);var n=r(54058);e.exports=n.Date.now},27700:(e,t,r)=>{r(73381);var n=r(35703);e.exports=n("Function").bind},13830:(e,t,r)=>{r(66274),r(77971);var n=r(22902);e.exports=n},91031:(e,t,r)=>{r(52595),e.exports=r(21899)},16246:(e,t,r)=>{var n=r(7046),o=r(27700),a=Function.prototype;e.exports=function(e){var t=e.bind;return e===a||n(a,e)&&t===a.bind?o:t}},56043:(e,t,r)=>{var n=r(7046),o=r(15367),a=Array.prototype;e.exports=function(e){var t=e.concat;return e===a||n(a,e)&&t===a.concat?o:t}},13160:(e,t,r)=>{var n=r(7046),o=r(51459),a=Array.prototype;e.exports=function(e){var t=e.every;return e===a||n(a,e)&&t===a.every?o:t}},80446:(e,t,r)=>{var n=r(7046),o=r(6172),a=Array.prototype;e.exports=function(e){var t=e.fill;return e===a||n(a,e)&&t===a.fill?o:t}},2480:(e,t,r)=>{var 
n=r(7046),o=r(62383),a=Array.prototype;e.exports=function(e){var t=e.filter;return e===a||n(a,e)&&t===a.filter?o:t}},7147:(e,t,r)=>{var n=r(7046),o=r(60009),a=Array.prototype;e.exports=function(e){var t=e.findIndex;return e===a||n(a,e)&&t===a.findIndex?o:t}},32236:(e,t,r)=>{var n=r(7046),o=r(17671),a=Array.prototype;e.exports=function(e){var t=e.find;return e===a||n(a,e)&&t===a.find?o:t}},58557:(e,t,r)=>{var n=r(7046),o=r(80991),a=r(21631),i=Array.prototype,s=String.prototype;e.exports=function(e){var t=e.includes;return e===i||n(i,e)&&t===i.includes?o:"string"==typeof e||e===s||n(s,e)&&t===s.includes?a:t}},34570:(e,t,r)=>{var n=r(7046),o=r(8700),a=Array.prototype;e.exports=function(e){var t=e.indexOf;return e===a||n(a,e)&&t===a.indexOf?o:t}},57564:(e,t,r)=>{var n=r(7046),o=r(6442),a=Array.prototype;e.exports=function(e){var t=e.lastIndexOf;return e===a||n(a,e)&&t===a.lastIndexOf?o:t}},88287:(e,t,r)=>{var n=r(7046),o=r(23866),a=Array.prototype;e.exports=function(e){var t=e.map;return e===a||n(a,e)&&t===a.map?o:t}},68025:(e,t,r)=>{var n=r(7046),o=r(52999),a=Array.prototype;e.exports=function(e){var t=e.reduce;return e===a||n(a,e)&&t===a.reduce?o:t}},59257:(e,t,r)=>{var n=r(7046),o=r(80454),a=String.prototype;e.exports=function(e){var t=e.repeat;return"string"==typeof e||e===a||n(a,e)&&t===a.repeat?o:t}},91060:(e,t,r)=>{var n=r(7046),o=r(91876),a=Array.prototype;e.exports=function(e){var t=e.reverse;return e===a||n(a,e)&&t===a.reverse?o:t}},69601:(e,t,r)=>{var n=r(7046),o=r(24900),a=Array.prototype;e.exports=function(e){var t=e.slice;return e===a||n(a,e)&&t===a.slice?o:t}},28299:(e,t,r)=>{var n=r(7046),o=r(3824),a=Array.prototype;e.exports=function(e){var t=e.some;return e===a||n(a,e)&&t===a.some?o:t}},69355:(e,t,r)=>{var n=r(7046),o=r(2948),a=Array.prototype;e.exports=function(e){var t=e.sort;return e===a||n(a,e)&&t===a.sort?o:t}},18339:(e,t,r)=>{var n=r(7046),o=r(78209),a=Array.prototype;e.exports=function(e){var t=e.splice;return 
e===a||n(a,e)&&t===a.splice?o:t}},71611:(e,t,r)=>{var n=r(7046),o=r(3269),a=String.prototype;e.exports=function(e){var t=e.startsWith;return"string"==typeof e||e===a||n(a,e)&&t===a.startsWith?o:t}},62774:(e,t,r)=>{var n=r(7046),o=r(13348),a=String.prototype;e.exports=function(e){var t=e.trim;return"string"==typeof e||e===a||n(a,e)&&t===a.trim?o:t}},84426:(e,t,r)=>{r(32619);var n=r(54058),o=r(79730);n.JSON||(n.JSON={stringify:JSON.stringify}),e.exports=function(e,t,r){return o(n.JSON.stringify,null,arguments)}},91018:(e,t,r)=>{r(66274),r(37501),r(55967),r(77971);var n=r(54058);e.exports=n.Map},45999:(e,t,r)=>{r(49221);var n=r(54058);e.exports=n.Object.assign},35254:(e,t,r)=>{r(53882);var n=r(54058).Object;e.exports=function(e,t){return n.create(e,t)}},7702:(e,t,r)=>{r(74979);var n=r(54058).Object,o=e.exports=function(e,t){return n.defineProperties(e,t)};n.defineProperties.sham&&(o.sham=!0)},48171:(e,t,r)=>{r(86450);var n=r(54058).Object,o=e.exports=function(e,t,r){return n.defineProperty(e,t,r)};n.defineProperty.sham&&(o.sham=!0)},73081:(e,t,r)=>{r(94366);var n=r(54058);e.exports=n.Object.entries},286:(e,t,r)=>{r(46924);var n=r(54058).Object,o=e.exports=function(e,t){return n.getOwnPropertyDescriptor(e,t)};n.getOwnPropertyDescriptor.sham&&(o.sham=!0)},92766:(e,t,r)=>{r(88482);var n=r(54058);e.exports=n.Object.getOwnPropertyDescriptors},30498:(e,t,r)=>{r(35824);var n=r(54058);e.exports=n.Object.getOwnPropertySymbols},13966:(e,t,r)=>{r(17405);var n=r(54058);e.exports=n.Object.getPrototypeOf},48494:(e,t,r)=>{r(21724);var n=r(54058);e.exports=n.Object.keys},3065:(e,t,r)=>{r(90108);var n=r(54058);e.exports=n.Object.setPrototypeOf},98430:(e,t,r)=>{r(26614);var n=r(54058);e.exports=n.Object.values},52956:(e,t,r)=>{r(47627),r(66274),r(55967),r(98881),r(4560),r(91302),r(44349),r(77971);var n=r(54058);e.exports=n.Promise},21631:(e,t,r)=>{r(11035);var n=r(35703);e.exports=n("String").includes},80454:(e,t,r)=>{r(60986);var 
n=r(35703);e.exports=n("String").repeat},3269:(e,t,r)=>{r(94761);var n=r(35703);e.exports=n("String").startsWith},13348:(e,t,r)=>{r(57398);var n=r(35703);e.exports=n("String").trim},57473:(e,t,r)=>{r(85906),r(55967),r(35824),r(8555),r(52615),r(21732),r(35903),r(1825),r(28394),r(45915),r(61766),r(62737),r(89911),r(74315),r(63131),r(64714),r(70659),r(69120),r(79413),r(1502);var n=r(54058);e.exports=n.Symbol},24227:(e,t,r)=>{r(66274),r(55967),r(77971),r(1825);var n=r(11477);e.exports=n.f("iterator")},32304:(e,t,r)=>{r(66274),r(55967),r(54334);var n=r(54058);e.exports=n.WeakMap},27385:(e,t,r)=>{var n=r(95299);e.exports=n},81522:(e,t,r)=>{var n=r(83450);e.exports=n},32209:(e,t,r)=>{var n=r(66820);e.exports=n},30888:(e,t,r)=>{r(9668);var n=r(5023);e.exports=n},14122:(e,t,r)=>{var n=r(90093);e.exports=n},44442:(e,t,r)=>{var n=r(3688);e.exports=n},57152:(e,t,r)=>{var n=r(83838);e.exports=n},69447:(e,t,r)=>{var n=r(15684);e.exports=n},17579:(e,t,r)=>{var n=r(99826);e.exports=n},81493:(e,t,r)=>{var n=r(84234);e.exports=n},60269:(e,t,r)=>{var n=r(65362);e.exports=n},76094:(e,t,r)=>{var n=r(32271);e.exports=n},70573:(e,t,r)=>{var n=r(91254);e.exports=n},73685:(e,t,r)=>{var n=r(43536);e.exports=n},27533:(e,t,r)=>{var n=r(37331);e.exports=n},39057:(e,t,r)=>{var n=r(68522);e.exports=n},84710:(e,t,r)=>{var n=r(73151);e.exports=n},74303:(e,t,r)=>{var n=r(99565);e.exports=n},93799:(e,t,r)=>{var n=r(45012);e.exports=n},55122:(e,t,r)=>{var n=r(78690);e.exports=n},29531:(e,t,r)=>{var n=r(25626);r(89731),r(55708),r(30014),r(88731),e.exports=n},86600:(e,t,r)=>{var n=r(80281);r(28783),r(43975),r(65799),r(45414),r(46774),r(80620),r(36172),e.exports=n},9759:(e,t,r)=>{var n=r(40031);e.exports=n},24883:(e,t,r)=>{var n=r(21899),o=r(57475),a=r(69826),i=n.TypeError;e.exports=function(e){if(o(e))return e;throw i(a(e)+" is not a function")}},174:(e,t,r)=>{var n=r(21899),o=r(24284),a=r(69826),i=n.TypeError;e.exports=function(e){if(o(e))return e;throw i(a(e)+" is not a 
constructor")}},11851:(e,t,r)=>{var n=r(21899),o=r(57475),a=n.String,i=n.TypeError;e.exports=function(e){if("object"==typeof e||o(e))return e;throw i("Can't set "+a(e)+" as a prototype")}},18479:e=>{e.exports=function(){}},5743:(e,t,r)=>{var n=r(21899),o=r(7046),a=n.TypeError;e.exports=function(e,t){if(o(t,e))return e;throw a("Incorrect invocation")}},96059:(e,t,r)=>{var n=r(21899),o=r(10941),a=n.String,i=n.TypeError;e.exports=function(e){if(o(e))return e;throw i(a(e)+" is not an object")}},97135:(e,t,r)=>{var n=r(95981);e.exports=n((function(){if("function"==typeof ArrayBuffer){var e=new ArrayBuffer(8);Object.isExtensible(e)&&Object.defineProperty(e,"a",{value:8})}}))},91860:(e,t,r)=>{"use strict";var n=r(89678),o=r(59413),a=r(10623);e.exports=function(e){for(var t=n(this),r=a(t),i=arguments.length,s=o(i>1?arguments[1]:void 0,r),l=i>2?arguments[2]:void 0,u=void 0===l?r:o(l,r);u>s;)t[s++]=e;return t}},56837:(e,t,r)=>{"use strict";var n=r(3610).forEach,o=r(34194)("forEach");e.exports=o?[].forEach:function(e){return n(this,e,arguments.length>1?arguments[1]:void 0)}},11354:(e,t,r)=>{"use strict";var n=r(21899),o=r(86843),a=r(78834),i=r(89678),s=r(75196),l=r(6782),u=r(24284),c=r(10623),p=r(55449),f=r(53476),h=r(22902),d=n.Array;e.exports=function(e){var t=i(e),r=u(this),n=arguments.length,m=n>1?arguments[1]:void 0,g=void 0!==m;g&&(m=o(m,n>2?arguments[2]:void 0));var v,y,b,w,E,x,_=h(t),S=0;if(!_||this==d&&l(_))for(v=c(t),y=r?new this(v):d(v);v>S;S++)x=g?m(t[S],S):t[S],p(y,S,x);else for(E=(w=f(t,_)).next,y=r?new this:[];!(b=a(E,w)).done;S++)x=g?s(w,m,[b.value,S],!0):b.value,p(y,S,x);return y.length=S,y}},31692:(e,t,r)=>{var n=r(74529),o=r(59413),a=r(10623),i=function(e){return function(t,r,i){var s,l=n(t),u=a(l),c=o(i,u);if(e&&r!=r){for(;u>c;)if((s=l[c++])!=s)return!0}else for(;u>c;c++)if((e||c in l)&&l[c]===r)return e||c||0;return!e&&-1}};e.exports={includes:i(!0),indexOf:i(!1)}},3610:(e,t,r)=>{var 
n=r(86843),o=r(95329),a=r(37026),i=r(89678),s=r(10623),l=r(64692),u=o([].push),c=function(e){var t=1==e,r=2==e,o=3==e,c=4==e,p=6==e,f=7==e,h=5==e||p;return function(d,m,g,v){for(var y,b,w=i(d),E=a(w),x=n(m,g),_=s(E),S=0,A=v||l,k=t?A(d,_):r||f?A(d,0):void 0;_>S;S++)if((h||S in E)&&(b=x(y=E[S],S,w),e))if(t)k[S]=b;else if(b)switch(e){case 3:return!0;case 5:return y;case 6:return S;case 2:u(k,y)}else switch(e){case 4:return!1;case 7:u(k,y)}return p?-1:o||c?c:k}};e.exports={forEach:c(0),map:c(1),filter:c(2),some:c(3),every:c(4),find:c(5),findIndex:c(6),filterReject:c(7)}},67145:(e,t,r)=>{"use strict";var n=r(79730),o=r(74529),a=r(62435),i=r(10623),s=r(34194),l=Math.min,u=[].lastIndexOf,c=!!u&&1/[1].lastIndexOf(1,-0)<0,p=s("lastIndexOf"),f=c||!p;e.exports=f?function(e){if(c)return n(u,this,arguments)||0;var t=o(this),r=i(t),s=r-1;for(arguments.length>1&&(s=l(s,a(arguments[1]))),s<0&&(s=r+s);s>=0;s--)if(s in t&&t[s]===e)return s||0;return-1}:u},50568:(e,t,r)=>{var n=r(95981),o=r(99813),a=r(53385),i=o("species");e.exports=function(e){return a>=51||!n((function(){var t=[];return(t.constructor={})[i]=function(){return{foo:1}},1!==t[e](Boolean).foo}))}},34194:(e,t,r)=>{"use strict";var n=r(95981);e.exports=function(e,t){var r=[][e];return!!r&&n((function(){r.call(null,t||function(){throw 1},1)}))}},46499:(e,t,r)=>{var n=r(21899),o=r(24883),a=r(89678),i=r(37026),s=r(10623),l=n.TypeError,u=function(e){return function(t,r,n,u){o(r);var c=a(t),p=i(c),f=s(c),h=e?f-1:0,d=e?-1:1;if(n<2)for(;;){if(h in p){u=p[h],h+=d;break}if(h+=d,e?h<0:f<=h)throw l("Reduce of empty array with no initial value")}for(;e?h>=0:f>h;h+=d)h in p&&(u=r(u,p[h],h,c));return u}};e.exports={left:u(!1),right:u(!0)}},15790:(e,t,r)=>{var n=r(21899),o=r(59413),a=r(10623),i=r(55449),s=n.Array,l=Math.max;e.exports=function(e,t,r){for(var n=a(e),u=o(t,n),c=o(void 0===r?n:r,n),p=s(l(c-u,0)),f=0;u{var n=r(95329);e.exports=n([].slice)},61388:(e,t,r)=>{var n=r(15790),o=Math.floor,a=function(e,t){var 
r=e.length,l=o(r/2);return r<8?i(e,t):s(e,a(n(e,0,l),t),a(n(e,l),t),t)},i=function(e,t){for(var r,n,o=e.length,a=1;a0;)e[n]=e[--n];n!==a++&&(e[n]=r)}return e},s=function(e,t,r,n){for(var o=t.length,a=r.length,i=0,s=0;i{var n=r(21899),o=r(1052),a=r(24284),i=r(10941),s=r(99813)("species"),l=n.Array;e.exports=function(e){var t;return o(e)&&(t=e.constructor,(a(t)&&(t===l||o(t.prototype))||i(t)&&null===(t=t[s]))&&(t=void 0)),void 0===t?l:t}},64692:(e,t,r)=>{var n=r(5693);e.exports=function(e,t){return new(n(e))(0===t?0:t)}},75196:(e,t,r)=>{var n=r(96059),o=r(7609);e.exports=function(e,t,r,a){try{return a?t(n(r)[0],r[1]):t(r)}catch(t){o(e,"throw",t)}}},21385:(e,t,r)=>{var n=r(99813)("iterator"),o=!1;try{var a=0,i={next:function(){return{done:!!a++}},return:function(){o=!0}};i[n]=function(){return this},Array.from(i,(function(){throw 2}))}catch(e){}e.exports=function(e,t){if(!t&&!o)return!1;var r=!1;try{var a={};a[n]=function(){return{next:function(){return{done:r=!0}}}},e(a)}catch(e){}return r}},82532:(e,t,r)=>{var n=r(95329),o=n({}.toString),a=n("".slice);e.exports=function(e){return a(o(e),8,-1)}},9697:(e,t,r)=>{var n=r(21899),o=r(22885),a=r(57475),i=r(82532),s=r(99813)("toStringTag"),l=n.Object,u="Arguments"==i(function(){return arguments}());e.exports=o?i:function(e){var t,r,n;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(r=function(e,t){try{return e[t]}catch(e){}}(t=l(e),s))?r:u?i(t):"Object"==(n=i(t))&&a(t.callee)?"Arguments":n}},38694:(e,t,r)=>{var n=r(95329)("".replace),o=String(Error("zxcasd").stack),a=/\n\s*at [^:]*:[^\n]*/,i=a.test(o);e.exports=function(e,t){if(i&&"string"==typeof e)for(;t--;)e=n(e,a,"");return e}},85616:(e,t,r)=>{"use strict";var n=r(65988).f,o=r(29290),a=r(87524),i=r(86843),s=r(5743),l=r(93091),u=r(47771),c=r(94431),p=r(55746),f=r(21647).fastKey,h=r(45402),d=h.set,m=h.getterFor;e.exports={getConstructor:function(e,t,r,u){var c=e((function(e,n){s(e,h),d(e,{type:t,index:o(null),first:void 0,last:void 
0,size:0}),p||(e.size=0),null!=n&&l(n,e[u],{that:e,AS_ENTRIES:r})})),h=c.prototype,g=m(t),v=function(e,t,r){var n,o,a=g(e),i=y(e,t);return i?i.value=r:(a.last=i={index:o=f(t,!0),key:t,value:r,previous:n=a.last,next:void 0,removed:!1},a.first||(a.first=i),n&&(n.next=i),p?a.size++:e.size++,"F"!==o&&(a.index[o]=i)),e},y=function(e,t){var r,n=g(e),o=f(t);if("F"!==o)return n.index[o];for(r=n.first;r;r=r.next)if(r.key==t)return r};return a(h,{clear:function(){for(var e=g(this),t=e.index,r=e.first;r;)r.removed=!0,r.previous&&(r.previous=r.previous.next=void 0),delete t[r.index],r=r.next;e.first=e.last=void 0,p?e.size=0:this.size=0},delete:function(e){var t=this,r=g(t),n=y(t,e);if(n){var o=n.next,a=n.previous;delete r.index[n.index],n.removed=!0,a&&(a.next=o),o&&(o.previous=a),r.first==n&&(r.first=o),r.last==n&&(r.last=a),p?r.size--:t.size--}return!!n},forEach:function(e){for(var t,r=g(this),n=i(e,arguments.length>1?arguments[1]:void 0);t=t?t.next:r.first;)for(n(t.value,t.key,this);t&&t.removed;)t=t.previous},has:function(e){return!!y(this,e)}}),a(h,r?{get:function(e){var t=y(this,e);return t&&t.value},set:function(e,t){return v(this,0===e?0:e,t)}}:{add:function(e){return v(this,e=0===e?0:e,e)}}),p&&n(h,"size",{get:function(){return g(this).size}}),c},setStrong:function(e,t,r){var n=t+" Iterator",o=m(t),a=m(n);u(e,t,(function(e,t){d(this,{type:n,target:e,state:o(e),kind:t,last:void 0})}),(function(){for(var e=a(this),t=e.kind,r=e.last;r&&r.removed;)r=r.previous;return e.target&&(e.last=r=r?r.next:e.state.first)?"keys"==t?{value:r.key,done:!1}:"values"==t?{value:r.value,done:!1}:{value:[r.key,r.value],done:!1}:(e.target=void 0,{value:void 0,done:!0})}),r?"entries":"values",!r,!0),c(t)}}},8850:(e,t,r)=>{"use strict";var n=r(95329),o=r(87524),a=r(21647).getWeakData,i=r(96059),s=r(10941),l=r(5743),u=r(93091),c=r(3610),p=r(90953),f=r(45402),h=f.set,d=f.getterFor,m=c.find,g=c.findIndex,v=n([].splice),y=0,b=function(e){return e.frozen||(e.frozen=new 
w)},w=function(){this.entries=[]},E=function(e,t){return m(e.entries,(function(e){return e[0]===t}))};w.prototype={get:function(e){var t=E(this,e);if(t)return t[1]},has:function(e){return!!E(this,e)},set:function(e,t){var r=E(this,e);r?r[1]=t:this.entries.push([e,t])},delete:function(e){var t=g(this.entries,(function(t){return t[0]===e}));return~t&&v(this.entries,t,1),!!~t}},e.exports={getConstructor:function(e,t,r,n){var c=e((function(e,o){l(e,f),h(e,{type:t,id:y++,frozen:void 0}),null!=o&&u(o,e[n],{that:e,AS_ENTRIES:r})})),f=c.prototype,m=d(t),g=function(e,t,r){var n=m(e),o=a(i(t),!0);return!0===o?b(n).set(t,r):o[n.id]=r,e};return o(f,{delete:function(e){var t=m(this);if(!s(e))return!1;var r=a(e);return!0===r?b(t).delete(e):r&&p(r,t.id)&&delete r[t.id]},has:function(e){var t=m(this);if(!s(e))return!1;var r=a(e);return!0===r?b(t).has(e):r&&p(r,t.id)}}),o(f,r?{get:function(e){var t=m(this);if(s(e)){var r=a(e);return!0===r?b(t).get(e):r?r[t.id]:void 0}},set:function(e,t){return g(this,e,t)}}:{add:function(e){return g(this,e,!0)}}),c}}},24683:(e,t,r)=>{"use strict";var n=r(76887),o=r(21899),a=r(21647),i=r(95981),s=r(32029),l=r(93091),u=r(5743),c=r(57475),p=r(10941),f=r(90904),h=r(65988).f,d=r(3610).forEach,m=r(55746),g=r(45402),v=g.set,y=g.getterFor;e.exports=function(e,t,r){var g,b=-1!==e.indexOf("Map"),w=-1!==e.indexOf("Weak"),E=b?"set":"add",x=o[e],_=x&&x.prototype,S={};if(m&&c(x)&&(w||_.forEach&&!i((function(){(new x).entries().next()})))){var A=(g=t((function(t,r){v(u(t,A),{type:e,collection:new x}),null!=r&&l(r,t[E],{that:t,AS_ENTRIES:b})}))).prototype,k=y(e);d(["add","clear","delete","forEach","get","has","set","keys","values","entries"],(function(e){var t="add"==e||"set"==e;!(e in _)||w&&"clear"==e||s(A,e,(function(r,n){var o=k(this).collection;if(!t&&w&&!p(r))return"get"==e&&void 0;var a=o[e](0===r?0:r,n);return t?this:a}))})),w||h(A,"size",{configurable:!0,get:function(){return k(this).collection.size}})}else g=r.getConstructor(t,e,b,E),a.enable();return 
f(g,e,!1,!0),S[e]=g,n({global:!0,forced:!0},S),w||r.setStrong(g,e,b),g}},23489:(e,t,r)=>{var n=r(90953),o=r(31136),a=r(49677),i=r(65988);e.exports=function(e,t,r){for(var s=o(t),l=i.f,u=a.f,c=0;c{var n=r(99813)("match");e.exports=function(e){var t=/./;try{"/./"[e](t)}catch(r){try{return t[n]=!1,"/./"[e](t)}catch(e){}}return!1}},64160:(e,t,r)=>{var n=r(95981);e.exports=!n((function(){function e(){}return e.prototype.constructor=null,Object.getPrototypeOf(new e)!==e.prototype}))},31046:(e,t,r)=>{"use strict";var n=r(35143).IteratorPrototype,o=r(29290),a=r(31887),i=r(90904),s=r(12077),l=function(){return this};e.exports=function(e,t,r,u){var c=t+" Iterator";return e.prototype=o(n,{next:a(+!u,r)}),i(e,c,!1,!0),s[c]=l,e}},32029:(e,t,r)=>{var n=r(55746),o=r(65988),a=r(31887);e.exports=n?function(e,t,r){return o.f(e,t,a(1,r))}:function(e,t,r){return e[t]=r,e}},31887:e=>{e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},55449:(e,t,r)=>{"use strict";var n=r(83894),o=r(65988),a=r(31887);e.exports=function(e,t,r){var i=n(t);i in e?o.f(e,i,a(0,r)):e[i]=r}},47771:(e,t,r)=>{"use strict";var n=r(76887),o=r(78834),a=r(82529),i=r(79417),s=r(57475),l=r(31046),u=r(249),c=r(88929),p=r(90904),f=r(32029),h=r(99754),d=r(99813),m=r(12077),g=r(35143),v=i.PROPER,y=i.CONFIGURABLE,b=g.IteratorPrototype,w=g.BUGGY_SAFARI_ITERATORS,E=d("iterator"),x="keys",_="values",S="entries",A=function(){return this};e.exports=function(e,t,r,i,d,g,k){l(r,t,i);var C,O,j,I=function(e){if(e===d&&M)return M;if(!w&&e in P)return P[e];switch(e){case x:case _:case S:return function(){return new r(this,e)}}return function(){return new r(this)}},N=t+" Iterator",T=!1,P=e.prototype,R=P[E]||P["@@iterator"]||d&&P[d],M=!w&&R||I(d),D="Array"==t&&P.entries||R;if(D&&(C=u(D.call(new e)))!==Object.prototype&&C.next&&(a||u(C)===b||(c?c(C,b):s(C[E])||h(C,E,A)),p(C,N,!0,!0),a&&(m[N]=A)),v&&d==_&&R&&R.name!==_&&(!a&&y?f(P,"name",_):(T=!0,M=function(){return 
o(R,this)})),d)if(O={values:I(_),keys:g?M:I(x),entries:I(S)},k)for(j in O)(w||T||!(j in P))&&h(P,j,O[j]);else n({target:t,proto:!0,forced:w||T},O);return a&&!k||P[E]===M||h(P,E,M,{name:d}),m[t]=M,O}},66349:(e,t,r)=>{var n=r(54058),o=r(90953),a=r(11477),i=r(65988).f;e.exports=function(e){var t=n.Symbol||(n.Symbol={});o(t,e)||i(t,e,{value:a.f(e)})}},55746:(e,t,r)=>{var n=r(95981);e.exports=!n((function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]}))},61333:(e,t,r)=>{var n=r(21899),o=r(10941),a=n.document,i=o(a)&&o(a.createElement);e.exports=function(e){return i?a.createElement(e):{}}},63281:e=>{e.exports={CSSRuleList:0,CSSStyleDeclaration:0,CSSValueList:0,ClientRectList:0,DOMRectList:0,DOMStringList:0,DOMTokenList:1,DataTransferItemList:0,FileList:0,HTMLAllCollection:0,HTMLCollection:0,HTMLFormElement:0,HTMLSelectElement:0,MediaList:0,MimeTypeArray:0,NamedNodeMap:0,NodeList:1,PaintRequestList:0,Plugin:0,PluginArray:0,SVGLengthList:0,SVGNumberList:0,SVGPathSegList:0,SVGPointList:0,SVGStringList:0,SVGTransformList:0,SourceBufferList:0,StyleSheetList:0,TextTrackCueList:0,TextTrackList:0,TouchList:0}},34342:(e,t,r)=>{var n=r(2861).match(/firefox\/(\d+)/i);e.exports=!!n&&+n[1]},23321:e=>{e.exports="object"==typeof window},81046:(e,t,r)=>{var n=r(2861);e.exports=/MSIE|Trident/.test(n)},4470:(e,t,r)=>{var n=r(2861),o=r(21899);e.exports=/ipad|iphone|ipod/i.test(n)&&void 0!==o.Pebble},22749:(e,t,r)=>{var n=r(2861);e.exports=/(?:ipad|iphone|ipod).*applewebkit/i.test(n)},6049:(e,t,r)=>{var n=r(82532),o=r(21899);e.exports="process"==n(o.process)},58045:(e,t,r)=>{var n=r(2861);e.exports=/web0s(?!.*chrome)/i.test(n)},2861:(e,t,r)=>{var n=r(626);e.exports=n("navigator","userAgent")||""},53385:(e,t,r)=>{var 
n,o,a=r(21899),i=r(2861),s=a.process,l=a.Deno,u=s&&s.versions||l&&l.version,c=u&&u.v8;c&&(o=(n=c.split("."))[0]>0&&n[0]<4?1:+(n[0]+n[1])),!o&&i&&(!(n=i.match(/Edge\/(\d+)/))||n[1]>=74)&&(n=i.match(/Chrome\/(\d+)/))&&(o=+n[1]),e.exports=o},18938:(e,t,r)=>{var n=r(2861).match(/AppleWebKit\/(\d+)\./);e.exports=!!n&&+n[1]},35703:(e,t,r)=>{var n=r(54058);e.exports=function(e){return n[e+"Prototype"]}},56759:e=>{e.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},18780:(e,t,r)=>{var n=r(95981),o=r(31887);e.exports=!n((function(){var e=Error("a");return!("stack"in e)||(Object.defineProperty(e,"stack",o(1,7)),7!==e.stack)}))},76887:(e,t,r)=>{"use strict";var n=r(21899),o=r(79730),a=r(95329),i=r(57475),s=r(49677).f,l=r(37252),u=r(54058),c=r(86843),p=r(32029),f=r(90953),h=function(e){var t=function(r,n,a){if(this instanceof t){switch(arguments.length){case 0:return new e;case 1:return new e(r);case 2:return new e(r,n)}return new e(r,n,a)}return o(e,this,arguments)};return t.prototype=e.prototype,t};e.exports=function(e,t){var r,o,d,m,g,v,y,b,w=e.target,E=e.global,x=e.stat,_=e.proto,S=E?n:x?n[w]:(n[w]||{}).prototype,A=E?u:u[w]||p(u,w,{})[w],k=A.prototype;for(d in t)r=!l(E?d:w+(x?".":"#")+d,e.forced)&&S&&f(S,d),g=A[d],r&&(v=e.noTargetGet?(b=s(S,d))&&b.value:S[d]),m=r&&v?v:t[d],r&&typeof g==typeof m||(y=e.bind&&r?c(m,n):e.wrap&&r?h(m):_&&i(m)?a(m):m,(e.sham||m&&m.sham||g&&g.sham)&&p(y,"sham",!0),p(A,d,y),_&&(f(u,o=w+"Prototype")||p(u,o,{}),p(u[o],d,m),e.real&&k&&!k[d]&&p(k,d,m)))}},95981:e=>{e.exports=function(e){try{return!!e()}catch(e){return!0}}},45602:(e,t,r)=>{var n=r(95981);e.exports=!n((function(){return Object.isExtensible(Object.preventExtensions({}))}))},79730:(e,t,r)=>{var n=r(18285),o=Function.prototype,a=o.apply,i=o.call;e.exports="object"==typeof Reflect&&Reflect.apply||(n?i.bind(a):function(){return i.apply(a,arguments)})},86843:(e,t,r)=>{var 
n=r(95329),o=r(24883),a=r(18285),i=n(n.bind);e.exports=function(e,t){return o(e),void 0===t?e:a?i(e,t):function(){return e.apply(t,arguments)}}},18285:(e,t,r)=>{var n=r(95981);e.exports=!n((function(){var e=function(){}.bind();return"function"!=typeof e||e.hasOwnProperty("prototype")}))},98308:(e,t,r)=>{"use strict";var n=r(21899),o=r(95329),a=r(24883),i=r(10941),s=r(90953),l=r(93765),u=r(18285),c=n.Function,p=o([].concat),f=o([].join),h={},d=function(e,t,r){if(!s(h,t)){for(var n=[],o=0;o{var n=r(18285),o=Function.prototype.call;e.exports=n?o.bind(o):function(){return o.apply(o,arguments)}},79417:(e,t,r)=>{var n=r(55746),o=r(90953),a=Function.prototype,i=n&&Object.getOwnPropertyDescriptor,s=o(a,"name"),l=s&&"something"===function(){}.name,u=s&&(!n||n&&i(a,"name").configurable);e.exports={EXISTS:s,PROPER:l,CONFIGURABLE:u}},95329:(e,t,r)=>{var n=r(18285),o=Function.prototype,a=o.bind,i=o.call,s=n&&a.bind(i,i);e.exports=n?function(e){return e&&s(e)}:function(e){return e&&function(){return i.apply(e,arguments)}}},626:(e,t,r)=>{var n=r(54058),o=r(21899),a=r(57475),i=function(e){return a(e)?e:void 0};e.exports=function(e,t){return arguments.length<2?i(n[e])||i(o[e]):n[e]&&n[e][t]||o[e]&&o[e][t]}},22902:(e,t,r)=>{var n=r(9697),o=r(14229),a=r(12077),i=r(99813)("iterator");e.exports=function(e){if(null!=e)return o(e,i)||o(e,"@@iterator")||a[n(e)]}},53476:(e,t,r)=>{var n=r(21899),o=r(78834),a=r(24883),i=r(96059),s=r(69826),l=r(22902),u=n.TypeError;e.exports=function(e,t){var r=arguments.length<2?l(e):t;if(a(r))return i(o(r,e));throw u(s(e)+" is not iterable")}},14229:(e,t,r)=>{var n=r(24883);e.exports=function(e,t){var r=e[t];return null==r?void 0:n(r)}},21899:(e,t,r)=>{var n=function(e){return e&&e.Math==Math&&e};e.exports=n("object"==typeof globalThis&&globalThis)||n("object"==typeof window&&window)||n("object"==typeof self&&self)||n("object"==typeof r.g&&r.g)||function(){return this}()||Function("return this")()},90953:(e,t,r)=>{var 
n=r(95329),o=r(89678),a=n({}.hasOwnProperty);e.exports=Object.hasOwn||function(e,t){return a(o(e),t)}},27748:e=>{e.exports={}},34845:(e,t,r)=>{var n=r(21899);e.exports=function(e,t){var r=n.console;r&&r.error&&(1==arguments.length?r.error(e):r.error(e,t))}},15463:(e,t,r)=>{var n=r(626);e.exports=n("document","documentElement")},2840:(e,t,r)=>{var n=r(55746),o=r(95981),a=r(61333);e.exports=!n&&!o((function(){return 7!=Object.defineProperty(a("div"),"a",{get:function(){return 7}}).a}))},37026:(e,t,r)=>{var n=r(21899),o=r(95329),a=r(95981),i=r(82532),s=n.Object,l=o("".split);e.exports=a((function(){return!s("z").propertyIsEnumerable(0)}))?function(e){return"String"==i(e)?l(e,""):s(e)}:s},81302:(e,t,r)=>{var n=r(95329),o=r(57475),a=r(63030),i=n(Function.toString);o(a.inspectSource)||(a.inspectSource=function(e){return i(e)}),e.exports=a.inspectSource},53794:(e,t,r)=>{var n=r(10941),o=r(32029);e.exports=function(e,t){n(t)&&"cause"in t&&o(e,"cause",t.cause)}},21647:(e,t,r)=>{var n=r(76887),o=r(95329),a=r(27748),i=r(10941),s=r(90953),l=r(65988).f,u=r(10946),c=r(684),p=r(91584),f=r(99418),h=r(45602),d=!1,m=f("meta"),g=0,v=function(e){l(e,m,{value:{objectID:"O"+g++,weakData:{}}})},y=e.exports={enable:function(){y.enable=function(){},d=!0;var e=u.f,t=o([].splice),r={};r[m]=1,e(r).length&&(u.f=function(r){for(var n=e(r),o=0,a=n.length;o{var n,o,a,i=r(38019),s=r(21899),l=r(95329),u=r(10941),c=r(32029),p=r(90953),f=r(63030),h=r(44262),d=r(27748),m="Object already initialized",g=s.TypeError,v=s.WeakMap;if(i||f.state){var y=f.state||(f.state=new v),b=l(y.get),w=l(y.has),E=l(y.set);n=function(e,t){if(w(y,e))throw new g(m);return t.facade=e,E(y,e,t),t},o=function(e){return b(y,e)||{}},a=function(e){return w(y,e)}}else{var x=h("state");d[x]=!0,n=function(e,t){if(p(e,x))throw new g(m);return t.facade=e,c(e,x,t),t},o=function(e){return p(e,x)?e[x]:{}},a=function(e){return p(e,x)}}e.exports={set:n,get:o,has:a,enforce:function(e){return a(e)?o(e):n(e,{})},getterFor:function(e){return 
function(t){var r;if(!u(t)||(r=o(t)).type!==e)throw g("Incompatible receiver, "+e+" required");return r}}}},6782:(e,t,r)=>{var n=r(99813),o=r(12077),a=n("iterator"),i=Array.prototype;e.exports=function(e){return void 0!==e&&(o.Array===e||i[a]===e)}},1052:(e,t,r)=>{var n=r(82532);e.exports=Array.isArray||function(e){return"Array"==n(e)}},57475:e=>{e.exports=function(e){return"function"==typeof e}},24284:(e,t,r)=>{var n=r(95329),o=r(95981),a=r(57475),i=r(9697),s=r(626),l=r(81302),u=function(){},c=[],p=s("Reflect","construct"),f=/^\s*(?:class|function)\b/,h=n(f.exec),d=!f.exec(u),m=function(e){if(!a(e))return!1;try{return p(u,c,e),!0}catch(e){return!1}},g=function(e){if(!a(e))return!1;switch(i(e)){case"AsyncFunction":case"GeneratorFunction":case"AsyncGeneratorFunction":return!1}try{return d||!!h(f,l(e))}catch(e){return!0}};g.sham=!0,e.exports=!p||o((function(){var e;return m(m.call)||!m(Object)||!m((function(){e=!0}))||e}))?g:m},37252:(e,t,r)=>{var n=r(95981),o=r(57475),a=/#|\.prototype\./,i=function(e,t){var r=l[s(e)];return r==c||r!=u&&(o(t)?n(t):!!t)},s=i.normalize=function(e){return String(e).replace(a,".").toLowerCase()},l=i.data={},u=i.NATIVE="N",c=i.POLYFILL="P";e.exports=i},10941:(e,t,r)=>{var n=r(57475);e.exports=function(e){return"object"==typeof e?null!==e:n(e)}},82529:e=>{e.exports=!0},60685:(e,t,r)=>{var n=r(10941),o=r(82532),a=r(99813)("match");e.exports=function(e){var t;return n(e)&&(void 0!==(t=e[a])?!!t:"RegExp"==o(e))}},56664:(e,t,r)=>{var n=r(21899),o=r(626),a=r(57475),i=r(7046),s=r(32302),l=n.Object;e.exports=s?function(e){return"symbol"==typeof e}:function(e){var t=o("Symbol");return a(t)&&i(t.prototype,l(e))}},93091:(e,t,r)=>{var n=r(21899),o=r(86843),a=r(78834),i=r(96059),s=r(69826),l=r(6782),u=r(10623),c=r(7046),p=r(53476),f=r(22902),h=r(7609),d=n.TypeError,m=function(e,t){this.stopped=e,this.result=t},g=m.prototype;e.exports=function(e,t,r){var 
n,v,y,b,w,E,x,_=r&&r.that,S=!(!r||!r.AS_ENTRIES),A=!(!r||!r.IS_ITERATOR),k=!(!r||!r.INTERRUPTED),C=o(t,_),O=function(e){return n&&h(n,"normal",e),new m(!0,e)},j=function(e){return S?(i(e),k?C(e[0],e[1],O):C(e[0],e[1])):k?C(e,O):C(e)};if(A)n=e;else{if(!(v=f(e)))throw d(s(e)+" is not iterable");if(l(v)){for(y=0,b=u(e);b>y;y++)if((w=j(e[y]))&&c(g,w))return w;return new m(!1)}n=p(e,v)}for(E=n.next;!(x=a(E,n)).done;){try{w=j(x.value)}catch(e){h(n,"throw",e)}if("object"==typeof w&&w&&c(g,w))return w}return new m(!1)}},7609:(e,t,r)=>{var n=r(78834),o=r(96059),a=r(14229);e.exports=function(e,t,r){var i,s;o(e);try{if(!(i=a(e,"return"))){if("throw"===t)throw r;return r}i=n(i,e)}catch(e){s=!0,i=e}if("throw"===t)throw r;if(s)throw i;return o(i),r}},35143:(e,t,r)=>{"use strict";var n,o,a,i=r(95981),s=r(57475),l=r(29290),u=r(249),c=r(99754),p=r(99813),f=r(82529),h=p("iterator"),d=!1;[].keys&&("next"in(a=[].keys())?(o=u(u(a)))!==Object.prototype&&(n=o):d=!0),null==n||i((function(){var e={};return n[h].call(e)!==e}))?n={}:f&&(n=l(n)),s(n[h])||c(n,h,(function(){return this})),e.exports={IteratorPrototype:n,BUGGY_SAFARI_ITERATORS:d}},12077:e=>{e.exports={}},10623:(e,t,r)=>{var n=r(43057);e.exports=function(e){return n(e.length)}},66132:(e,t,r)=>{var n,o,a,i,s,l,u,c,p=r(21899),f=r(86843),h=r(49677).f,d=r(42941).set,m=r(22749),g=r(4470),v=r(58045),y=r(6049),b=p.MutationObserver||p.WebKitMutationObserver,w=p.document,E=p.process,x=p.Promise,_=h(p,"queueMicrotask"),S=_&&_.value;S||(n=function(){var e,t;for(y&&(e=E.domain)&&e.exit();o;){t=o.fn,o=o.next;try{t()}catch(e){throw o?i():a=void 0,e}}a=void 0,e&&e.enter()},m||y||v||!b||!w?!g&&x&&x.resolve?((u=x.resolve(void 0)).constructor=x,c=f(u.then,u),i=function(){c(n)}):y?i=function(){E.nextTick(n)}:(d=f(d,p),i=function(){d(n)}):(s=!0,l=w.createTextNode(""),new b(n).observe(l,{characterData:!0}),i=function(){l.data=s=!s})),e.exports=S||function(e){var t={fn:e,next:void 0};a&&(a.next=t),o||(o=t,i()),a=t}},19297:(e,t,r)=>{var 
n=r(21899);e.exports=n.Promise},72497:(e,t,r)=>{var n=r(53385),o=r(95981);e.exports=!!Object.getOwnPropertySymbols&&!o((function(){var e=Symbol();return!String(e)||!(Object(e)instanceof Symbol)||!Symbol.sham&&n&&n<41}))},28468:(e,t,r)=>{var n=r(95981),o=r(99813),a=r(82529),i=o("iterator");e.exports=!n((function(){var e=new URL("b?a=1&b=2&c=3","http://a"),t=e.searchParams,r="";return e.pathname="c%20d",t.forEach((function(e,n){t.delete("b"),r+=n+e})),a&&!e.toJSON||!t.sort||"http://a/c%20d?a=1&c=3"!==e.href||"3"!==t.get("c")||"a=1"!==String(new URLSearchParams("?a=1"))||!t[i]||"a"!==new URL("https://a@b").username||"b"!==new URLSearchParams(new URLSearchParams("a=b")).get("a")||"xn--e1aybc"!==new URL("http://тест").host||"#%D0%B1"!==new URL("http://a#б").hash||"a1c3"!==r||"x"!==new URL("http://x",void 0).host}))},38019:(e,t,r)=>{var n=r(21899),o=r(57475),a=r(81302),i=n.WeakMap;e.exports=o(i)&&/native code/.test(a(i))},69520:(e,t,r)=>{"use strict";var n=r(24883),o=function(e){var t,r;this.promise=new e((function(e,n){if(void 0!==t||void 0!==r)throw TypeError("Bad Promise constructor");t=e,r=n})),this.resolve=n(t),this.reject=n(r)};e.exports.f=function(e){return new o(e)}},14649:(e,t,r)=>{var n=r(85803);e.exports=function(e,t){return void 0===e?arguments.length<2?"":t:n(e)}},70344:(e,t,r)=>{var n=r(21899),o=r(60685),a=n.TypeError;e.exports=function(e){if(o(e))throw a("The method doesn't accept regular expressions");return e}},24420:(e,t,r)=>{"use strict";var n=r(55746),o=r(95329),a=r(78834),i=r(95981),s=r(14771),l=r(87857),u=r(36760),c=r(89678),p=r(37026),f=Object.assign,h=Object.defineProperty,d=o([].concat);e.exports=!f||i((function(){if(n&&1!==f({b:1},f(h({},"a",{enumerable:!0,get:function(){h(this,"b",{value:3,enumerable:!1})}}),{b:2})).b)return!0;var e={},t={},r=Symbol(),o="abcdefghijklmnopqrst";return e[r]=7,o.split("").forEach((function(e){t[e]=e})),7!=f({},e)[r]||s(f({},t)).join("")!=o}))?function(e,t){for(var 
r=c(e),o=arguments.length,i=1,f=l.f,h=u.f;o>i;)for(var m,g=p(arguments[i++]),v=f?d(s(g),f(g)):s(g),y=v.length,b=0;y>b;)m=v[b++],n&&!a(h,g,m)||(r[m]=g[m]);return r}:f},29290:(e,t,r)=>{var n,o=r(96059),a=r(59938),i=r(56759),s=r(27748),l=r(15463),u=r(61333),c=r(44262),p=c("IE_PROTO"),f=function(){},h=function(e){return"