From 7dce2698f7f73a35d759ae4ef31322dff00d3614 Mon Sep 17 00:00:00 2001 From: Ivan Kozlovic Date: Mon, 2 Aug 2021 13:41:06 -0600 Subject: [PATCH] Update dependencies Signed-off-by: Ivan Kozlovic --- .goreleaser.yml | 2 +- go.mod | 16 +- go.sum | 39 +- .../hashicorp/go-hclog/intlogger.go | 3 + .../github.com/hashicorp/go-hclog/logger.go | 6 + .../github.com/hashicorp/go-hclog/stdlog.go | 2 +- vendor/github.com/nats-io/jwt/v2/exports.go | 2 +- vendor/github.com/nats-io/jwt/v2/header.go | 2 +- vendor/github.com/nats-io/jwt/v2/types.go | 11 +- .../nats-io/nats-server/v2/server/accounts.go | 132 +- .../nats-io/nats-server/v2/server/auth.go | 2 +- .../nats-io/nats-server/v2/server/client.go | 221 ++- .../nats-io/nats-server/v2/server/const.go | 4 +- .../nats-io/nats-server/v2/server/consumer.go | 324 ++-- .../nats-io/nats-server/v2/server/errors.go | 44 +- .../nats-io/nats-server/v2/server/errors.json | 1052 +++++++++++ .../nats-io/nats-server/v2/server/events.go | 320 ++-- .../nats-server/v2/server/filestore.go | 1579 ++++++++++++++--- .../nats-io/nats-server/v2/server/fuzz.go | 8 +- .../nats-io/nats-server/v2/server/gateway.go | 19 +- .../nats-server/v2/server/jetstream.go | 306 ++-- .../nats-server/v2/server/jetstream_api.go | 620 ++++--- .../v2/server/jetstream_cluster.go | 228 ++- .../nats-server/v2/server/jetstream_errors.go | 114 ++ .../v2/server/jetstream_errors_generated.go | 452 +++++ .../nats-io/nats-server/v2/server/jwt.go | 2 +- .../nats-io/nats-server/v2/server/leafnode.go | 252 +-- .../nats-io/nats-server/v2/server/memstore.go | 229 ++- .../nats-io/nats-server/v2/server/monitor.go | 49 +- .../nats-io/nats-server/v2/server/mqtt.go | 90 +- .../nats-io/nats-server/v2/server/ocsp.go | 797 +++++++++ .../nats-io/nats-server/v2/server/opts.go | 112 +- .../nats-io/nats-server/v2/server/parser.go | 2 +- .../nats-server/v2/server/pse/pse_darwin.go | 80 +- .../nats-io/nats-server/v2/server/raft.go | 25 +- .../nats-io/nats-server/v2/server/reload.go | 111 +- 
.../nats-io/nats-server/v2/server/route.go | 7 +- .../nats-io/nats-server/v2/server/sendq.go | 3 +- .../nats-io/nats-server/v2/server/server.go | 49 +- .../nats-io/nats-server/v2/server/store.go | 24 +- .../nats-io/nats-server/v2/server/stream.go | 313 ++-- .../nats-io/nats-server/v2/server/sublist.go | 42 +- .../nats-server/v2/server/websocket.go | 133 +- vendor/github.com/nats-io/nats.go/go_test.mod | 2 +- vendor/github.com/nats-io/nats.go/go_test.sum | 62 +- vendor/github.com/nats-io/nats.go/js.go | 152 +- vendor/github.com/nats-io/nats.go/jsm.go | 7 + vendor/github.com/nats-io/nats.go/nats.go | 13 +- vendor/github.com/nats-io/nats.go/ws.go | 157 +- vendor/github.com/nats-io/stan.go/README.md | 6 +- .../github.com/nats-io/stan.go/go_tests.mod | 6 +- .../github.com/nats-io/stan.go/go_tests.sum | 40 +- vendor/github.com/nats-io/stan.go/stan.go | 38 +- vendor/github.com/prometheus/procfs/Makefile | 2 + .../prometheus/procfs/Makefile.common | 15 +- vendor/github.com/prometheus/procfs/README.md | 4 +- .../github.com/prometheus/procfs/cmdline.go | 30 + vendor/github.com/prometheus/procfs/doc.go | 2 +- .../prometheus/procfs/fixtures.ttar | 1267 ++++++++++++- vendor/github.com/prometheus/procfs/mdstat.go | 105 +- .../prometheus/procfs/net_ip_socket.go | 10 +- .../prometheus/procfs/proc_cgroup.go | 2 +- .../github.com/prometheus/procfs/proc_stat.go | 27 + .../github.com/prometheus/procfs/zoneinfo.go | 1 - vendor/go.etcd.io/bbolt/.gitignore | 2 + vendor/go.etcd.io/bbolt/.travis.yml | 3 +- vendor/go.etcd.io/bbolt/Makefile | 2 - vendor/go.etcd.io/bbolt/README.md | 5 +- vendor/go.etcd.io/bbolt/bolt_unix.go | 17 +- vendor/go.etcd.io/bbolt/compact.go | 114 ++ vendor/go.etcd.io/bbolt/db.go | 66 +- vendor/go.etcd.io/bbolt/freelist_hmap.go | 6 +- vendor/go.etcd.io/bbolt/go.mod | 2 +- vendor/go.etcd.io/bbolt/go.sum | 4 +- vendor/go.etcd.io/bbolt/mlock_unix.go | 36 + vendor/go.etcd.io/bbolt/mlock_windows.go | 11 + vendor/go.etcd.io/bbolt/tx.go | 3 +- 
vendor/golang.org/x/crypto/ocsp/ocsp.go | 789 ++++++++ vendor/golang.org/x/sys/unix/README.md | 6 +- vendor/golang.org/x/sys/unix/asm_bsd_386.s | 4 +- vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 4 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 7 +- .../golang.org/x/sys/unix/syscall_darwin.go | 33 + vendor/golang.org/x/sys/unix/syscall_linux.go | 71 + .../x/sys/unix/syscall_linux_386.go | 4 + .../x/sys/unix/syscall_linux_amd64.go | 4 + .../x/sys/unix/syscall_linux_arm.go | 4 + .../x/sys/unix/syscall_linux_arm64.go | 4 + .../x/sys/unix/syscall_linux_mips64x.go | 4 + .../x/sys/unix/syscall_linux_mipsx.go | 4 + .../x/sys/unix/syscall_linux_ppc.go | 4 + .../x/sys/unix/syscall_linux_ppc64x.go | 4 + .../x/sys/unix/syscall_linux_riscv64.go | 4 + .../x/sys/unix/syscall_linux_s390x.go | 4 + .../x/sys/unix/syscall_linux_sparc64.go | 4 + .../x/sys/unix/zerrors_darwin_amd64.go | 5 + .../x/sys/unix/zerrors_darwin_arm64.go | 5 + .../x/sys/unix/zerrors_freebsd_386.go | 5 + .../x/sys/unix/zerrors_freebsd_amd64.go | 5 + .../x/sys/unix/zerrors_freebsd_arm.go | 5 + .../x/sys/unix/zerrors_freebsd_arm64.go | 5 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 64 + .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + .../x/sys/unix/zerrors_linux_mips.go | 1 + .../x/sys/unix/zerrors_linux_mips64.go | 1 + .../x/sys/unix/zerrors_linux_mips64le.go | 1 + .../x/sys/unix/zerrors_linux_mipsle.go | 1 + .../x/sys/unix/zerrors_linux_ppc.go | 1 + .../x/sys/unix/zerrors_linux_ppc64.go | 1 + .../x/sys/unix/zerrors_linux_ppc64le.go | 1 + .../x/sys/unix/zerrors_linux_riscv64.go | 1 + .../x/sys/unix/zerrors_linux_s390x.go | 1 + .../x/sys/unix/zerrors_linux_sparc64.go | 1 + .../x/sys/unix/ztypes_darwin_amd64.go | 104 ++ .../x/sys/unix/ztypes_darwin_arm64.go | 104 ++ .../x/sys/unix/ztypes_dragonfly_amd64.go | 3 + .../x/sys/unix/ztypes_freebsd_386.go | 5 +- 
.../x/sys/unix/ztypes_freebsd_amd64.go | 5 +- .../x/sys/unix/ztypes_freebsd_arm.go | 5 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 5 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 79 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 18 +- .../x/sys/unix/ztypes_linux_amd64.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 18 +- .../x/sys/unix/ztypes_linux_arm64.go | 18 +- .../x/sys/unix/ztypes_linux_mips.go | 18 +- .../x/sys/unix/ztypes_linux_mips64.go | 18 +- .../x/sys/unix/ztypes_linux_mips64le.go | 18 +- .../x/sys/unix/ztypes_linux_mipsle.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 18 +- .../x/sys/unix/ztypes_linux_ppc64.go | 18 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 18 +- .../x/sys/unix/ztypes_linux_riscv64.go | 18 +- .../x/sys/unix/ztypes_linux_s390x.go | 18 +- .../x/sys/unix/ztypes_linux_sparc64.go | 18 +- .../x/sys/unix/ztypes_netbsd_386.go | 4 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 4 +- .../x/sys/unix/ztypes_netbsd_arm.go | 4 +- .../x/sys/unix/ztypes_netbsd_arm64.go | 4 +- .../x/sys/unix/ztypes_openbsd_386.go | 4 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 4 +- .../x/sys/unix/ztypes_openbsd_arm.go | 4 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 4 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 4 +- .../x/sys/windows/syscall_windows.go | 2 +- .../golang.org/x/sys/windows/types_windows.go | 2 +- .../x/sys/windows/zsyscall_windows.go | 26 +- vendor/modules.txt | 19 +- 151 files changed, 9657 insertions(+), 2103 deletions(-) create mode 100644 vendor/github.com/nats-io/nats-server/v2/server/errors.json create mode 100644 vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors.go create mode 100644 vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors_generated.go create mode 100644 vendor/github.com/nats-io/nats-server/v2/server/ocsp.go create mode 100644 vendor/github.com/prometheus/procfs/cmdline.go create mode 100644 vendor/go.etcd.io/bbolt/compact.go create mode 100644 vendor/go.etcd.io/bbolt/mlock_unix.go 
create mode 100644 vendor/go.etcd.io/bbolt/mlock_windows.go create mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go diff --git a/.goreleaser.yml b/.goreleaser.yml index 85775533..d4ef3134 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -14,7 +14,7 @@ builds: - main: ./nats-streaming-server.go binary: nats-streaming-server ldflags: - - -s -w -X github.com/nats-io/nats-streaming-server/server.gitCommit={{.ShortCommit}} -X github.com/nats-io/nats-streaming-server/vendor/github.com/nats-io/nats-server/v2/server.gitCommit=cf433ae + - -w -X github.com/nats-io/nats-streaming-server/server.gitCommit={{.ShortCommit}} -X github.com/nats-io/nats-streaming-server/vendor/github.com/nats-io/nats-server/v2/server.gitCommit=aaba459 env: - GO111MODULE=off - CGO_ENABLED=0 diff --git a/go.mod b/go.mod index 71d2b330..21f1ef80 100644 --- a/go.mod +++ b/go.mod @@ -5,16 +5,16 @@ go 1.14 require ( github.com/go-sql-driver/mysql v1.6.0 github.com/gogo/protobuf v1.3.2 - github.com/hashicorp/go-hclog v0.16.1 + github.com/hashicorp/go-hclog v0.16.2 github.com/hashicorp/go-msgpack v1.1.5 github.com/hashicorp/raft v1.3.1 github.com/lib/pq v1.10.2 - github.com/nats-io/nats-server/v2 v2.2.6 - github.com/nats-io/nats.go v1.11.0 + github.com/nats-io/nats-server/v2 v2.3.3 + github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 github.com/nats-io/nuid v1.0.1 - github.com/nats-io/stan.go v0.9.0 - github.com/prometheus/procfs v0.6.0 - go.etcd.io/bbolt v1.3.5 - golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a - golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea + github.com/nats-io/stan.go v0.10.0 + github.com/prometheus/procfs v0.7.1 + go.etcd.io/bbolt v1.3.6 + golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 + golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c ) diff --git a/go.sum b/go.sum index 0882adc7..0edf3f3d 100644 --- a/go.sum +++ b/go.sum @@ -28,8 +28,8 @@ github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= 
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.1 h1:IVQwpTGNRRIHafnTs2dQLIk4ENtneRIEEJWOVDqz99o= -github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -58,19 +58,20 @@ github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= -github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI= -github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= -github.com/nats-io/nats-server/v2 v2.2.6 h1:FPK9wWx9pagxcw14s8W9rlfzfyHm61uNLnJyybZbn48= -github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= -github.com/nats-io/nats.go v1.11.0 h1:L263PZkrmkRJRJT2YHU8GwWWvEvmr9/LUKuJTXsF32k= +github.com/nats-io/jwt/v2 v2.0.3 h1:i/O6cmIsjpcQyWDYNcq2JyZ3/VTF8SJ4JWluI5OhpvI= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.3.3 h1:4++7wFzqYwATPWN1FD9l492TGxdtzDoT0moz2yh7BWg= 
+github.com/nats-io/nats-server/v2 v2.3.3/go.mod h1:3mtbaN5GkCo/Z5T3nNj0I0/W1fPkKzLiDC6jjWJKp98= github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 h1:9GqilBhZaR3xYis0JgMlJjNw933WIobdjKhilXm+Vls= +github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nats-io/stan.go v0.9.0 h1:TB73Y31au++0sU0VmnBy2pYkSrwH0zUFNRB9YePHqC4= -github.com/nats-io/stan.go v0.9.0/go.mod h1:0jEuBXKauB1HHJswHM/lx05K48TJ1Yxj6VIfM4k+aB4= +github.com/nats-io/stan.go v0.10.0 h1:trLFZNWJ3bLpD3dxEv5kFNBPsc+QqygjfDOfqh3hqg4= +github.com/nats-io/stan.go v0.10.0/go.mod h1:0jEuBXKauB1HHJswHM/lx05K48TJ1Yxj6VIfM4k+aB4= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -80,8 +81,8 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= 
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.1 h1:TlEtJq5GvGqMykEwWzbZWjjztF86swFhsPix1i0bkgA= +github.com/prometheus/procfs v0.7.1/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= @@ -89,15 +90,16 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= 
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -117,12 +119,13 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea h1:+WiDlPBBaO+h9vPNZi8uJ3k4BkKQB7Iow3aqwHVA5hI= -golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 6099e672..d491ae8f 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -295,6 +295,9 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, continue FOR case Format: val = fmt.Sprintf(st[0].(string), st[1:]...) + case Quote: + raw = true + val = strconv.Quote(string(st)) default: v := reflect.ValueOf(st) if v.Kind() == reflect.Slice { diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 7f36b1fd..6a4665ba 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -67,6 +67,12 @@ type Octal int // text output. For example: L.Info("bits", Binary(17)) type Binary int +// A simple shortcut to format strings with Go quoting. Control and +// non-printable characters will be escaped with their backslash equivalents in +// output. Intended for untrusted or multiline strings which should be logged +// as concisely as possible. +type Quote string + // ColorOption expresses how the output should be colored, if at all. 
type ColorOption uint8 diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index f35d875d..271d546d 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -64,7 +64,7 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { case strings.HasPrefix(str, "[INFO]"): return Info, strings.TrimSpace(str[6:]) case strings.HasPrefix(str, "[WARN]"): - return Warn, strings.TrimSpace(str[7:]) + return Warn, strings.TrimSpace(str[6:]) case strings.HasPrefix(str, "[ERROR]"): return Error, strings.TrimSpace(str[7:]) case strings.HasPrefix(str, "[ERR]"): diff --git a/vendor/github.com/nats-io/jwt/v2/exports.go b/vendor/github.com/nats-io/jwt/v2/exports.go index 852ffae3..24715f7c 100644 --- a/vendor/github.com/nats-io/jwt/v2/exports.go +++ b/vendor/github.com/nats-io/jwt/v2/exports.go @@ -42,7 +42,7 @@ const ( // Results is the subject where the latency metrics are published. // A metric will be defined by the nats-server's ServiceLatency. Time durations // are in nanoseconds. -// see https://github.com/nats-io/nats-server/blob/master/server/accounts.go#L524 +// see https://github.com/nats-io/nats-server/blob/main/server/accounts.go#L524 // e.g. // { // "app": "dlc22", diff --git a/vendor/github.com/nats-io/jwt/v2/header.go b/vendor/github.com/nats-io/jwt/v2/header.go index 2fd587ca..f69647d8 100644 --- a/vendor/github.com/nats-io/jwt/v2/header.go +++ b/vendor/github.com/nats-io/jwt/v2/header.go @@ -23,7 +23,7 @@ import ( const ( // Version is semantic version. 
- Version = "2.0.2" + Version = "2.0.3" // TokenTypeJwt is the JWT token type supported JWT tokens // encoded and decoded by this library diff --git a/vendor/github.com/nats-io/jwt/v2/types.go b/vendor/github.com/nats-io/jwt/v2/types.go index b538b62a..92abcf53 100644 --- a/vendor/github.com/nats-io/jwt/v2/types.go +++ b/vendor/github.com/nats-io/jwt/v2/types.go @@ -188,12 +188,11 @@ func (s Subject) countTokenWildcards() int { if v == "*" { return 1 } - cnt := strings.Count(v, ".*.") - if strings.HasSuffix(v, ".*") { - cnt++ - } - if strings.HasPrefix(v, "*.") { - cnt++ + cnt := 0 + for _, t := range strings.Split(v, ".") { + if t == "*" { + cnt++ + } } return cnt } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/accounts.go b/vendor/github.com/nats-io/nats-server/v2/server/accounts.go index 30dcb025..9fe4a81c 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/accounts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/accounts.go @@ -39,6 +39,10 @@ import ( // account will be grouped in the default global account. const globalAccountName = DEFAULT_GLOBAL_ACCOUNT +const defaultMaxSubLimitReportThreshold = int64(2 * time.Second) + +var maxSubLimitReportThreshold = defaultMaxSubLimitReportThreshold + // Account are subject namespace definitions. By default no messages are shared between accounts. // You can share via Exports and Imports of Streams and Services. type Account struct { @@ -84,6 +88,7 @@ type Account struct { defaultPerms *Permissions tags jwt.TagList nameTag string + lastLimErr int64 } // Account based limits. 
@@ -505,6 +510,22 @@ func (a *Account) TotalSubs() int { return int(a.sl.Count()) } +func (a *Account) shouldLogMaxSubErr() bool { + if a == nil { + return true + } + a.mu.RLock() + last := a.lastLimErr + a.mu.RUnlock() + if now := time.Now().UnixNano(); now-last >= maxSubLimitReportThreshold { + a.mu.Lock() + a.lastLimErr = now + a.mu.Unlock() + return true + } + return false +} + // MapDest is for mapping published subjects for clients. type MapDest struct { Subject string `json:"subject"` @@ -1604,9 +1625,8 @@ func (a *Account) checkForReverseEntries(reply string, checkInterest bool) { return } - var _rs [32]string + var _rs [64]string rs := _rs[:0] - for k := range a.imports.rrMap { if subjectIsSubsetMatch(k, reply) { rs = append(rs, k) @@ -1629,8 +1649,14 @@ func (a *Account) checkForReverseEntry(reply string, si *serviceImport, checkInt } if subjectHasWildcard(reply) { + doInline := len(a.imports.rrMap) <= 64 a.mu.RUnlock() - go a.checkForReverseEntries(reply, checkInterest) + + if doInline { + a.checkForReverseEntries(reply, checkInterest) + } else { + go a.checkForReverseEntries(reply, checkInterest) + } return } @@ -1843,12 +1869,19 @@ func (a *Account) addServiceImportSub(si *serviceImport) error { subject := si.from a.mu.Unlock() - cb := func(sub *subscription, c *client, subject, reply string, msg []byte) { - c.processServiceImport(si, a, msg) + cb := func(sub *subscription, c *client, acc *Account, subject, reply string, msg []byte) { + c.processServiceImport(si, acc, msg) } - _, err := c.processSubEx([]byte(subject), nil, []byte(sid), cb, true, true, false) - - return err + sub, err := c.processSubEx([]byte(subject), nil, []byte(sid), cb, true, true, false) + if err != nil { + return err + } + // Leafnodes introduce a new way to introduce messages into the system. Therefore forward import subscription + // This is similar to what initLeafNodeSmapAndSendSubs does + // TODO we need to consider performing this update as we get client subscriptions. 
+ // This behavior would result in subscription propagation only where actually used. + a.srv.updateLeafNodes(a, sub, 1) + return nil } // Remove all the subscriptions associated with service imports. @@ -2005,7 +2038,7 @@ const ( ) // This is where all service export responses are handled. -func (a *Account) processServiceImportResponse(sub *subscription, c *client, subject, reply string, msg []byte) { +func (a *Account) processServiceImportResponse(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { a.mu.RLock() if a.expired || len(a.exports.responses) == 0 { a.mu.RUnlock() @@ -2032,7 +2065,7 @@ func (a *Account) createRespWildcard() []byte { a.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } var b = [baseServerLen]byte{'_', 'R', '_', '.'} - rn := a.prand.Int63() + rn := a.prand.Uint64() for i, l := replyPrefixLen, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base @@ -2060,17 +2093,19 @@ func isTrackedReply(reply []byte) bool { // Generate a new service reply from the wildcard prefix. // FIXME(dlc) - probably do not have to use rand here. about 25ns per. func (a *Account) newServiceReply(tracking bool) []byte { - a.mu.RLock() - replyPre := a.siReply - s := a.srv - a.mu.RUnlock() + a.mu.Lock() + s, replyPre := a.srv, a.siReply + if a.prand == nil { + a.prand = rand.New(rand.NewSource(time.Now().UnixNano())) + } + rn := a.prand.Uint64() + a.mu.Unlock() if replyPre == nil { replyPre = a.createRespWildcard() } var b [replyLen]byte - rn := a.prand.Int63() for i, l := 0, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base @@ -2822,6 +2857,16 @@ func (s *Server) UpdateAccountClaims(a *Account, ac *jwt.AccountClaims) { s.updateAccountClaimsWithRefresh(a, ac, true) } +func (a *Account) traceLabel() string { + if a == nil { + return _EMPTY_ + } + if a.nameTag != _EMPTY_ { + return fmt.Sprintf("%s/%s", a.Name, a.nameTag) + } + return a.Name +} + // updateAccountClaimsWithRefresh will update an existing account with new claims. 
// If refreshImportingAccounts is true it will also update incomplete dependent accounts // This will replace any exports or imports previously defined. @@ -2926,25 +2971,19 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim jsEnabled := s.JetStreamEnabled() if jsEnabled && a == s.SystemAccount() { - for _, export := range allJsExports { - s.Debugf("Adding jetstream service export %q for %s", export, a.Name) - if err := a.AddServiceExport(export, nil); err != nil { - s.Errorf("Error setting up jetstream service exports: %v", err) - } - } s.checkJetStreamExports() } for _, e := range ac.Exports { switch e.Type { case jwt.Stream: - s.Debugf("Adding stream export %q for %s", e.Subject, a.Name) + s.Debugf("Adding stream export %q for %s", e.Subject, a.traceLabel()) if err := a.addStreamExportWithAccountPos( string(e.Subject), authAccounts(e.TokenReq), e.AccountTokenPosition); err != nil { - s.Debugf("Error adding stream export to account [%s]: %v", a.Name, err.Error()) + s.Debugf("Error adding stream export to account [%s]: %v", a.traceLabel(), err.Error()) } case jwt.Service: - s.Debugf("Adding service export %q for %s", e.Subject, a.Name) + s.Debugf("Adding service export %q for %s", e.Subject, a.traceLabel()) rt := Singleton switch e.ResponseType { case jwt.ResponseTypeStream: @@ -2954,7 +2993,7 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim } if err := a.addServiceExportWithResponseAndAccountPos( string(e.Subject), rt, authAccounts(e.TokenReq), e.AccountTokenPosition); err != nil { - s.Debugf("Error adding service export to account [%s]: %v", a.Name, err) + s.Debugf("Error adding service export to account [%s]: %v", a.traceLabel(), err) continue } sub := string(e.Subject) @@ -2964,13 +3003,13 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim if e.Latency.Sampling == jwt.Headers { hdrNote = " (using headers)" } - s.Debugf("Error adding latency tracking%s for 
service export to account [%s]: %v", hdrNote, a.Name, err) + s.Debugf("Error adding latency tracking%s for service export to account [%s]: %v", hdrNote, a.traceLabel(), err) } } if e.ResponseThreshold != 0 { // Response threshold was set in options. if err := a.SetServiceExportResponseThreshold(sub, e.ResponseThreshold); err != nil { - s.Debugf("Error adding service export response threshold for [%s]: %v", a.Name, err) + s.Debugf("Error adding service export response threshold for [%s]: %v", a.traceLabel(), err) } } } @@ -3008,14 +3047,14 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim if i.LocalSubject != _EMPTY_ { // set local subject implies to is empty to = string(i.LocalSubject) - s.Debugf("Adding stream import %s:%q for %s:%q", acc.Name, from, a.Name, to) + s.Debugf("Adding stream import %s:%q for %s:%q", acc.traceLabel(), from, a.traceLabel(), to) err = a.AddMappedStreamImportWithClaim(acc, from, to, i) } else { - s.Debugf("Adding stream import %s:%q for %s:%q", acc.Name, from, a.Name, to) + s.Debugf("Adding stream import %s:%q for %s:%q", acc.traceLabel(), from, a.traceLabel(), to) err = a.AddStreamImportWithClaim(acc, from, to, i) } if err != nil { - s.Debugf("Error adding stream import to account [%s]: %v", a.Name, err.Error()) + s.Debugf("Error adding stream import to account [%s]: %v", a.traceLabel(), err.Error()) incompleteImports = append(incompleteImports, i) } case jwt.Service: @@ -3023,9 +3062,9 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim from = string(i.LocalSubject) to = string(i.Subject) } - s.Debugf("Adding service import %s:%q for %s:%q", acc.Name, from, a.Name, to) + s.Debugf("Adding service import %s:%q for %s:%q", acc.traceLabel(), from, a.traceLabel(), to) if err := a.AddServiceImportWithClaim(acc, from, to, i); err != nil { - s.Debugf("Error adding service import to account [%s]: %v", a.Name, err.Error()) + s.Debugf("Error adding service import to account [%s]: 
%v", a.traceLabel(), err.Error()) incompleteImports = append(incompleteImports, i) } } @@ -3168,7 +3207,7 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim // regardless of enabled or disabled. It handles both cases. if jsEnabled { if err := s.configJetStream(a); err != nil { - s.Errorf("Error configuring jetstream for account [%s]: %v", a.Name, err.Error()) + s.Errorf("Error configuring jetstream for account [%s]: %v", a.traceLabel(), err.Error()) a.mu.Lock() // Absent reload of js server cfg, this is going to be broken until js is disabled a.incomplete = true @@ -3235,6 +3274,7 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim acc.mu.RLock() incomplete := acc.incomplete name := acc.Name + label := acc.traceLabel() // Must use jwt in account or risk failing on fetch // This jwt may not be the same that caused exportingAcc to be in incompleteAccExporterMap claimJWT := acc.claimJWT @@ -3250,7 +3290,7 @@ func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaim // Since this account just got updated, the import itself may be in error. So trace that. 
if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok { s.incompleteAccExporterMap.Delete(old.Name) - s.Errorf("Account %s has issues importing account %s", name, old.Name) + s.Errorf("Account %s has issues importing account %s", label, old.Name) } } } @@ -3639,7 +3679,7 @@ func (dr *DirAccResolver) Start(s *Server) error { packRespIb := s.newRespInbox() for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { // subscribe to account jwt update requests - if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, subj, resp string, msg []byte) { + if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, _ *Account, subj, resp string, msg []byte) { pubKey := "" tk := strings.Split(subj, tsep) if len(tk) == accUpdateTokensNew { @@ -3669,7 +3709,7 @@ func (dr *DirAccResolver) Start(s *Server) error { return fmt.Errorf("error setting up update handling: %v", err) } } - if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, subj, resp string, msg []byte) { + if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, _ *Account, subj, resp string, msg []byte) { if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update resulted in error", err) } else if claim.Issuer == op && strict { @@ -3686,7 +3726,7 @@ func (dr *DirAccResolver) Start(s *Server) error { return fmt.Errorf("error setting up update handling: %v", err) } // respond to lookups with our version - if _, err := s.sysSubscribe(fmt.Sprintf(accLookupReqSubj, "*"), func(_ *subscription, _ *client, subj, reply string, msg []byte) { + if _, err := s.sysSubscribe(fmt.Sprintf(accLookupReqSubj, "*"), func(_ *subscription, _ *client, _ *Account, subj, reply string, msg []byte) { if reply == "" { return } @@ -3704,7 +3744,7 @@ func (dr *DirAccResolver) Start(s *Server) error { } // respond to pack requests with one or more pack messages // an 
empty message signifies the end of the response responder - if _, err := s.sysSubscribeQ(accPackReqSubj, "responder", func(_ *subscription, _ *client, _, reply string, theirHash []byte) { + if _, err := s.sysSubscribeQ(accPackReqSubj, "responder", func(_ *subscription, _ *client, _ *Account, _, reply string, theirHash []byte) { if reply == "" { return } @@ -3725,18 +3765,18 @@ func (dr *DirAccResolver) Start(s *Server) error { return fmt.Errorf("error setting up pack request handling: %v", err) } // respond to list requests with one message containing all account ids - if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _, reply string, _ []byte) { + if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _ *Account, _, reply string, _ []byte) { handleListRequest(dr.DirJWTStore, s, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } - if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _, reply string, msg []byte) { + if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _ *Account, _, reply string, msg []byte) { handleDeleteRequest(dr.DirJWTStore, s, msg, reply) }); err != nil { return fmt.Errorf("error setting up delete request handling: %v", err) } // embed pack responses into store - if _, err := s.sysSubscribe(packRespIb, func(_ *subscription, _ *client, _, _ string, msg []byte) { + if _, err := s.sysSubscribe(packRespIb, func(_ *subscription, _ *client, _ *Account, _, _ string, msg []byte) { hash := dr.DirJWTStore.Hash() if len(msg) == 0 { // end of response stream s.Debugf("Merging Finished and resulting in: %x", dr.DirJWTStore.Hash()) @@ -3854,7 +3894,7 @@ func (s *Server) fetch(res AccountResolver, name string, timeout time.Duration) replySubj := s.newRespInbox() replies := s.sys.replies // Store our handler. 
- replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) { + replies[replySubj] = func(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) { clone := make([]byte, len(msg)) copy(clone, msg) s.mu.Lock() @@ -3925,7 +3965,7 @@ func (dr *CacheDirAccResolver) Start(s *Server) error { } for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { // subscribe to account jwt update requests - if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, subj, resp string, msg []byte) { + if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, _ *Account, subj, resp string, msg []byte) { pubKey := "" tk := strings.Split(subj, tsep) if len(tk) == accUpdateTokensNew { @@ -3957,7 +3997,7 @@ func (dr *CacheDirAccResolver) Start(s *Server) error { return fmt.Errorf("error setting up update handling: %v", err) } } - if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, subj, resp string, msg []byte) { + if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, _ *Account, subj, resp string, msg []byte) { if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update cache resulted in error", err) } else if claim.Issuer == op && strict { @@ -3976,12 +4016,12 @@ func (dr *CacheDirAccResolver) Start(s *Server) error { return fmt.Errorf("error setting up update handling: %v", err) } // respond to list requests with one message containing all account ids - if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _, reply string, _ []byte) { + if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _ *Account, _, reply string, _ []byte) { handleListRequest(dr.DirJWTStore, s, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } - if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ 
*subscription, _ *client, _, reply string, msg []byte) { + if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _ *Account, _, reply string, msg []byte) { handleDeleteRequest(dr.DirJWTStore, s, msg, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/auth.go b/vendor/github.com/nats-io/nats-server/v2/server/auth.go index b137959b..d6e7fbfe 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/auth.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/auth.go @@ -671,7 +671,7 @@ func (s *Server) processClientOrLeafAuthentication(c *client, opts *Options) boo acc.mu.RLock() c.Debugf("Authenticated JWT: %s %q (claim-name: %q, claim-tags: %q) "+ "signed with %q by Account %q (claim-name: %q, claim-tags: %q) signed with %q", - c.typeString(), juc.Subject, juc.Name, juc.Tags, juc.Issuer, issuer, acc.nameTag, acc.tags, acc.Issuer) + c.kindString(), juc.Subject, juc.Name, juc.Tags, juc.Issuer, issuer, acc.nameTag, acc.tags, acc.Issuer) acc.mu.RUnlock() return true } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/client.go b/vendor/github.com/nats-io/nats-server/v2/server/client.go index 6faec179..25650e3a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/client.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/client.go @@ -422,7 +422,7 @@ func (c *client) String() (id string) { return loaded.(string) } - return "" + return _EMPTY_ } // GetName returns the application supplied name for the connection. 
@@ -474,6 +474,20 @@ func (c *client) clientType() int { } } +var clientTypeStringMap = map[int]string{ + NON_CLIENT: _EMPTY_, + NATS: "nats", + WS: "websocket", + MQTT: "mqtt", +} + +func (c *client) clientTypeString() string { + if typeStringVal, ok := clientTypeStringMap[c.clientType()]; ok { + return typeStringVal + } + return _EMPTY_ +} + // This is the main subscription struct that indicates // interest in published messages. // FIXME(dlc) - This is getting bloated for normal subs, need @@ -719,7 +733,7 @@ func (c *client) applyAccountLimits() { } c.mpay = jwt.NoLimit c.msubs = jwt.NoLimit - if c.opts.JWT != "" { // user jwt implies account + if c.opts.JWT != _EMPTY_ { // user jwt implies account if uc, _ := jwt.DecodeUserClaims(c.opts.JWT); uc != nil { c.mpay = int32(uc.Limits.Payload) c.msubs = int32(uc.Limits.Subs) @@ -1492,10 +1506,12 @@ func (c *client) markConnAsClosed(reason ClosedState) { // Be consistent with the creation: for routes, gateways and leaf, // we use Noticef on create, so use that too for delete. if c.srv != nil { - if c.kind == ROUTER || c.kind == GATEWAY || c.kind == LEAF { - c.Noticef("%s connection closed: %s", c.typeString(), reason) + if c.kind == LEAF { + c.Noticef("%s connection closed: %s account: %s", c.kindString(), reason, c.acc.traceLabel()) + } else if c.kind == ROUTER || c.kind == GATEWAY { + c.Noticef("%s connection closed: %s", c.kindString(), reason) } else { // Client, System, Jetstream, and Account connections. 
- c.Debugf("%s connection closed: %s", c.typeString(), reason) + c.Debugf("%s connection closed: %s", c.kindString(), reason) } } @@ -1713,7 +1729,7 @@ func (c *client) processConnect(arg []byte) error { } } if ncs != _EMPTY_ { - c.ncs.Store(fmt.Sprintf("%s - %q", c.String(), ncs)) + c.ncs.Store(fmt.Sprintf("%s - %q", c, ncs)) } } @@ -1894,7 +1910,10 @@ func (c *client) maxConnExceeded() { } func (c *client) maxSubsExceeded() { - c.sendErrAndErr(ErrTooManySubs.Error()) + if c.acc.shouldLogMaxSubErr() { + c.Errorf(ErrTooManySubs.Error()) + } + c.sendErr(ErrTooManySubs.Error()) } func (c *client) maxPayloadViolation(sz int, max int32) { @@ -2459,8 +2478,9 @@ func (c *client) processSubEx(subject, queue, bsid []byte, cb msgHandler, noForw // Used to pass stream import matches to addShadowSub type ime struct { - im *streamImport - dyn bool + im *streamImport + overlapSubj string + dyn bool } // If the client's account has stream imports and there are matches for @@ -2472,47 +2492,84 @@ func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error { } var ( - _ims [16]ime - ims = _ims[:0] - tokens []string - tsa [32]string - hasWC bool + _ims [16]ime + ims = _ims[:0] + imTsa [32]string + tokens []string + tsa [32]string + hasWC bool + tokensModified bool ) acc.mu.RLock() - // Loop over the import subjects. We have 3 scenarios. If we have an + subj := string(sub.subject) + if len(acc.imports.streams) > 0 { + tokens = tokenizeSubjectIntoSlice(tsa[:0], subj) + for _, tk := range tokens { + if tk == pwcs { + hasWC = true + break + } + } + if !hasWC && tokens[len(tokens)-1] == fwcs { + hasWC = true + } + } + // Loop over the import subjects. We have 4 scenarios. If we have an // exact match or a superset match we should use the from field from - // the import. If we are a subset, we have to dynamically calculate - // the subject. + // the import. If we are a subset or overlap, we have to dynamically calculate + // the subject. 
On overlap, ime requires the overlap subject. for _, im := range acc.imports.streams { if im.invalid { continue } - subj := string(sub.subject) if subj == im.to { - ims = append(ims, ime{im, false}) + ims = append(ims, ime{im, _EMPTY_, false}) continue } - if tokens == nil { - tokens = tsa[:0] - start := 0 - for i := 0; i < len(subj); i++ { - // This is not perfect, but the test below will - // be more exact, this is just to trigger the - // additional test. - if subj[i] == pwc || subj[i] == fwc { - hasWC = true - } else if subj[i] == btsep { - tokens = append(tokens, subj[start:i]) - start = i + 1 + if tokensModified { + // re-tokenize subj to overwrite modifications from a previous iteration + tokens = tokenizeSubjectIntoSlice(tsa[:0], subj) + tokensModified = false + } + imTokens := tokenizeSubjectIntoSlice(imTsa[:0], im.to) + + if isSubsetMatchTokenized(tokens, imTokens) { + ims = append(ims, ime{im, _EMPTY_, true}) + } else if hasWC { + if isSubsetMatchTokenized(imTokens, tokens) { + ims = append(ims, ime{im, _EMPTY_, false}) + } else { + imTokensLen := len(imTokens) + for i, t := range tokens { + if i >= imTokensLen { + break + } + if t == pwcs && imTokens[i] != fwcs { + tokens[i] = imTokens[i] + tokensModified = true + } + } + tokensLen := len(tokens) + lastIdx := tokensLen - 1 + if tokens[lastIdx] == fwcs { + if imTokensLen >= tokensLen { + // rewrite ">" in tokens to be more specific + tokens[lastIdx] = imTokens[lastIdx] + tokensModified = true + if imTokensLen > tokensLen { + // copy even more specific parts from import + tokens = append(tokens, imTokens[tokensLen:]...) + } + } + } + if isSubsetMatchTokenized(tokens, imTokens) { + // As isSubsetMatchTokenized was already called with tokens and imTokens, + // we wouldn't be here if it where not for tokens being modified. 
+ // Hence, Join to re compute the subject string + ims = append(ims, ime{im, strings.Join(tokens, tsep), true}) } } - tokens = append(tokens, subj[start:]) - } - if isSubsetMatch(tokens, im.to) { - ims = append(ims, ime{im, true}) - } else if hasWC && subjectIsSubsetMatch(im.to, subj) { - ims = append(ims, ime{im, false}) } } acc.mu.RUnlock() @@ -2553,13 +2610,21 @@ func (c *client) addShadowSub(sub *subscription, ime *ime) (*subscription, error if im.rtr == nil { im.rtr = im.tr.reverse() } - subj, err := im.rtr.transformSubject(string(nsub.subject)) + s := string(nsub.subject) + if ime.overlapSubj != _EMPTY_ { + s = ime.overlapSubj + } + subj, err := im.rtr.transformSubject(s) if err != nil { return nil, err } nsub.subject = []byte(subj) } else if !im.usePub || !ime.dyn { - nsub.subject = []byte(im.from) + if ime.overlapSubj != _EMPTY_ { + nsub.subject = []byte(ime.overlapSubj) + } else { + nsub.subject = []byte(im.from) + } } // Else use original subject c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name) @@ -2963,7 +3028,7 @@ var needFlush = struct{}{} // deliverMsg will deliver a message to a matching subscription and its underlying client. // We process all connection/client types. mh is the part that will be protocol/client specific. -func (c *client) deliverMsg(sub *subscription, subject, reply, mh, msg []byte, gwrply bool) bool { +func (c *client) deliverMsg(sub *subscription, acc *Account, subject, reply, mh, msg []byte, gwrply bool) bool { if sub.client == nil { return false } @@ -3067,9 +3132,9 @@ func (c *client) deliverMsg(sub *subscription, subject, reply, mh, msg []byte, g // Internal account clients are for service imports and need the '\r\n'. 
if client.kind == ACCOUNT { - sub.icb(sub, c, string(subject), string(reply), msg) + sub.icb(sub, c, acc, string(subject), string(reply), msg) } else { - sub.icb(sub, c, string(subject), string(reply), msg[:msgSize]) + sub.icb(sub, c, acc, string(subject), string(reply), msg[:msgSize]) } return true } @@ -3536,7 +3601,6 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) { atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 { flag |= pmrCollectQueueNames } - didDeliver, qnames = c.processMsgResults(c.acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, flag) } @@ -3585,11 +3649,16 @@ func (c *client) handleGWReplyMap(msg []byte) bool { // Check for leaf nodes if c.srv.gwLeafSubs.Count() > 0 { if r := c.srv.gwLeafSubs.Match(string(c.pa.subject)); len(r.psubs) > 0 { - c.processMsgResults(c.acc, r, msg, nil, c.pa.subject, c.pa.reply, pmrNoFlag) + c.processMsgResults(c.acc, r, msg, c.pa.deliver, c.pa.subject, c.pa.reply, pmrNoFlag) } } if c.srv.gateway.enabled { - c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, nil) + reply := c.pa.reply + if len(c.pa.deliver) > 0 && c.kind == JETSTREAM && len(c.pa.reply) > 0 { + reply = append(reply, '@') + reply = append(reply, c.pa.deliver...) + } + c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, nil) } return true } @@ -3734,8 +3803,18 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt acc.mu.RLock() shouldReturn := si.invalid || acc.sl == nil + checkJSGetNext := !isResponse && si.to == jsAllAPI && strings.HasPrefix(string(c.pa.subject), jsRequestNextPre) acc.mu.RUnlock() + // We have a special case where JetStream pulls in all service imports through one export. + // However the GetNext for consumers is a no-op and causes buildups of service imports, + // response service imports and rrMap entries which all will need to simply expire. + // TODO(dlc) - Come up with something better. 
+ if checkJSGetNext && si.se != nil && si.se.acc == c.srv.SystemAccount() { + shouldReturn = true + } + + // Check for short circuit return. if shouldReturn { return } @@ -3754,9 +3833,11 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt nrr = []byte(rsi.from) } } else { + // This only happens when we do a pull subscriber that trampolines through another account. + // Normally this code is not called. nrr = c.pa.reply } - } else if !si.response && si.latency != nil && tracking { + } else if !isResponse && si.latency != nil && tracking { // Check to see if this was a bad request with no reply and we were supposed to be tracking. si.acc.sendBadRequestTrackingLatency(si, c, headers) } @@ -3773,7 +3854,6 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt // Pick correct "to" subject. If we matched on a wildcard use the literal publish subject. to, subject := si.to, string(c.pa.subject) - hadPrevSi := c.pa.psi != nil if si.tr != nil { // FIXME(dlc) - This could be slow, may want to look at adding cache to bare transforms? to, _ = si.tr.transformSubject(subject) @@ -3781,9 +3861,8 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt to = subject } - // Copy our pubArg and account + // Copy our pubArg since this gets modified as we process the service import itself. pacopy := c.pa - oacc := c.acc // Now check to see if this account has mappings that could affect the service import. // Can't use non-locked trick like in processInboundClientMsg, so just call into selectMappedSubject @@ -3793,9 +3872,8 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt to = nsubj } - // Change this so that we detect recursion - // Remember prior. - share := si.share + // Set previous service import to detect chaining. 
+ hadPrevSi, share := c.pa.psi != nil, si.share if hadPrevSi { share = c.pa.psi.share } @@ -3803,13 +3881,13 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt // Place our client info for the request in the original message. // This will survive going across routes, etc. - if !si.response { + if !isResponse { var ci *ClientInfo if hadPrevSi && c.pa.hdr >= 0 { var cis ClientInfo if err := json.Unmarshal(getHeader(ClientInfoHdr, msg[:c.pa.hdr]), &cis); err == nil { ci = &cis - ci.Service = c.acc.Name + ci.Service = acc.Name } } else if c.kind != LEAF || c.pa.hdr < 0 || len(getHeader(ClientInfoHdr, msg[:c.pa.hdr])) == 0 { ci = c.getClientInfo(share) @@ -3823,13 +3901,10 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt } // Set our optional subject(to) and reply. - if !si.response && to != subject { + if !isResponse && to != subject { c.pa.subject = []byte(to) } c.pa.reply = nrr - c.mu.Lock() - c.acc = si.acc - c.mu.Unlock() // FIXME(dlc) - Do L1 cache trick like normal client? rr := si.acc.sl.Match(to) @@ -3863,17 +3938,14 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt didDeliver, _ = c.processMsgResults(si.acc, rr, msg, c.pa.deliver, []byte(to), nrr, flags) } - // Put what was there back now. + // Restore to original values. c.in.rts = orts c.pa = pacopy - c.mu.Lock() - c.acc = oacc - c.mu.Unlock() // Determine if we should remove this service import. This is for response service imports. // We will remove if we did not deliver, or if we are a response service import and we are // a singleton, or we have an EOF message. - shouldRemove := !didDeliver || (si.response && (si.rt == Singleton || len(msg) == LEN_CR_LF)) + shouldRemove := !didDeliver || (isResponse && (si.rt == Singleton || len(msg) == LEN_CR_LF)) // If we are tracking and we did not actually send the latency info we need to suppress the removal. 
if si.tracking && !didSendTL { shouldRemove = false @@ -3891,7 +3963,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt if !didDeliver { reason = rsiNoDelivery } - if si.isRespServiceImport() { + if isResponse { acc.removeRespServiceImport(si, reason) } else { // This is a main import and since we could not even deliver to the exporting account @@ -4035,12 +4107,12 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, // Remap to the original subject if internal. if sub.icb != nil && sub.rsi { - subj = subject + dsubj = subject } // Normal delivery mh := c.msgHeader(dsubj, creply, sub) - didDeliver = c.deliverMsg(sub, subj, creply, mh, msg, rplyHasGWPrefix) || didDeliver + didDeliver = c.deliverMsg(sub, acc, dsubj, creply, mh, msg, rplyHasGWPrefix) || didDeliver } // Set these up to optionally filter based on the queue lists. @@ -4164,7 +4236,7 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver, } mh := c.msgHeader(dsubj, creply, sub) - if c.deliverMsg(sub, subject, creply, mh, msg, rplyHasGWPrefix) { + if c.deliverMsg(sub, acc, subject, creply, mh, msg, rplyHasGWPrefix) { didDeliver = true // Clear rsub rsub = nil @@ -4241,7 +4313,7 @@ sendToRoutesOrLeafs: } mh := c.msgHeaderForRouteOrLeaf(subject, reply, rt, acc) - didDeliver = c.deliverMsg(rt.sub, subject, reply, mh, dmsg, false) || didDeliver + didDeliver = c.deliverMsg(rt.sub, acc, subject, reply, mh, dmsg, false) || didDeliver // If we set the header reset the origin pub args. 
if hset { @@ -4312,7 +4384,7 @@ func (c *client) processPingTimer() { return } - c.Debugf("%s Ping Timer", c.typeString()) + c.Debugf("%s Ping Timer", c.kindString()) var sendPing bool @@ -4448,7 +4520,7 @@ func (c *client) flushAndClose(minimalFlush bool) { } } -var typeStringMap = map[int]string{ +var kindStringMap = map[int]string{ CLIENT: "Client", ROUTER: "Router", GATEWAY: "Gateway", @@ -4458,11 +4530,10 @@ var typeStringMap = map[int]string{ SYSTEM: "System", } -func (c *client) typeString() string { - if typeStringVal, ok := typeStringMap[c.kind]; ok { - return typeStringVal +func (c *client) kindString() string { + if kindStringVal, ok := kindStringMap[c.kind]; ok { + return kindStringVal } - return "Unknown Type" } @@ -4924,6 +4995,8 @@ func (c *client) getClientInfo(detailed bool) *ClientInfo { ci.IssuerKey = issuerForClient(c) ci.NameTag = c.nameTag ci.Tags = c.tags + ci.Kind = c.kindString() + ci.ClientType = c.clientTypeString() } c.mu.Unlock() return &ci diff --git a/vendor/github.com/nats-io/nats-server/v2/server/const.go b/vendor/github.com/nats-io/nats-server/v2/server/const.go index fd0ad0ea..4d83f4d2 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/const.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/const.go @@ -41,7 +41,7 @@ var ( const ( // VERSION is the current version for the server. - VERSION = "2.2.6" + VERSION = "2.3.3" // PROTO is the currently supported protocol. // 0 was the original @@ -202,5 +202,5 @@ const ( DEFAULT_GLOBAL_ACCOUNT = "$G" // DEFAULT_FETCH_TIMEOUT is the default time that the system will wait for an account fetch to return. 
- DEFAULT_ACCOUNT_FETCH_TIMEOUT = 2 * time.Second + DEFAULT_ACCOUNT_FETCH_TIMEOUT = 1900 * time.Millisecond ) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/consumer.go b/vendor/github.com/nats-io/nats-server/v2/server/consumer.go index 4c227b54..402aff3e 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/consumer.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/consumer.go @@ -48,6 +48,7 @@ type ConsumerInfo struct { type ConsumerConfig struct { Durable string `json:"durable_name,omitempty"` + Description string `json:"description,omitempty"` DeliverSubject string `json:"deliver_subject,omitempty"` DeliverPolicy DeliverPolicy `json:"deliver_policy"` OptStartSeq uint64 `json:"opt_start_seq,omitempty"` @@ -85,8 +86,10 @@ const ( DeliverNew // DeliverByStartSequence will look for a defined starting sequence to start. DeliverByStartSequence - // DeliverByStartTime will select the first messsage with a timestamp >= to StartTime + // DeliverByStartTime will select the first messsage with a timestamp >= to StartTime. DeliverByStartTime + // DeliverLastPerSubject will start the consumer with the last message for all subjects received. 
+ DeliverLastPerSubject ) func (dp DeliverPolicy) String() string { @@ -101,6 +104,8 @@ func (dp DeliverPolicy) String() string { return "by_start_sequence" case DeliverByStartTime: return "by_start_time" + case DeliverLastPerSubject: + return "last_per_subject" default: return "undefined" } @@ -185,6 +190,7 @@ type consumer struct { asflr uint64 sgap uint64 dsubj string + lss *lastSeqSkipList rlimit *rate.Limiter reqSub *subscription ackSub *subscription @@ -270,65 +276,69 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri } if config == nil { - return nil, fmt.Errorf("consumer config required") + return nil, ApiErrors[JSConsumerConfigRequiredErr] + } + + if len(config.Description) > JSMaxDescriptionLen { + return nil, ApiErrors[JSConsumerDescriptionTooLongErrF].NewT("{max}", JSMaxDescriptionLen) } var err error // For now expect a literal subject if its not empty. Empty means work queue mode (pull mode). if config.DeliverSubject != _EMPTY_ { if !subjectIsLiteral(config.DeliverSubject) { - return nil, fmt.Errorf("consumer deliver subject has wildcards") + return nil, ApiErrors[JSConsumerDeliverToWildcardsErr] } if mset.deliveryFormsCycle(config.DeliverSubject) { - return nil, fmt.Errorf("consumer deliver subject forms a cycle") + return nil, ApiErrors[JSConsumerDeliverCycleErr] } if config.MaxWaiting != 0 { - return nil, fmt.Errorf("consumer in push mode can not set max waiting") + return nil, ApiErrors[JSConsumerDeliverToWildcardsErr] } if config.MaxAckPending > 0 && config.AckPolicy == AckNone { - return nil, fmt.Errorf("consumer requires ack policy for max ack pending") + return nil, ApiErrors[JSConsumerMaxPendingAckPolicyRequiredErr] } if config.Heartbeat > 0 && config.Heartbeat < 100*time.Millisecond { - return nil, fmt.Errorf("consumer idle heartbeat needs to be >= 100ms") + return nil, ApiErrors[JSConsumerSmallHeartbeatErr] } } else { // Pull mode / work queue mode require explicit ack. 
if config.AckPolicy != AckExplicit { - return nil, fmt.Errorf("consumer in pull mode requires explicit ack policy") + return nil, ApiErrors[JSConsumerPullRequiresAckErr] } // They are also required to be durable since otherwise we will not know when to // clean them up. if config.Durable == _EMPTY_ { - return nil, fmt.Errorf("consumer in pull mode requires a durable name") + return nil, ApiErrors[JSConsumerPullNotDurableErr] } if config.RateLimit > 0 { - return nil, fmt.Errorf("consumer in pull mode can not have rate limit set") + return nil, ApiErrors[JSConsumerPullWithRateLimitErr] } if config.MaxWaiting < 0 { - return nil, fmt.Errorf("consumer max waiting needs to be positive") + return nil, ApiErrors[JSConsumerMaxWaitingNegativeErr] } // Set to default if not specified. if config.MaxWaiting == 0 { config.MaxWaiting = JSWaitQueueDefaultMax } if config.Heartbeat > 0 { - return nil, fmt.Errorf("consumer idle heartbeat requires a push based consumer") + return nil, ApiErrors[JSConsumerHBRequiresPushErr] } if config.FlowControl { - return nil, fmt.Errorf("consumer flow control requires a push based consumer") + return nil, ApiErrors[JSConsumerFCRequiresPushErr] } } // Direct need to be non-mapped ephemerals. if config.Direct { if config.DeliverSubject == _EMPTY_ { - return nil, fmt.Errorf("consumer direct requires a push based consumer") + return nil, ApiErrors[JSConsumerDirectRequiresPushErr] } if isDurableConsumer(config) { - return nil, fmt.Errorf("consumer direct requires an ephemeral consumer") + return nil, ApiErrors[JSConsumerDirectRequiresEphemeralErr] } if ca != nil { - return nil, fmt.Errorf("consumer direct on a mapped consumer") + return nil, ApiErrors[JSConsumerOnMappedErr] } } @@ -345,11 +355,11 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri config.MaxAckPending = JsDefaultMaxAckPending } - // Make sure any partition subject is also a literal. + // As best we can make sure the filtered subject is valid. 
if config.FilterSubject != _EMPTY_ { subjects, hasExt := mset.allSubjects() if !validFilteredSubject(config.FilterSubject, subjects) && !hasExt { - return nil, fmt.Errorf("consumer filter subject is not a valid subset of the interest subjects") + return nil, ApiErrors[JSConsumerFilterNotSubsetErr] } } @@ -357,54 +367,84 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri switch config.DeliverPolicy { case DeliverAll: if config.OptStartSeq > 0 { - return nil, fmt.Errorf("consumer delivery policy is deliver all, but optional start sequence is also set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver all, but optional start sequence is also set") } if config.OptStartTime != nil { - return nil, fmt.Errorf("consumer delivery policy is deliver all, but optional start time is also set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver all, but optional start time is also set") } case DeliverLast: if config.OptStartSeq > 0 { - return nil, fmt.Errorf("consumer delivery policy is deliver last, but optional start sequence is also set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver last, but optional start sequence is also set") + } + if config.OptStartTime != nil { + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver last, but optional start time is also set") + } + case DeliverLastPerSubject: + if config.OptStartSeq > 0 { + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver last per subject, but optional start sequence is also set") } if config.OptStartTime != nil { - return nil, fmt.Errorf("consumer delivery policy is deliver last, but optional start time is also set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is 
deliver last per subject, but optional start time is also set") + } + badConfig := config.FilterSubject == _EMPTY_ + if !badConfig { + subjects, ext := mset.allSubjects() + if len(subjects) == 1 && !ext && subjects[0] == config.FilterSubject && subjectIsLiteral(subjects[0]) { + badConfig = true + } + } + if badConfig { + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver last per subject, but filter subject is not set") } case DeliverNew: if config.OptStartSeq > 0 { - return nil, fmt.Errorf("consumer delivery policy is deliver new, but optional start sequence is also set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver new, but optional start sequence is also set") } if config.OptStartTime != nil { - return nil, fmt.Errorf("consumer delivery policy is deliver new, but optional start time is also set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver new, but optional start time is also set") } case DeliverByStartSequence: if config.OptStartSeq == 0 { - return nil, fmt.Errorf("consumer delivery policy is deliver by start sequence, but optional start sequence is not set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver by start sequence, but optional start sequence is not set") } if config.OptStartTime != nil { - return nil, fmt.Errorf("consumer delivery policy is deliver by start sequence, but optional start time is also set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver by start sequence, but optional start time is also set") } case DeliverByStartTime: if config.OptStartTime == nil { - return nil, fmt.Errorf("consumer delivery policy is deliver by start time, but optional start time is not set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery 
policy is deliver by start time, but optional start time is not set") } if config.OptStartSeq != 0 { - return nil, fmt.Errorf("consumer delivery policy is deliver by start time, but optional start sequence is also set") + return nil, ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver by start time, but optional start sequence is also set") } } sampleFreq := 0 - if config.SampleFrequency != "" { + if config.SampleFrequency != _EMPTY_ { s := strings.TrimSuffix(config.SampleFrequency, "%") sampleFreq, err = strconv.Atoi(s) if err != nil { - return nil, fmt.Errorf("failed to parse consumer sampling configuration: %v", err) + return nil, ApiErrors[JSConsumerInvalidSamplingErrF].NewT("{err}", err) } } // Grab the client, account and server reference. c := mset.client if c == nil { - return nil, fmt.Errorf("stream not valid") + return nil, ApiErrors[JSStreamInvalidErr] } c.mu.Lock() s, a := c.srv, c.acc @@ -427,7 +467,7 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri eo.updateDeliverSubject(config.DeliverSubject) return eo, nil } else { - return nil, fmt.Errorf("consumer already exists") + return nil, ApiErrors[JSConsumerNameExistErr] } } } @@ -438,13 +478,12 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri // than stream config we prefer the account limits to handle cases where account limits are // updated during the lifecycle of the stream maxc := mset.cfg.MaxConsumers - if mset.cfg.MaxConsumers <= 0 || mset.jsa.limits.MaxConsumers < mset.cfg.MaxConsumers { + if maxc <= 0 || (mset.jsa.limits.MaxConsumers > 0 && mset.jsa.limits.MaxConsumers < maxc) { maxc = mset.jsa.limits.MaxConsumers } - if maxc > 0 && len(mset.consumers) >= maxc { mset.mu.Unlock() - return nil, fmt.Errorf("maximum consumers limit reached") + return nil, ApiErrors[JSMaximumConsumersLimitErr] } // Check on stream type conflicts with WorkQueues. 
@@ -452,22 +491,22 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri // Force explicit acks here. if config.AckPolicy != AckExplicit { mset.mu.Unlock() - return nil, fmt.Errorf("workqueue stream requires explicit ack") + return nil, ApiErrors[JSConsumerWQRequiresExplicitAckErr] } if len(mset.consumers) > 0 { if config.FilterSubject == _EMPTY_ { mset.mu.Unlock() - return nil, fmt.Errorf("multiple non-filtered consumers not allowed on workqueue stream") + return nil, ApiErrors[JSConsumerWQMultipleUnfilteredErr] } else if !mset.partitionUnique(config.FilterSubject) { // We have a partition but it is not unique amongst the others. mset.mu.Unlock() - return nil, fmt.Errorf("filtered consumer not unique on workqueue stream") + return nil, ApiErrors[JSConsumerWQConsumerNotUniqueErr] } } if config.DeliverPolicy != DeliverAll { mset.mu.Unlock() - return nil, fmt.Errorf("consumer must be deliver all on workqueue stream") + return nil, ApiErrors[JSConsumerWQConsumerNotDeliverAllErr] } } @@ -499,7 +538,8 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri if isDurableConsumer(config) { if len(config.Durable) > JSMaxNameLen { mset.mu.Unlock() - return nil, fmt.Errorf("consumer name is too long, maximum allowed is %d", JSMaxNameLen) + o.deleteWithoutAdvisory() + return nil, ApiErrors[JSConsumerNameTooLongErrF].NewT("{max}", JSMaxNameLen) } o.name = config.Durable if o.isPullMode() { @@ -547,7 +587,7 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri if !isValidName(o.name) { mset.mu.Unlock() o.deleteWithoutAdvisory() - return nil, fmt.Errorf("durable name can not contain '.', '*', '>'") + return nil, ApiErrors[JSConsumerBadDurableNameErr] } // Select starting sequence number @@ -558,7 +598,7 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri if err != nil { mset.mu.Unlock() o.deleteWithoutAdvisory() - return nil, fmt.Errorf("error creating store for 
consumer: %v", err) + return nil, ApiErrors[JSConsumerStoreFailedErrF].NewT("{err}", err) } o.store = store } @@ -570,20 +610,20 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri if !o.isDurable() || !o.isPushMode() { o.name = _EMPTY_ // Prevent removal since same name. o.deleteWithoutAdvisory() - return nil, fmt.Errorf("consumer already exists") + return nil, ApiErrors[JSConsumerNameExistErr] } // If we are here we have already registered this durable. If it is still active that is an error. if eo.isActive() { o.name = _EMPTY_ // Prevent removal since same name. o.deleteWithoutAdvisory() - return nil, fmt.Errorf("consumer already exists and is still active") + return nil, ApiErrors[JSConsumerExistingActiveErr] } // Since we are here this means we have a potentially new durable so we should update here. // Check that configs are the same. if !configsEqualSansDelivery(o.cfg, eo.cfg) { o.name = _EMPTY_ // Prevent removal since same name. o.deleteWithoutAdvisory() - return nil, fmt.Errorf("consumer replacement durable config not the same") + return nil, ApiErrors[JSConsumerReplacementWithDifferentNameErr] } // Once we are here we have a replacement push-based durable. eo.updateDeliverSubject(o.cfg.DeliverSubject) @@ -606,14 +646,8 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri // Check in place here for interest. Will setup properly in setLeader. r := o.acc.sl.Match(o.cfg.DeliverSubject) if !o.hasDeliveryInterest(len(r.psubs)+len(r.qsubs) > 0) { - // Directs can let the interest come to us eventually, but setup delete timer. - if config.Direct { - o.updateDeliveryInterest(false) - } else { - mset.mu.Unlock() - o.deleteWithoutAdvisory() - return nil, errNoInterest - } + // Let the interest come to us eventually, but setup delete timer. + o.updateDeliveryInterest(false) } } } @@ -724,8 +758,8 @@ func (o *consumer) setLeader(isLeader bool) { } } - // Setup initial pending. 
- o.setInitialPending() + // Setup initial pending and proper start sequence. + o.setInitialPendingAndStart() // If push mode, register for notifications on interest. if o.isPushMode() { @@ -784,7 +818,7 @@ func (o *consumer) setLeader(isLeader bool) { } } -func (o *consumer) handleClusterConsumerInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) { +func (o *consumer) handleClusterConsumerInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { o.mu.RLock() sysc := o.sysc o.mu.RUnlock() @@ -822,7 +856,7 @@ func (o *consumer) unsubscribe(sub *subscription) { // We need to make sure we protect access to the outq. // Do all advisory sends here. func (o *consumer) sendAdvisory(subj string, msg []byte) { - o.outq.send(&jsPubMsg{subj, subj, _EMPTY_, nil, msg, nil, 0, nil}) + o.outq.sendMsg(subj, msg) } func (o *consumer) sendDeleteAdvisoryLocked() { @@ -1103,7 +1137,7 @@ func (o *consumer) sendAckReply(subj string) { } // Process a message for the ack reply subject delivered with a message. -func (o *consumer) processAck(_ *subscription, c *client, subject, reply string, rmsg []byte) { +func (o *consumer) processAck(_ *subscription, c *client, acc *Account, subject, reply string, rmsg []byte) { _, msg := c.msgParts(rmsg) sseq, dseq, dc := ackReplyInfo(subject) @@ -1122,7 +1156,7 @@ func (o *consumer) processAck(_ *subscription, c *client, subject, reply string, // somewhere else. phdr := c.pa.hdr c.pa.hdr = -1 - o.processNextMsgReq(nil, c, subject, reply, msg[len(AckNext):]) + o.processNextMsgReq(nil, c, acc, subject, reply, msg[len(AckNext):]) c.pa.hdr = phdr skipAckReply = true case bytes.Equal(msg, AckNak): @@ -1195,6 +1229,9 @@ func (o *consumer) loopAndForwardProposals(qch chan struct{}) { } } + // In case we have anything pending on entry. 
+ forwardProposals() + for { select { case <-qch: @@ -1439,7 +1476,7 @@ func (o *consumer) info() *ConsumerInfo { }, NumAckPending: len(o.pending), NumRedelivered: len(o.rdc), - NumPending: o.sgap, + NumPending: o.adjustedPending(), Cluster: ci, } // If we are a pull mode consumer, report on number of waiting requests. @@ -1767,7 +1804,7 @@ func (wq *waitQueue) pop() *waitingRequest { // processNextMsgReq will process a request for the next message available. A nil message payload means deliver // a single message. If the payload is a formal request or a number parseable with Atoi(), then we will send a // batch of messages without requiring another request to this endpoint, or an ACK. -func (o *consumer) processNextMsgReq(_ *subscription, c *client, _, reply string, msg []byte) { +func (o *consumer) processNextMsgReq(_ *subscription, c *client, _ *Account, _, reply string, msg []byte) { _, msg = c.msgParts(msg) o.mu.Lock() @@ -1928,7 +1965,16 @@ func (o *consumer) getNextMsg() (subj string, hdr, msg []byte, seq uint64, dc ui } for { seq, dc := o.sseq, uint64(1) - if o.hasRedeliveries() { + if o.hasSkipListPending() { + seq = o.lss.seqs[0] + if len(o.lss.seqs) == 1 { + o.sseq = o.lss.resume + o.lss = nil + o.updateSkipped() + } else { + o.lss.seqs = o.lss.seqs[1:] + } + } else if o.hasRedeliveries() { seq = o.getNextToRedeliver() dc = o.incDeliveryCount(seq) if o.maxdc > 0 && dc > o.maxdc { @@ -2197,21 +2243,33 @@ func (o *consumer) setMaxPendingBytes(limit int) { } } +// We have the case where a consumer can become greedy and pick up a messages before the stream has incremented our pending(sgap). +// Instead of trying to slow things down and synchronize we will allow this to wrap and go negative (biggest uint64) for a short time. +// This functions checks for that and returns 0. +// Lock should be held. +func (o *consumer) adjustedPending() uint64 { + if o.sgap&(1<<63) != 0 { + return 0 + } + return o.sgap +} + // Deliver a msg to the consumer. 
// Lock should be held and o.mset validated to be non-nil. func (o *consumer) deliverMsg(dsubj, subj string, hdr, msg []byte, seq, dc uint64, ts int64) { if o.mset == nil { return } - // Update pending on first attempt - if dc == 1 && o.sgap > 0 { + // Update pending on first attempt. This can go upside down for a short bit, that is ok. + // See adjustedPending(). + if dc == 1 { o.sgap-- } dseq := o.dseq o.dseq++ - pmsg := &jsPubMsg{dsubj, subj, o.ackReply(seq, dseq, dc, ts, o.sgap), hdr, msg, o, seq, nil} + pmsg := &jsPubMsg{dsubj, subj, o.ackReply(seq, dseq, dc, ts, o.adjustedPending()), hdr, msg, o, seq, nil} if o.maxpb > 0 { o.pbytes += pmsg.size() } @@ -2255,7 +2313,7 @@ func (o *consumer) needFlowControl() bool { return false } -func (o *consumer) processFlowControl(_ *subscription, c *client, subj, _ string, _ []byte) { +func (o *consumer) processFlowControl(_ *subscription, c *client, _ *Account, subj, _ string, _ []byte) { o.mu.Lock() defer o.mu.Unlock() @@ -2554,32 +2612,32 @@ func ackReplyInfo(subject string) (sseq, dseq, dc uint64) { // NextSeq returns the next delivered sequence number for this consumer. func (o *consumer) nextSeq() uint64 { - o.mu.Lock() + o.mu.RLock() dseq := o.dseq - o.mu.Unlock() + o.mu.RUnlock() return dseq } -// This will select the store seq to start with based on the -// partition subject. -func (o *consumer) selectSubjectLast() { - stats := o.mset.store.State() - if stats.LastSeq == 0 { - o.sseq = stats.LastSeq - return - } - // FIXME(dlc) - this is linear and can be optimized by store layer. - for seq := stats.LastSeq; seq >= stats.FirstSeq; seq-- { - subj, _, _, _, err := o.mset.store.LoadMsg(seq) - if err == ErrStoreMsgNotFound { - continue - } - if o.isFilteredMatch(subj) { - o.sseq = seq - o.updateSkipped() - return - } +// Used to hold skip list when deliver policy is last per subject. +type lastSeqSkipList struct { + resume uint64 + seqs []uint64 +} + +// Will create a skip list for us from a store's subjects state. 
+func createLastSeqSkipList(mss map[string]SimpleState) []uint64 { + seqs := make([]uint64, 0, len(mss)) + for _, ss := range mss { + seqs = append(seqs, ss.Last) } + sort.Slice(seqs, func(i, j int) bool { return seqs[i] < seqs[j] }) + return seqs +} + +// Let's us know we have a skip list, which is for deliver last per subject and we are just starting. +// Lock should be held. +func (o *consumer) hasSkipListPending() bool { + return o.lss != nil && len(o.lss.seqs) > 0 } // Will select the starting sequence. @@ -2593,16 +2651,27 @@ func (o *consumer) selectStartingSeqNo() { o.sseq = stats.FirstSeq } else if o.cfg.DeliverPolicy == DeliverLast { o.sseq = stats.LastSeq - // If we are partitioned here we may need to walk backwards. + // If we are partitioned here this will be properly set when we become leader. if o.cfg.FilterSubject != _EMPTY_ { - o.selectSubjectLast() + ss := o.mset.store.FilteredState(1, o.cfg.FilterSubject) + o.sseq = ss.Last + } + } else if o.cfg.DeliverPolicy == DeliverLastPerSubject { + if mss := o.mset.store.SubjectsState(o.cfg.FilterSubject); len(mss) > 0 { + o.lss = &lastSeqSkipList{ + resume: stats.LastSeq, + seqs: createLastSeqSkipList(mss), + } + o.sseq = o.lss.seqs[0] + } else { + // If no mapping info just set to last. + o.sseq = stats.LastSeq } } else if o.cfg.OptStartTime != nil { // If we are here we are time based. // TODO(dlc) - Once clustered can't rely on this. o.sseq = o.mset.store.GetSeqFromTime(*o.cfg.OptStartTime) } else { - // Default is deliver new only. o.sseq = stats.LastSeq + 1 } } else { @@ -2688,20 +2757,19 @@ func (o *consumer) hasNoLocalInterest() bool { } // This is when the underlying stream has been purged. +// sseq is the new first seq for the stream after purge. 
func (o *consumer) purge(sseq uint64) { + if sseq == 0 { + return + } + o.mu.Lock() o.sseq = sseq o.asflr = sseq - 1 o.adflr = o.dseq - 1 o.sgap = 0 - if len(o.pending) > 0 { - o.pending = nil - if o.ptmr != nil { - o.ptmr.Stop() - // Do not nil this out here. This allows checkPending to fire - // and still be ok and not panic. - } - } + o.pending = nil + // We need to remove all those being queued for redelivery under o.rdq if len(o.rdq) > 0 { rdq := o.rdq @@ -2729,19 +2797,19 @@ func stopAndClearTimer(tp **time.Timer) { // Stop will shutdown the consumer for the associated stream. func (o *consumer) stop() error { - return o.stopWithFlags(false, true, false) + return o.stopWithFlags(false, false, true, false) } func (o *consumer) deleteWithoutAdvisory() error { - return o.stopWithFlags(true, true, false) + return o.stopWithFlags(true, false, true, false) } // Delete will delete the consumer for the associated stream and send advisories. func (o *consumer) delete() error { - return o.stopWithFlags(true, true, true) + return o.stopWithFlags(true, false, true, true) } -func (o *consumer) stopWithFlags(dflag, doSignal, advisory bool) error { +func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error { o.mu.Lock() if o.closed { o.mu.Unlock() @@ -2810,7 +2878,6 @@ func (o *consumer) stopWithFlags(dflag, doSignal, advisory bool) error { // non-leader consumers will need to restore state first. if dflag && rp == InterestPolicy { stop := mset.lastSeq() - o.mu.Lock() if !o.isLeader() { o.readStoredState() @@ -2818,8 +2885,7 @@ func (o *consumer) stopWithFlags(dflag, doSignal, advisory bool) error { start := o.asflr o.mu.Unlock() - rmseqs := make([]uint64, 0, stop-start+1) - + var rmseqs []uint64 mset.mu.RLock() for seq := start; seq <= stop; seq++ { if !mset.checkInterest(seq, o) { @@ -2842,10 +2908,15 @@ func (o *consumer) stopWithFlags(dflag, doSignal, advisory bool) error { } } + // Clean up our store. 
var err error if store != nil { if dflag { - err = store.Delete() + if sdflag { + err = store.StreamDelete() + } else { + err = store.Delete() + } } else { err = store.Stop() } @@ -2868,11 +2939,22 @@ func (mset *stream) deliveryFormsCycle(deliverySubject string) bool { return false } +// Check that the filtered subject is valid given a set of stream subjects. func validFilteredSubject(filteredSubject string, subjects []string) bool { + if !IsValidSubject(filteredSubject) { + return false + } + hasWC := subjectHasWildcard(filteredSubject) + for _, subject := range subjects { if subjectIsSubsetMatch(filteredSubject, subject) { return true } + // If we have a wildcard as the filtered subject check to see if we are + // a wider scope but do match a subject. + if hasWC && subjectIsSubsetMatch(subject, filteredSubject) { + return true + } } return false } @@ -2920,13 +3002,14 @@ func (o *consumer) requestNextMsgSubject() string { return o.nextMsgSubj } -// Will set the initial pending. +// Will set the initial pending and start sequence. // mset lock should be held. -func (o *consumer) setInitialPending() { +func (o *consumer) setInitialPendingAndStart() { mset := o.mset if mset == nil || mset.store == nil { return } + // notFiltered means we want all messages. notFiltered := o.cfg.FilterSubject == _EMPTY_ if !notFiltered { @@ -2945,7 +3028,30 @@ func (o *consumer) setInitialPending() { } } else { // Here we are filtered. - o.sgap = o.mset.store.NumFilteredPending(o.sseq, o.cfg.FilterSubject) + dp := o.cfg.DeliverPolicy + if dp == DeliverLastPerSubject && o.hasSkipListPending() && o.sseq < o.lss.resume { + if o.lss != nil { + ss := mset.store.FilteredState(o.lss.resume, o.cfg.FilterSubject) + o.sseq = o.lss.seqs[0] + o.sgap = ss.Msgs + uint64(len(o.lss.seqs)) + } + } else if ss := mset.store.FilteredState(o.sseq, o.cfg.FilterSubject); ss.Msgs > 0 { + o.sgap = ss.Msgs + // See if we should update our starting sequence. 
+ if dp == DeliverLast || dp == DeliverLastPerSubject { + o.sseq = ss.Last + } else if dp == DeliverNew { + o.sseq = ss.Last + 1 + } else { + // DeliverAll, DeliverByStartSequence, DeliverByStartTime + o.sseq = ss.First + } + // Cleanup lss when we take over in clustered mode. + if dp == DeliverLastPerSubject && o.hasSkipListPending() && o.sseq >= o.lss.resume { + o.lss = nil + } + } + o.updateSkipped() } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/errors.go b/vendor/github.com/nats-io/nats-server/v2/server/errors.go index ddad6423..f5db0c48 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/errors.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/errors.go @@ -171,47 +171,11 @@ var ( ErrMalformedSubject = errors.New("malformed subject") // ErrSubscribePermissionViolation is returned when processing of a subscription fails due to permissions. - ErrSubscribePermissionViolation = errors.New("subscribe permission viloation") + ErrSubscribePermissionViolation = errors.New("subscribe permission violation") // ErrNoTransforms signals no subject transforms are available to map this subject. ErrNoTransforms = errors.New("no matching transforms available") - // ErrJetStreamNotEnabled is returned when JetStream is not enabled. - ErrJetStreamNotEnabled = errors.New("jetstream not enabled") - - // ErrJetStreamStreamNotFound is returned when a stream can not be found. - ErrJetStreamStreamNotFound = errors.New("stream not found") - - // ErrJetStreamStreamAlreadyUsed is returned when a stream name has already been taken. - ErrJetStreamStreamAlreadyUsed = errors.New("stream name already in use") - - // ErrJetStreamConsumerAlreadyUsed is returned when a consumer name has already been taken. - ErrJetStreamConsumerAlreadyUsed = errors.New("consumer name already in use") - - // ErrJetStreamNotEnabledForAccount is returned JetStream is not enabled for this account. 
- ErrJetStreamNotEnabledForAccount = errors.New("jetstream not enabled for account") - - // ErrJetStreamNotLeader is returned when issuing commands to a cluster on the wrong server. - ErrJetStreamNotLeader = errors.New("jetstream cluster can not handle request") - - // ErrJetStreamNotAssigned is returned when the resource (stream or consumer) is not assigned. - ErrJetStreamNotAssigned = errors.New("jetstream cluster not assigned to this server") - - // ErrJetStreamNotClustered is returned when a call requires clustering and we are not. - ErrJetStreamNotClustered = errors.New("jetstream not in clustered mode") - - // ErrJetStreamResourcesExceeded is returned when a call would exceed internal resource limits. - ErrJetStreamResourcesExceeded = errors.New("jetstream resources exceeded for server") - - // ErrStorageResourcesExceeded is returned when storage resources would be exceeded. - ErrStorageResourcesExceeded = errors.New("insufficient storage resources available") - - // ErrMemoryResourcesExceeded is returned when memory resources would be exceeded. - ErrMemoryResourcesExceeded = errors.New("insufficient memory resources available") - - // ErrReplicasNotSupported is returned when a stream with replicas > 1 in non-clustered mode. 
- ErrReplicasNotSupported = errors.New("replicas > 1 not supported in non-clustered mode") - // ErrCertNotPinned is returned when pinned certs are set and the certificate is not in it ErrCertNotPinned = errors.New("certificate not pinned") ) @@ -297,7 +261,7 @@ func NewErrorCtx(err error, format string, args ...interface{}) error { return &errCtx{err, fmt.Sprintf(format, args...)} } -// implement to work with errors.Is and errors.As +// Unwrap implement to work with errors.Is and errors.As func (e *errCtx) Unwrap() error { if e == nil { return nil @@ -313,7 +277,7 @@ func (e *errCtx) Context() string { return e.ctx } -// Return Error or, if type is right error and context +// UnpackIfErrorCtx return Error or, if type is right error and context func UnpackIfErrorCtx(err error) string { if e, ok := err.(*errCtx); ok { if _, ok := e.error.(*errCtx); ok { @@ -336,7 +300,7 @@ func errorsUnwrap(err error) error { return u.Unwrap() } -// implements: go 1.13 errors.Is(err, target error) bool +// ErrorIs implements: go 1.13 errors.Is(err, target error) bool // TODO replace with native code once we no longer support go1.12 func ErrorIs(err, target error) bool { // this is an outright copy of go 1.13 errors.Is(err, target error) bool diff --git a/vendor/github.com/nats-io/nats-server/v2/server/errors.json b/vendor/github.com/nats-io/nats-server/v2/server/errors.json new file mode 100644 index 00000000..8cd2b1ba --- /dev/null +++ b/vendor/github.com/nats-io/nats-server/v2/server/errors.json @@ -0,0 +1,1052 @@ +[ + { + "constant": "JSClusterPeerNotMemberErr", + "code": 400, + "error_code": 10040, + "description": "peer not a member", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerEphemeralWithDurableInSubjectErr", + "code": 400, + "error_code": 10019, + "description": "consumer expected to be ephemeral but detected a durable name set in subject", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + 
"constant": "JSStreamExternalDelPrefixOverlapsErrF", + "code": 400, + "error_code": 10022, + "description": "stream external delivery prefix {prefix} overlaps with stream subject {subject}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSAccountResourcesExceededErr", + "code": 400, + "error_code": 10002, + "description": "resource limits exceeded for account", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSClusterNotAvailErr", + "code": 503, + "error_code": 10008, + "description": "JetStream system temporarily unavailable", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamSubjectOverlapErr", + "code": 500, + "error_code": 10065, + "description": "subjects overlap with an existing stream", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamWrongLastSequenceErrF", + "code": 400, + "error_code": 10071, + "description": "wrong last sequence: {seq}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSTemplateNameNotMatchSubjectErr", + "code": 400, + "error_code": 10073, + "description": "template name in subject does not match request", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSClusterNoPeersErr", + "code": 400, + "error_code": 10005, + "description": "no suitable peers for placement", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerEphemeralWithDurableNameErr", + "code": 400, + "error_code": 10020, + "description": "consumer expected to be ephemeral but a durable name was set in request", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSInsufficientResourcesErr", + "code": 503, + "error_code": 10023, + "description": "insufficient resources", + "comment": "", + "help": "", + "url": "", + "deprecates": 
"ErrJetStreamResourcesExceeded" + }, + { + "constant": "JSMirrorMaxMessageSizeTooBigErr", + "code": 400, + "error_code": 10030, + "description": "stream mirror must have max message size \u003e= source", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamTemplateDeleteErrF", + "code": 500, + "error_code": 10067, + "description": "{err}", + "comment": "Generic stream template deletion failed error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSBadRequestErr", + "code": 400, + "error_code": 10003, + "description": "bad request", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSClusterUnSupportFeatureErr", + "code": 503, + "error_code": 10036, + "description": "not currently supported in clustered mode", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerNotFoundErr", + "code": 404, + "error_code": 10014, + "description": "consumer not found", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSSourceMaxMessageSizeTooBigErr", + "code": 400, + "error_code": 10046, + "description": "stream source must have max message size \u003e= target", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamAssignmentErrF", + "code": 500, + "error_code": 10048, + "description": "{err}", + "comment": "Generic stream assignment error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamMessageExceedsMaximumErr", + "code": 400, + "error_code": 10054, + "description": "message size exceeds maximum allowed", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamTemplateCreateErrF", + "code": 500, + "error_code": 10066, + "description": "{err}", + "comment": "Generic template creation failed string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": 
"JSInvalidJSONErr", + "code": 400, + "error_code": 10025, + "description": "invalid JSON", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamInvalidExternalDeliverySubjErrF", + "code": 400, + "error_code": 10024, + "description": "stream external delivery prefix {prefix} must not contain wildcards", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamRestoreErrF", + "code": 500, + "error_code": 10062, + "description": "restore failed: {err}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSClusterIncompleteErr", + "code": 503, + "error_code": 10004, + "description": "incomplete results", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSNoAccountErr", + "code": 503, + "error_code": 10035, + "description": "account not found", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSRaftGeneralErrF", + "code": 500, + "error_code": 10041, + "description": "{err}", + "comment": "General RAFT error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSRestoreSubscribeFailedErrF", + "code": 500, + "error_code": 10042, + "description": "JetStream unable to subscribe to restore snapshot {subject}: {err}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamDeleteErrF", + "code": 500, + "error_code": 10050, + "description": "{err}", + "comment": "General stream deletion error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamExternalApiOverlapErrF", + "code": 400, + "error_code": 10021, + "description": "stream external api prefix {prefix} must not overlap with {subject}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSMirrorWithSubjectsErr", + "code": 400, + "error_code": 10034, + "description": "stream mirrors can not also 
contain subjects", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSNotEnabledErr", + "code": 503, + "error_code": 10076, + "description": "JetStream not enabled", + "comment": "", + "help": "This error indicates that JetStream is not enabled at a global level", + "url": "", + "deprecates": "ErrJetStreamNotEnabled" + }, + { + "constant": "JSNotEnabledForAccountErr", + "code": 503, + "error_code": 10039, + "description": "JetStream not enabled for account", + "comment": "", + "help": "This error indicates that JetStream is not enabled for an account account level", + "url": "", + "deprecates": "" + }, + { + "constant": "JSSequenceNotFoundErrF", + "code": 400, + "error_code": 10043, + "description": "sequence {seq} not found", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamMirrorNotUpdatableErr", + "code": 400, + "error_code": 10055, + "description": "Mirror configuration can not be updated", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamSequenceNotMatchErr", + "code": 503, + "error_code": 10063, + "description": "expected stream sequence does not match", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamWrongLastMsgIDErrF", + "code": 400, + "error_code": 10070, + "description": "wrong last msg ID: {id}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSTempStorageFailedErr", + "code": 500, + "error_code": 10072, + "description": "JetStream unable to open temp storage for restore", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStorageResourcesExceededErr", + "code": 500, + "error_code": 10047, + "description": "insufficient storage resources available", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrStorageResourcesExceeded" + }, + { + "constant": "JSStreamMismatchErr", + "code": 400, + 
"error_code": 10056, + "description": "stream name in subject does not match request", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamNotMatchErr", + "code": 400, + "error_code": 10060, + "description": "expected stream does not match", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSMirrorConsumerSetupFailedErrF", + "code": 500, + "error_code": 10029, + "description": "{err}", + "comment": "Generic mirror consumer setup failure string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSNotEmptyRequestErr", + "code": 400, + "error_code": 10038, + "description": "expected an empty request payload", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamNameExistErr", + "code": 400, + "error_code": 10058, + "description": "stream name already in use", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrJetStreamStreamAlreadyUsed" + }, + { + "constant": "JSClusterTagsErr", + "code": 400, + "error_code": 10011, + "description": "tags placement not supported for operation", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSMaximumConsumersLimitErr", + "code": 400, + "error_code": 10026, + "description": "maximum consumers limit reached", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSSourceConsumerSetupFailedErrF", + "code": 500, + "error_code": 10045, + "description": "{err}", + "comment": "General source consumer setup failure string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerCreateErrF", + "code": 500, + "error_code": 10012, + "description": "{err}", + "comment": "General consumer creation failure string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerDurableNameNotInSubjectErr", + "code": 400, + "error_code": 10016, + "description": "consumer expected to 
be durable but no durable name set in subject", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamLimitsErrF", + "code": 500, + "error_code": 10053, + "description": "{err}", + "comment": "General stream limits exceeded error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamReplicasNotUpdatableErr", + "code": 400, + "error_code": 10061, + "description": "Replicas configuration can not be updated", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamTemplateNotFoundErr", + "code": 404, + "error_code": 10068, + "description": "template not found", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSClusterNotAssignedErr", + "code": 500, + "error_code": 10007, + "description": "JetStream cluster not assigned to this server", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrJetStreamNotAssigned" + }, + { + "constant": "JSClusterNotLeaderErr", + "code": 500, + "error_code": 10009, + "description": "JetStream cluster can not handle request", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrJetStreamNotLeader" + }, + { + "constant": "JSConsumerNameExistErr", + "code": 400, + "error_code": 10013, + "description": "consumer name already in use", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrJetStreamConsumerAlreadyUsed" + }, + { + "constant": "JSMirrorWithSourcesErr", + "code": 400, + "error_code": 10031, + "description": "stream mirrors can not also contain other sources", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamNotFoundErr", + "code": 404, + "error_code": 10059, + "description": "stream not found", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrJetStreamStreamNotFound" + }, + { + "constant": "JSClusterRequiredErr", + "code": 503, + "error_code": 10010, + "description": "JetStream clustering 
support required", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerDurableNameNotSetErr", + "code": 400, + "error_code": 10018, + "description": "consumer expected to be durable but a durable name was not set", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSMaximumStreamsLimitErr", + "code": 400, + "error_code": 10027, + "description": "maximum number of streams reached", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSMirrorWithStartSeqAndTimeErr", + "code": 400, + "error_code": 10032, + "description": "stream mirrors can not have both start seq and start time configured", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamSnapshotErrF", + "code": 500, + "error_code": 10064, + "description": "snapshot failed: {err}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamUpdateErrF", + "code": 500, + "error_code": 10069, + "description": "{err}", + "comment": "Generic stream update error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSClusterNotActiveErr", + "code": 500, + "error_code": 10006, + "description": "JetStream not in clustered mode", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrJetStreamNotClustered" + }, + { + "constant": "JSConsumerDurableNameNotMatchSubjectErr", + "code": 400, + "error_code": 10017, + "description": "consumer name in subject does not match durable name in request", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSMemoryResourcesExceededErr", + "code": 500, + "error_code": 10028, + "description": "insufficient memory resources available", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrMemoryResourcesExceeded" + }, + { + "constant": "JSMirrorWithSubjectFiltersErr", + "code": 400, + "error_code": 10033, + 
"description": "stream mirrors can not contain filtered subjects", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamCreateErrF", + "code": 500, + "error_code": 10049, + "description": "{err}", + "comment": "Generic stream creation error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSClusterServerNotMemberErr", + "code": 400, + "error_code": 10044, + "description": "server is not a member of the cluster", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSNoMessageFoundErr", + "code": 404, + "error_code": 10037, + "description": "no message found", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSSnapshotDeliverSubjectInvalidErr", + "code": 400, + "error_code": 10015, + "description": "deliver subject not valid", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamGeneralErrorF", + "code": 500, + "error_code": 10051, + "description": "{err}", + "comment": "General stream failure string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamInvalidConfigF", + "code": 500, + "error_code": 10052, + "description": "{err}", + "comment": "Stream configuration validation error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamReplicasNotSupportedErr", + "code": 500, + "error_code": 10074, + "description": "replicas \u003e 1 not supported in non-clustered mode", + "comment": "", + "help": "", + "url": "", + "deprecates": "ErrReplicasNotSupported" + }, + { + "constant": "JSStreamMsgDeleteFailedF", + "code": 500, + "error_code": 10057, + "description": "{err}", + "comment": "Generic message deletion failure error string", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSPeerRemapErr", + "code": 503, + "error_code": 10075, + "description": "peer remap failed", + "comment": "", + "help": "", + 
"url": "", + "deprecates": "" + }, + { + "constant": "JSStreamStoreFailedF", + "code": 503, + "error_code": 10077, + "description": "{err}", + "comment": "Generic error when storing a message failed", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerConfigRequiredErr", + "code": 400, + "error_code": 10078, + "description": "consumer config required", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerDeliverToWildcardsErr", + "code": 400, + "error_code": 10079, + "description": "consumer deliver subject has wildcards", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerPushMaxWaitingErr", + "code": 400, + "error_code": 10080, + "description": "consumer in push mode can not set max waiting", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerDeliverCycleErr", + "code": 400, + "error_code": 10081, + "description": "consumer deliver subject forms a cycle", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerMaxPendingAckPolicyRequiredErr", + "code": 400, + "error_code": 10082, + "description": "consumer requires ack policy for max ack pending", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerSmallHeartbeatErr", + "code": 400, + "error_code": 10083, + "description": "consumer idle heartbeat needs to be \u003e= 100ms", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerPullRequiresAckErr", + "code": 400, + "error_code": 10084, + "description": "consumer in pull mode requires explicit ack policy", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerPullNotDurableErr", + "code": 400, + "error_code": 10085, + "description": "consumer in pull mode requires a durable name", + "comment": "", + "help": "", + "url": "", + 
"deprecates": "" + }, + { + "constant": "JSConsumerPullWithRateLimitErr", + "code": 400, + "error_code": 10086, + "description": "consumer in pull mode can not have rate limit set", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerMaxWaitingNegativeErr", + "code": 400, + "error_code": 10087, + "description": "consumer max waiting needs to be positive", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerHBRequiresPushErr", + "code": 400, + "error_code": 10088, + "description": "consumer idle heartbeat requires a push based consumer", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerFCRequiresPushErr", + "code": 400, + "error_code": 10089, + "description": "consumer flow control requires a push based consumer", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerDirectRequiresPushErr", + "code": 400, + "error_code": 10090, + "description": "consumer direct requires a push based consumer", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerDirectRequiresEphemeralErr", + "code": 400, + "error_code": 10091, + "description": "consumer direct requires an ephemeral consumer", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerOnMappedErr", + "code": 400, + "error_code": 10092, + "description": "consumer direct on a mapped consumer", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerFilterNotSubsetErr", + "code": 400, + "error_code": 10093, + "description": "consumer filter subject is not a valid subset of the interest subjects", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerInvalidPolicyErrF", + "code": 400, + "error_code": 10094, + "description": "{err}", + "comment": "Generic delivery policy error", + 
"help": "Error returned for impossible deliver policies when combined with start sequences etc", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerInvalidSamplingErrF", + "code": 400, + "error_code": 10095, + "description": "failed to parse consumer sampling configuration: {err}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSStreamInvalidErr", + "code": 500, + "error_code": 10096, + "description": "stream not valid", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerWQRequiresExplicitAckErr", + "code": 400, + "error_code": 10098, + "description": "workqueue stream requires explicit ack", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerWQMultipleUnfilteredErr", + "code": 400, + "error_code": 10099, + "description": "multiple non-filtered consumers not allowed on workqueue stream", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerWQConsumerNotUniqueErr", + "code": 400, + "error_code": 10100, + "description": "filtered consumer not unique on workqueue stream", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerWQConsumerNotDeliverAllErr", + "code": 400, + "error_code": 10101, + "description": "consumer must be deliver all on workqueue stream", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerNameTooLongErrF", + "code": 400, + "error_code": 10102, + "description": "consumer name is too long, maximum allowed is {max}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerBadDurableNameErr", + "code": 400, + "error_code": 10103, + "description": "durable name can not contain '.', '*', '\u003e'", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerStoreFailedErrF", + "code": 500, + 
"error_code": 10104, + "description": "error creating store for consumer: {err}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerExistingActiveErr", + "code": 400, + "error_code": 10105, + "description": "consumer already exists and is still active", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerReplacementWithDifferentNameErr", + "code": 400, + "error_code": 10106, + "description": "consumer replacement durable config not the same", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + }, + { + "constant": "JSConsumerDescriptionTooLongErrF", + "code": 400, + "error_code": 10107, + "description": "consumer description is too long, maximum allowed is {max}", + "comment": "", + "help": "", + "url": "", + "deprecates": "" + } +] diff --git a/vendor/github.com/nats-io/nats-server/v2/server/events.go b/vendor/github.com/nats-io/nats-server/v2/server/events.go index 810951f4..2aabcc63 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/events.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/events.go @@ -53,8 +53,8 @@ const ( serverStatsSubj = "$SYS.SERVER.%s.STATSZ" serverDirectReqSubj = "$SYS.REQ.SERVER.%s.%s" serverPingReqSubj = "$SYS.REQ.SERVER.PING.%s" - serverStatsPingReqSubj = "$SYS.REQ.SERVER.PING" // use $SYS.REQ.SERVER.PING.STATSZ instead - leafNodeConnectEventSubj = "$SYS.ACCOUNT.%s.LEAFNODE.CONNECT" + serverStatsPingReqSubj = "$SYS.REQ.SERVER.PING" // use $SYS.REQ.SERVER.PING.STATSZ instead + leafNodeConnectEventSubj = "$SYS.ACCOUNT.%s.LEAFNODE.CONNECT" // for internal use only remoteLatencyEventSubj = "$SYS.LATENCY.M2.%s" inboxRespSubj = "$SYS._INBOX.%s.%s" @@ -157,6 +157,7 @@ type ServerInfo struct { Host string `json:"host"` ID string `json:"id"` Cluster string `json:"cluster,omitempty"` + Domain string `json:"domain,omitempty"` Version string `json:"ver"` Seq uint64 `json:"seq"` JetStream bool `json:"jetstream"` @@ -165,23 
+166,25 @@ type ServerInfo struct { // ClientInfo is detailed information about the client forming a connection. type ClientInfo struct { - Start *time.Time `json:"start,omitempty"` - Host string `json:"host,omitempty"` - ID uint64 `json:"id,omitempty"` - Account string `json:"acc"` - Service string `json:"svc,omitempty"` - User string `json:"user,omitempty"` - Name string `json:"name,omitempty"` - Lang string `json:"lang,omitempty"` - Version string `json:"ver,omitempty"` - RTT time.Duration `json:"rtt,omitempty"` - Server string `json:"server,omitempty"` - Cluster string `json:"cluster,omitempty"` - Stop *time.Time `json:"stop,omitempty"` - Jwt string `json:"jwt,omitempty"` - IssuerKey string `json:"issuer_key,omitempty"` - NameTag string `json:"name_tag,omitempty"` - Tags jwt.TagList `json:"tags,omitempty"` + Start *time.Time `json:"start,omitempty"` + Host string `json:"host,omitempty"` + ID uint64 `json:"id,omitempty"` + Account string `json:"acc"` + Service string `json:"svc,omitempty"` + User string `json:"user,omitempty"` + Name string `json:"name,omitempty"` + Lang string `json:"lang,omitempty"` + Version string `json:"ver,omitempty"` + RTT time.Duration `json:"rtt,omitempty"` + Server string `json:"server,omitempty"` + Cluster string `json:"cluster,omitempty"` + Stop *time.Time `json:"stop,omitempty"` + Jwt string `json:"jwt,omitempty"` + IssuerKey string `json:"issuer_key,omitempty"` + NameTag string `json:"name_tag,omitempty"` + Tags jwt.TagList `json:"tags,omitempty"` + Kind string `json:"kind,omitempty"` + ClientType string `json:"client_type,omitempty"` } // ServerStats hold various statistics that we will periodically send out. 
@@ -199,6 +202,8 @@ type ServerStats struct { SlowConsumers int64 `json:"slow_consumers"` Routes []*RouteStat `json:"routes,omitempty"` Gateways []*GatewayStat `json:"gateways,omitempty"` + ActiveServers int `json:"active_servers,omitempty"` + JetStream *JetStreamVarz `json:"jetstream,omitempty"` } // RouteStat holds route statistics. @@ -266,6 +271,7 @@ RESET: id := s.info.ID host := s.info.Host servername := s.info.Name + domain := s.info.Domain seqp := &s.sys.seq js := s.info.JetStream cluster := s.info.Cluster @@ -290,6 +296,7 @@ RESET: case pm := <-sendq: if pm.si != nil { pm.si.Name = servername + pm.si.Domain = domain pm.si.Host = host pm.si.Cluster = cluster pm.si.ID = id @@ -489,7 +496,6 @@ func (s *Server) checkRemoteServers() { s.Debugf("Detected orphan remote server: %q", sid) // Simulate it going away. s.processRemoteServerShutdown(sid) - delete(s.sys.servers, sid) } } if s.sys.sweeper != nil { @@ -535,7 +541,7 @@ func routeStat(r *client) *RouteStat { // Actual send method for statz updates. // Lock should be held. 
func (s *Server) sendStatsz(subj string) { - m := ServerStatsMsg{} + var m ServerStatsMsg s.updateServerUsage(&m.Stats) m.Stats.Start = s.start m.Stats.Connections = len(s.clients) @@ -547,10 +553,11 @@ func (s *Server) sendStatsz(subj string) { m.Stats.Sent.Bytes = atomic.LoadInt64(&s.outBytes) m.Stats.SlowConsumers = atomic.LoadInt64(&s.slowConsumers) m.Stats.NumSubs = s.numSubscriptions() - + // Routes for _, r := range s.routes { m.Stats.Routes = append(m.Stats.Routes, routeStat(r)) } + // Gateways if s.gateway.enabled { gw := s.gateway gw.RLock() @@ -578,6 +585,36 @@ func (s *Server) sendStatsz(subj string) { } gw.RUnlock() } + // Active Servers + m.Stats.ActiveServers = 1 + if s.sys != nil { + m.Stats.ActiveServers += len(s.sys.servers) + } + // JetStream + if js := s.js; js != nil { + jStat := &JetStreamVarz{} + s.mu.Unlock() + js.mu.RLock() + c := js.config + c.StoreDir = _EMPTY_ + jStat.Config = &c + js.mu.RUnlock() + jStat.Stats = js.usageStats() + if mg := js.getMetaGroup(); mg != nil { + if mg.Leader() { + jStat.Meta = s.raftNodeToClusterInfo(mg) + } else { + // non leader only include a shortened version without peers + jStat.Meta = &ClusterInfo{ + Name: s.ClusterName(), + Leader: s.serverNameForNode(mg.GroupLeader()), + } + } + } + m.Stats.JetStream = jStat + s.mu.Lock() + } + // Send message. s.sendInternalMsg(subj, _EMPTY_, &m.Server, &m) } @@ -595,11 +632,17 @@ func (s *Server) heartbeatStatsz() { s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) } +func (s *Server) sendStatszUpdate() { + s.mu.Lock() + s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) + s.mu.Unlock() +} + // This should be wrapChk() to setup common locking. func (s *Server) startStatszTimer() { // We will start by sending out more of these and trail off to the statsz being the max. - s.sys.cstatsz = time.Second - // Send out the first one only after a second. + s.sys.cstatsz = 250 * time.Millisecond + // Send out the first one after 250ms. 
s.sys.stmr = time.AfterFunc(s.sys.cstatsz, s.wrapChk(s.heartbeatStatsz)) } @@ -689,37 +732,37 @@ func (s *Server) initEventTracking() { } monSrvc := map[string]msgHandler{ "STATSZ": s.statszReq, - "VARZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "VARZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &VarzEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Varz(&optz.VarzOptions) }) + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Varz(&optz.VarzOptions) }) }, - "SUBSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "SUBSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &SubszEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Subsz(&optz.SubszOptions) }) + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Subsz(&optz.SubszOptions) }) }, - "CONNZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "CONNZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &ConnzEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Connz(&optz.ConnzOptions) }) + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Connz(&optz.ConnzOptions) }) }, - "ROUTEZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "ROUTEZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &RoutezEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Routez(&optz.RoutezOptions) }) + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return 
s.Routez(&optz.RoutezOptions) }) }, - "GATEWAYZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "GATEWAYZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &GatewayzEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Gatewayz(&optz.GatewayzOptions) }) + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Gatewayz(&optz.GatewayzOptions) }) }, - "LEAFZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "LEAFZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &LeafzEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Leafz(&optz.LeafzOptions) }) + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Leafz(&optz.LeafzOptions) }) }, - "ACCOUNTZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "ACCOUNTZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &AccountzEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Accountz(&optz.AccountzOptions) }) + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Accountz(&optz.AccountzOptions) }) }, - "JSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "JSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &JszEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Jsz(&optz.JSzOptions) }) + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { return s.Jsz(&optz.JSzOptions) }) }, } for name, req := range monSrvc { @@ -740,9 +783,9 @@ func (s *Server) initEventTracking() { } } monAccSrvc := 
map[string]msgHandler{ - "SUBSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "SUBSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &SubszEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(subject); err != nil { return nil, err } else { @@ -752,9 +795,9 @@ func (s *Server) initEventTracking() { } }) }, - "CONNZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "CONNZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &ConnzEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(subject); err != nil { return nil, err } else { @@ -763,9 +806,9 @@ func (s *Server) initEventTracking() { } }) }, - "LEAFZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "LEAFZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &LeafzEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(subject); err != nil { return nil, err } else { @@ -774,9 +817,9 @@ func (s *Server) initEventTracking() { } }) }, - "JSZ": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "JSZ": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &JszEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(subject); err 
!= nil { return nil, err } else { @@ -785,9 +828,9 @@ func (s *Server) initEventTracking() { } }) }, - "INFO": func(sub *subscription, _ *client, subject, reply string, msg []byte) { + "INFO": func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { optz := &AccInfoEventOptions{} - s.zReq(reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { + s.zReq(c, reply, msg, &optz.EventFilterOptions, optz, func() (interface{}, error) { if acc, err := extractAccount(subject); err != nil { return nil, err } else { @@ -834,11 +877,11 @@ func (s *Server) addSystemAccountExports(sacc *Account) { } // accountClaimUpdate will receive claim updates for accounts. -func (s *Server) accountClaimUpdate(sub *subscription, _ *client, subject, resp string, msg []byte) { +func (s *Server) accountClaimUpdate(sub *subscription, _ *client, _ *Account, subject, resp string, msg []byte) { if !s.EventsEnabled() { return } - pubKey := "" + var pubKey string toks := strings.Split(subject, tsep) if len(toks) == accUpdateTokensNew { pubKey = toks[accReqAccIndex] @@ -880,10 +923,15 @@ func (s *Server) processRemoteServerShutdown(sid string) { } return true }) + delete(s.sys.servers, sid) +} + +func (s *Server) sameDomain(domain string) bool { + return domain == _EMPTY_ || s.info.Domain == _EMPTY_ || domain == s.info.Domain } // remoteServerShutdownEvent is called when we get an event from another server shutting down. 
-func (s *Server) remoteServerShutdown(sub *subscription, _ *client, subject, reply string, msg []byte) { +func (s *Server) remoteServerShutdown(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) { s.mu.Lock() defer s.mu.Unlock() if !s.eventsEnabled() { @@ -894,13 +942,8 @@ func (s *Server) remoteServerShutdown(sub *subscription, _ *client, subject, rep s.Debugf("Received remote server shutdown on bad subject %q", subject) return } - - sid := toks[serverSubjectIndex] - if su := s.sys.servers[sid]; su != nil { - s.processRemoteServerShutdown(sid) - } - if len(msg) == 0 { + s.Errorf("Remote server sent invalid (empty) shutdown message to %q", subject) return } @@ -911,26 +954,31 @@ func (s *Server) remoteServerShutdown(sub *subscription, _ *client, subject, rep return } // Additional processing here. + if !s.sameDomain(si.Domain) { + return + } node := string(getHash(si.Name)) - s.nodeToInfo.Store(node, nodeInfo{si.Name, si.Cluster, si.ID, true, true}) + s.nodeToInfo.Store(node, nodeInfo{si.Name, si.Cluster, si.Domain, si.ID, true, true}) + + sid := toks[serverSubjectIndex] + if su := s.sys.servers[sid]; su != nil { + s.processRemoteServerShutdown(sid) + } } // remoteServerUpdate listens for statsz updates from other servers. -func (s *Server) remoteServerUpdate(sub *subscription, _ *client, subject, reply string, msg []byte) { +func (s *Server) remoteServerUpdate(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) { var ssm ServerStatsMsg if err := json.Unmarshal(msg, &ssm); err != nil { s.Debugf("Received bad server info for remote server update") return } si := ssm.Server - node := string(getHash(si.Name)) - if _, ok := s.nodeToInfo.Load(node); !ok { - // Since we have not seen this one they probably have not seen us so send out our update. 
- s.mu.Lock() - s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) - s.mu.Unlock() + if !s.sameDomain(si.Domain) { + return } - s.nodeToInfo.Store(node, nodeInfo{si.Name, si.Cluster, si.ID, false, si.JetStream}) + node := string(getHash(si.Name)) + s.nodeToInfo.Store(node, nodeInfo{si.Name, si.Cluster, si.Domain, si.ID, false, si.JetStream}) } // updateRemoteServer is called when we have an update from a remote server. @@ -960,8 +1008,12 @@ func (s *Server) processNewServer(ms *ServerInfo) { // connect update to make sure they switch this account to interest only mode. s.ensureGWsInterestOnlyForLeafNodes() // Add to our nodeToName - node := string(getHash(ms.Name)) - s.nodeToInfo.Store(node, nodeInfo{ms.Name, ms.Cluster, ms.ID, false, ms.JetStream}) + if s.sameDomain(ms.Domain) { + node := string(getHash(ms.Name)) + s.nodeToInfo.Store(node, nodeInfo{ms.Name, ms.Cluster, ms.Domain, ms.ID, false, ms.JetStream}) + } + // Announce ourselves.. + s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) } // If GW is enabled on this server and there are any leaf node connections, @@ -1014,7 +1066,7 @@ func (s *Server) shutdownEventing() { } // Request for our local connection count. 
-func (s *Server) connsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) { +func (s *Server) connsRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if !s.eventsRunning() { return } @@ -1025,9 +1077,11 @@ func (s *Server) connsRequest(sub *subscription, _ *client, subject, reply strin } a := tk[accReqAccIndex] m := accNumConnsReq{Account: a} - if err := json.Unmarshal(msg, &m); err != nil { - s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err) - return + if _, msg := c.msgParts(rmsg); len(msg) > 0 { + if err := json.Unmarshal(msg, &m); err != nil { + s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err) + return + } } if m.Account != a { s.sys.client.Errorf("Error unmarshalled account does not match subject") @@ -1051,7 +1105,7 @@ func (s *Server) connsRequest(sub *subscription, _ *client, subject, reply strin } // leafNodeConnected is an event we will receive when a leaf node for a given account connects. -func (s *Server) leafNodeConnected(sub *subscription, _ *client, subject, reply string, msg []byte) { +func (s *Server) leafNodeConnected(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) { m := accNumConnsReq{} if err := json.Unmarshal(msg, &m); err != nil { s.sys.client.Errorf("Error unmarshalling account connections request message: %v", err) @@ -1059,7 +1113,7 @@ func (s *Server) leafNodeConnected(sub *subscription, _ *client, subject, reply } s.mu.Lock() - na := m.Account == "" || !s.eventsEnabled() || !s.gateway.enabled + na := m.Account == _EMPTY_ || !s.eventsEnabled() || !s.gateway.enabled s.mu.Unlock() if na { @@ -1172,7 +1226,7 @@ func (s *Server) filterRequest(fOpts *EventFilterOptions) bool { } // statszReq is a request for us to respond with current statsz. 
-func (s *Server) statszReq(sub *subscription, _ *client, subject, reply string, msg []byte) { +func (s *Server) statszReq(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) { if !s.EventsEnabled() || reply == _EMPTY_ { return } @@ -1198,7 +1252,7 @@ func (s *Server) statszReq(sub *subscription, _ *client, subject, reply string, var errSkipZreq = errors.New("filtered response") -func (s *Server) zReq(reply string, msg []byte, fOpts *EventFilterOptions, optz interface{}, respf func() (interface{}, error)) { +func (s *Server) zReq(c *client, reply string, rmsg []byte, fOpts *EventFilterOptions, optz interface{}, respf func() (interface{}, error)) { if !s.EventsEnabled() || reply == _EMPTY_ { return } @@ -1206,6 +1260,7 @@ func (s *Server) zReq(reply string, msg []byte, fOpts *EventFilterOptions, optz response := map[string]interface{}{"server": server} var err error status := 0 + _, msg := c.msgParts(rmsg) if len(msg) != 0 { if err = json.Unmarshal(msg, optz); err != nil { status = http.StatusBadRequest // status is only included on error, so record how far execution got @@ -1230,7 +1285,7 @@ func (s *Server) zReq(reply string, msg []byte, fOpts *EventFilterOptions, optz } // remoteConnsUpdate gets called when we receive a remote update from another server. 
-func (s *Server) remoteConnsUpdate(sub *subscription, _ *client, subject, reply string, msg []byte) { +func (s *Server) remoteConnsUpdate(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) { if !s.eventsRunning() { return } @@ -1408,18 +1463,20 @@ func (s *Server) accountConnectEvent(c *client) { Time: time.Now().UTC(), }, Client: ClientInfo{ - Start: &c.start, - Host: c.host, - ID: c.cid, - Account: accForClient(c), - User: c.getRawAuthUser(), - Name: c.opts.Name, - Lang: c.opts.Lang, - Version: c.opts.Version, - Jwt: c.opts.JWT, - IssuerKey: issuerForClient(c), - Tags: c.tags, - NameTag: c.nameTag, + Start: &c.start, + Host: c.host, + ID: c.cid, + Account: accForClient(c), + User: c.getRawAuthUser(), + Name: c.opts.Name, + Lang: c.opts.Lang, + Version: c.opts.Version, + Jwt: c.opts.JWT, + IssuerKey: issuerForClient(c), + Tags: c.tags, + NameTag: c.nameTag, + Kind: c.kindString(), + ClientType: c.clientTypeString(), }, } c.mu.Unlock() @@ -1455,20 +1512,22 @@ func (s *Server) accountDisconnectEvent(c *client, now time.Time, reason string) Time: now, }, Client: ClientInfo{ - Start: &c.start, - Stop: &now, - Host: c.host, - ID: c.cid, - Account: accForClient(c), - User: c.getRawAuthUser(), - Name: c.opts.Name, - Lang: c.opts.Lang, - Version: c.opts.Version, - RTT: c.getRTT(), - Jwt: c.opts.JWT, - IssuerKey: issuerForClient(c), - Tags: c.tags, - NameTag: c.nameTag, + Start: &c.start, + Stop: &now, + Host: c.host, + ID: c.cid, + Account: accForClient(c), + User: c.getRawAuthUser(), + Name: c.opts.Name, + Lang: c.opts.Lang, + Version: c.opts.Version, + RTT: c.getRTT(), + Jwt: c.opts.JWT, + IssuerKey: issuerForClient(c), + Tags: c.tags, + NameTag: c.nameTag, + Kind: c.kindString(), + ClientType: c.clientTypeString(), }, Sent: DataStats{ Msgs: atomic.LoadInt64(&c.inMsgs), @@ -1504,20 +1563,22 @@ func (s *Server) sendAuthErrorEvent(c *client) { Time: now, }, Client: ClientInfo{ - Start: &c.start, - Stop: &now, - Host: c.host, - ID: c.cid, - 
Account: accForClient(c), - User: c.getRawAuthUser(), - Name: c.opts.Name, - Lang: c.opts.Lang, - Version: c.opts.Version, - RTT: c.getRTT(), - Jwt: c.opts.JWT, - IssuerKey: issuerForClient(c), - Tags: c.tags, - NameTag: c.nameTag, + Start: &c.start, + Stop: &now, + Host: c.host, + ID: c.cid, + Account: accForClient(c), + User: c.getRawAuthUser(), + Name: c.opts.Name, + Lang: c.opts.Lang, + Version: c.opts.Version, + RTT: c.getRTT(), + Jwt: c.opts.JWT, + IssuerKey: issuerForClient(c), + Tags: c.tags, + NameTag: c.nameTag, + Kind: c.kindString(), + ClientType: c.clientTypeString(), }, Sent: DataStats{ Msgs: c.inMsgs, @@ -1537,9 +1598,10 @@ func (s *Server) sendAuthErrorEvent(c *client) { s.mu.Unlock() } -// Internal message callback. If the msg is needed past the callback it is -// required to be copied. -type msgHandler func(sub *subscription, client *client, subject, reply string, msg []byte) +// Internal message callback. +// If the msg is needed past the callback it is required to be copied. +// rmsg contains header and the message. use client.msgParts(rmsg) to split them apart +type msgHandler func(sub *subscription, client *client, acc *Account, subject, reply string, rmsg []byte) // Create an internal subscription. sysSubscribeQ for queue groups. func (s *Server) sysSubscribe(subject string, cb msgHandler) (*subscription, error) { @@ -1615,7 +1677,7 @@ func remoteLatencySubjectForResponse(subject []byte) string { } // remoteLatencyUpdate is used to track remote latency measurements for tracking on exported services. 
-func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, subject, _ string, msg []byte) { +func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) { if !s.eventsRunning() { return } @@ -1678,7 +1740,7 @@ func (s *Server) remoteLatencyUpdate(sub *subscription, _ *client, subject, _ st // This is used for all inbox replies so that we do not send supercluster wide interest // updates for every request. Same trick used in modern NATS clients. -func (s *Server) inboxReply(sub *subscription, c *client, subject, reply string, msg []byte) { +func (s *Server) inboxReply(sub *subscription, c *client, acc *Account, subject, reply string, msg []byte) { s.mu.Lock() if !s.eventsEnabled() || s.sys.replies == nil { s.mu.Unlock() @@ -1688,7 +1750,7 @@ func (s *Server) inboxReply(sub *subscription, c *client, subject, reply string, s.mu.Unlock() if ok && cb != nil { - cb(sub, c, subject, reply, msg) + cb(sub, c, acc, subject, reply, msg) } } @@ -1751,7 +1813,7 @@ func totalSubs(rr *SublistResult, qg []byte) (nsubs int32) { // Allows users of large systems to debug active subscribers for a given subject. // Payload should be the subject of interest. -func (s *Server) debugSubscribers(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) debugSubscribers(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { // Even though this is an internal only subscription, meaning interest was not forwarded, we could // get one here from a GW in optimistic mode. Ignore for now. // FIXME(dlc) - Should we send no interest here back to the GW? @@ -1818,7 +1880,7 @@ func (s *Server) debugSubscribers(sub *subscription, c *client, subject, reply s // Create direct reply inbox that we multiplex under the WC replies. replySubj := s.newRespInbox() // Store our handler. 
- s.sys.replies[replySubj] = func(sub *subscription, _ *client, subject, _ string, msg []byte) { + s.sys.replies[replySubj] = func(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) { if n, err := strconv.Atoi(string(msg)); err == nil { atomic.AddInt32(&nsubs, int32(n)) } @@ -1861,7 +1923,7 @@ func (s *Server) debugSubscribers(sub *subscription, c *client, subject, reply s // Request for our local subscription count. This will come from a remote origin server // that received the initial request. -func (s *Server) nsubsRequest(sub *subscription, _ *client, subject, reply string, msg []byte) { +func (s *Server) nsubsRequest(sub *subscription, _ *client, _ *Account, subject, reply string, msg []byte) { if !s.eventsRunning() { return } @@ -1911,7 +1973,7 @@ func issuerForClient(c *client) (issuerKey string) { return } issuerKey = c.user.SigningKey - if issuerKey == "" && c.user.Account != nil { + if issuerKey == _EMPTY_ && c.user.Account != nil { issuerKey = c.user.Account.Name } return diff --git a/vendor/github.com/nats-io/nats-server/v2/server/filestore.go b/vendor/github.com/nats-io/nats-server/v2/server/filestore.go index 4036e5bf..1696d944 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/filestore.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/filestore.go @@ -16,6 +16,8 @@ package server import ( "archive/tar" "bytes" + "crypto/cipher" + "crypto/rand" "crypto/sha256" "encoding/binary" "encoding/hex" @@ -25,7 +27,6 @@ import ( "hash" "io" "io/ioutil" - "math/rand" "net" "os" "path" @@ -36,6 +37,8 @@ import ( "github.com/klauspost/compress/s2" "github.com/minio/highwayhash" + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/chacha20poly1305" ) type FileStoreConfig struct { @@ -79,16 +82,17 @@ type fileStore struct { syncTmr *time.Timer cfg FileStreamInfo fcfg FileStoreConfig + prf keyGen + aek cipher.AEAD lmb *msgBlock blks []*msgBlock hh hash.Hash64 qch chan struct{} cfs []*consumerFileStore - fsi 
map[string]seqSlice - fsis *simpleState + sips int closed bool fip bool - sips int + tms bool } // Represents a message store block and its data. @@ -98,15 +102,21 @@ type msgBlock struct { last msgId mu sync.RWMutex fs *fileStore + aek cipher.AEAD + bek *chacha20.Cipher + seed []byte + nonce []byte mfn string mfd *os.File ifn string ifd *os.File liwsz int64 - index uint64 // User visible message count. + index uint64 bytes uint64 // User visible bytes count. rbytes uint64 // Total bytes (raw) including deleted. Used for rolling to new blk. - msgs uint64 + msgs uint64 // User visible message count. + fss map[string]*SimpleState + sfn string lwits int64 lwts int64 llts int64 @@ -170,6 +180,10 @@ const ( blkScan = "%d.blk" // used to scan index file names. indexScan = "%d.idx" + // used to load per subject meta information. + fssScan = "%d.fss" + // used to store our block encryption key. + keyScan = "%d.key" // This is where we keep state on consumers. consumerDir = "obs" // Index file for a consumer. @@ -188,9 +202,15 @@ const ( coalesceMinimum = 16 * 1024 // maxFlushWait is maximum we will wait to gather messages to flush. maxFlushWait = 8 * time.Millisecond + // Metafiles for streams and consumers. JetStreamMetaFile = "meta.inf" JetStreamMetaFileSum = "meta.sum" + JetStreamMetaFileKey = "meta.key" + + // AEK key sizes + metaKeySize = 72 + blkKeySize = 72 // Default stream block size. 
defaultStreamBlockSize = 16 * 1024 * 1024 // 16MB @@ -205,10 +225,10 @@ const ( ) func newFileStore(fcfg FileStoreConfig, cfg StreamConfig) (*fileStore, error) { - return newFileStoreWithCreated(fcfg, cfg, time.Now().UTC()) + return newFileStoreWithCreated(fcfg, cfg, time.Now().UTC(), nil) } -func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created time.Time) (*fileStore, error) { +func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created time.Time, prf keyGen) (*fileStore, error) { if cfg.Name == _EMPTY_ { return nil, fmt.Errorf("name required") } @@ -247,6 +267,7 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim fs := &fileStore{ fcfg: fcfg, cfg: FileStreamInfo{Created: created, StreamConfig: cfg}, + prf: prf, qch: make(chan struct{}), } @@ -270,23 +291,14 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim return nil, fmt.Errorf("could not create hash: %v", err) } + // Always track per subject information. + fs.tms = true + // Recover our message state. if err := fs.recoverMsgs(); err != nil { return nil, err } - // Check to see if we have lots of messages and existing consumers. - // If they could be filtered we should generate an index here. - const lowWaterMarkMsgs = 8192 - if fs.state.Msgs > lowWaterMarkMsgs { - // If we have one subject that is not a wildcard we can skip. - if !(len(cfg.Subjects) == 1 && subjectIsLiteral(cfg.Subjects[0])) { - if ofis, _ := ioutil.ReadDir(odir); len(ofis) > 0 { - fs.genFilterIndex() - } - } - } - // Write our meta data iff does not exist. meta := path.Join(fcfg.StoreDir, JetStreamMetaFile) if _, err := os.Stat(meta); err != nil && os.IsNotExist(err) { @@ -294,6 +306,16 @@ func newFileStoreWithCreated(fcfg FileStoreConfig, cfg StreamConfig, created tim return nil, err } } + // If we expect to be encrypted check that what we are restoring is not plaintext. + // This can happen on snapshot restores or conversions. 
+ if fs.prf != nil { + keyFile := path.Join(fs.fcfg.StoreDir, JetStreamMetaFileKey) + if _, err := os.Stat(keyFile); err != nil && os.IsNotExist(err) { + if err := fs.writeStreamMeta(); err != nil { + return nil, err + } + } + } fs.syncTmr = time.AfterFunc(fs.fcfg.SyncInterval, fs.syncBlocks) @@ -304,7 +326,6 @@ func (fs *fileStore) UpdateConfig(cfg *StreamConfig) error { if fs.isClosed() { return ErrStoreClosed } - if cfg.Name == _EMPTY_ { return fmt.Errorf("name required") } @@ -367,9 +388,53 @@ func dynBlkSize(retention RetentionPolicy, maxBytes int64) uint64 { } } +// Generate an asset encryption key from the context and server PRF. +func (fs *fileStore) genEncryptionKeys(context string) (aek cipher.AEAD, bek *chacha20.Cipher, seed, encrypted []byte, err error) { + if fs.prf == nil { + return nil, nil, nil, nil, errNoEncryption + } + // Generate key encryption key. + kek, err := chacha20poly1305.NewX(fs.prf([]byte(context))) + if err != nil { + return nil, nil, nil, nil, err + } + // Generate random asset encryption key seed. + seed = make([]byte, 32) + rand.Read(seed) + aek, err = chacha20poly1305.NewX(seed) + if err != nil { + return nil, nil, nil, nil, err + } + + // Generate our nonce. Use same buffer to hold encrypted seed. + nonce := make([]byte, kek.NonceSize(), kek.NonceSize()+len(seed)+kek.Overhead()) + rand.Read(nonce) + bek, err = chacha20.NewUnauthenticatedCipher(seed[:], nonce) + if err != nil { + return nil, nil, nil, nil, err + } + + return aek, bek, seed, kek.Seal(nonce, nonce, seed, nil), nil +} + // Write out meta and the checksum. // Lock should be held. 
func (fs *fileStore) writeStreamMeta() error { + if fs.prf != nil && fs.aek == nil { + key, _, _, encrypted, err := fs.genEncryptionKeys(fs.cfg.Name) + if err != nil { + return err + } + fs.aek = key + keyFile := path.Join(fs.fcfg.StoreDir, JetStreamMetaFileKey) + if _, err := os.Stat(keyFile); err != nil && !os.IsNotExist(err) { + return err + } + if err := ioutil.WriteFile(keyFile, encrypted, defaultFilePerms); err != nil { + return err + } + } + meta := path.Join(fs.fcfg.StoreDir, JetStreamMetaFile) if _, err := os.Stat(meta); err != nil && !os.IsNotExist(err) { return err @@ -378,6 +443,13 @@ func (fs *fileStore) writeStreamMeta() error { if err != nil { return err } + // Encrypt if needed. + if fs.aek != nil { + nonce := make([]byte, fs.aek.NonceSize(), fs.aek.NonceSize()+len(b)+fs.aek.Overhead()) + rand.Read(nonce) + b = fs.aek.Seal(nonce, nonce, b, nil) + } + if err := ioutil.WriteFile(meta, b, defaultFilePerms); err != nil { return err } @@ -397,56 +469,121 @@ const checksumSize = 8 // This is the max room needed for index header. const indexHdrSize = 7*binary.MaxVarintLen64 + hdrLen + checksumSize -func (fs *fileStore) recoverMsgBlock(fi os.FileInfo, index uint64) *msgBlock { +func (fs *fileStore) recoverMsgBlock(fi os.FileInfo, index uint64) (*msgBlock, error) { mb := &msgBlock{fs: fs, index: index, cexp: fs.fcfg.CacheExpire} mdir := path.Join(fs.fcfg.StoreDir, msgDir) mb.mfn = path.Join(mdir, fi.Name()) mb.ifn = path.Join(mdir, fmt.Sprintf(indexScan, index)) + mb.sfn = path.Join(mdir, fmt.Sprintf(fssScan, index)) if mb.hh == nil { key := sha256.Sum256(fs.hashKeyForBlock(index)) mb.hh, _ = highwayhash.New64(key[:]) } + var createdKeys bool + + // Check if encryption is enabled. + if fs.prf != nil { + ekey, err := ioutil.ReadFile(path.Join(mdir, fmt.Sprintf(keyScan, mb.index))) + if err != nil { + // We do not seem to have keys even though we should. Could be a plaintext conversion. + // Create the keys and we will double check below. 
+ if err := fs.genEncryptionKeysForBlock(mb); err != nil { + return nil, err + } + createdKeys = true + } else { + if len(ekey) != blkKeySize { + return nil, errBadKeySize + } + // Recover key encryption key. + kek, err := chacha20poly1305.NewX(fs.prf([]byte(fmt.Sprintf("%s:%d", fs.cfg.Name, mb.index)))) + if err != nil { + return nil, err + } + ns := kek.NonceSize() + seed, err := kek.Open(nil, ekey[:ns], ekey[ns:], nil) + if err != nil { + return nil, err + } + mb.seed, mb.nonce = seed, ekey[:ns] + if mb.aek, err = chacha20poly1305.NewX(seed); err != nil { + return nil, err + } + if mb.bek, err = chacha20.NewUnauthenticatedCipher(seed, ekey[:ns]); err != nil { + return nil, err + } + } + } + + // If we created keys here, let's check the data and if it is plaintext convert here. + if createdKeys { + buf, err := mb.loadBlock(nil) + if err != nil { + return nil, err + } + if err := mb.indexCacheBuf(buf); err != nil { + // This likely indicates this was already encrypted or corrupt. + mb.cache = nil + return nil, err + } + // Undo cache from above for later. + mb.cache = nil + wbek, err := chacha20.NewUnauthenticatedCipher(mb.seed, mb.nonce) + if err != nil { + return nil, err + } + wbek.XORKeyStream(buf, buf) + if err := ioutil.WriteFile(mb.mfn, buf, defaultFilePerms); err != nil { + return nil, err + } + // Remove the index file here since it will be in plaintext as well so we just rebuild. + os.Remove(mb.ifn) + } + // Open up the message file, but we will try to recover from the index file. // We will check that the last checksums match. file, err := os.Open(mb.mfn) if err != nil { - return nil + return nil, err } defer file.Close() - if fi, _ := file.Stat(); fi != nil { + if fi, err := file.Stat(); fi != nil { mb.rbytes = uint64(fi.Size()) } else { - return nil + return nil, err } + // Grab last checksum from main block file. + var lchk [8]byte + file.ReadAt(lchk[:], fi.Size()-8) + file.Close() // Read our index file. Use this as source of truth if possible. 
if err := mb.readIndexInfo(); err == nil { // Quick sanity check here. // Note this only checks that the message blk file is not newer then this file. - var lchk [8]byte - file.ReadAt(lchk[:], fi.Size()-8) if bytes.Equal(lchk[:], mb.lchk[:]) { + if fs.tms { + mb.readPerSubjectInfo() + } fs.blks = append(fs.blks, mb) - return mb + return mb, nil } } - // Close here since we need to rebuild state. - file.Close() - // If we get data loss rebuilding the message block state record that with the fs itself. if ld, _ := mb.rebuildState(); ld != nil { fs.rebuildState(ld) } + // Rewrite this to make sure we are sync'd. mb.writeIndexInfo() fs.blks = append(fs.blks, mb) fs.lmb = mb - return mb + return mb, nil } func (fs *fileStore) lostData() *LostStreamData { @@ -492,14 +629,25 @@ func (mb *msgBlock) rebuildState() (*LostStreamData, error) { startLastSeq := mb.last.seq // Clear state we need to rebuild. - mb.msgs, mb.bytes, mb.rbytes = 0, 0, 0 + mb.msgs, mb.bytes, mb.rbytes, mb.fss = 0, 0, 0, nil mb.last.seq, mb.last.ts = 0, 0 firstNeedsSet := true - buf, err := ioutil.ReadFile(mb.mfn) + buf, err := mb.loadBlock(nil) if err != nil { return nil, err } + + // Check if we need to decrypt. + if mb.bek != nil && len(buf) > 0 { + // Recreate to reset counter. + mb.bek, err = chacha20.NewUnauthenticatedCipher(mb.seed, mb.nonce) + if err != nil { + return nil, err + } + mb.bek.XORKeyStream(buf, buf) + } + mb.rbytes = uint64(len(buf)) addToDmap := func(seq uint64) { @@ -547,6 +695,11 @@ func (mb *msgBlock) rebuildState() (*LostStreamData, error) { return &ld } + // Rebuild per subject info. 
+ if mb.fs.tms { + mb.fss = make(map[string]*SimpleState) + } + for index, lbuf := uint32(0), uint32(len(buf)); index < lbuf; { if index+msgHdrSize >= lbuf { truncate(index) @@ -600,8 +753,8 @@ func (mb *msgBlock) rebuildState() (*LostStreamData, error) { } if !deleted { + data := buf[index+msgHdrSize : index+rl] if hh := mb.hh; hh != nil { - data := buf[index+msgHdrSize : index+rl] hh.Reset() hh.Write(hdr[4:20]) hh.Write(data[:slen]) @@ -629,8 +782,20 @@ func (mb *msgBlock) rebuildState() (*LostStreamData, error) { mb.msgs++ mb.bytes += uint64(rl) mb.rbytes += uint64(rl) - } + // Do per subject info. + if mb.fss != nil { + if subj := string(data[:slen]); len(subj) > 0 { + if ss := mb.fss[subj]; ss != nil { + ss.Msgs++ + ss.Last = seq + } else { + mb.fss[subj] = &SimpleState{Msgs: 1, First: seq, Last: seq} + } + } + } + } + // Advance to next record. index += rl } @@ -663,7 +828,7 @@ func (fs *fileStore) recoverMsgs() error { for _, fi := range fis { var index uint64 if n, err := fmt.Sscanf(fi.Name(), blkScan, &index); err == nil && n == 1 { - if mb := fs.recoverMsgBlock(fi, index); mb != nil { + if mb, err := fs.recoverMsgBlock(fi, index); err == nil && mb != nil { if fs.state.FirstSeq == 0 || mb.first.seq < fs.state.FirstSeq { fs.state.FirstSeq = mb.first.seq fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC() @@ -674,6 +839,8 @@ func (fs *fileStore) recoverMsgs() error { } fs.state.Msgs += mb.msgs fs.state.Bytes += mb.bytes + } else { + return err } } } @@ -696,14 +863,122 @@ func (fs *fileStore) recoverMsgs() error { fs.enforceBytesLimit() // Do age checks too, make sure to call in place. - if fs.cfg.MaxAge != 0 && fs.state.Msgs > 0 { + if fs.cfg.MaxAge != 0 { + fs.expireMsgsOnRecover() fs.startAgeChk() - fs.expireMsgsLocked() } return nil } +// Will expire msgs that have aged out on restart. +// We will treat this differently in case we have a recovery +// that will expire alot of messages on startup. Should only be called +// on startup. Lock should be held. 
+func (fs *fileStore) expireMsgsOnRecover() { + if fs.state.Msgs == 0 { + return + } + + var minAge = time.Now().UnixNano() - int64(fs.cfg.MaxAge) + var purged, bytes uint64 + var deleted int + + for _, mb := range fs.blks { + mb.mu.Lock() + if minAge < mb.first.ts { + mb.mu.Unlock() + break + } + // Can we remove whole block here? + if mb.last.ts <= minAge { + purged += mb.msgs + bytes += mb.bytes + mb.dirtyCloseWithRemove(true) + newFirst := mb.last.seq + 1 + mb.mu.Unlock() + // Update fs first here as well. + fs.state.FirstSeq = newFirst + fs.state.FirstTime = time.Time{} + deleted++ + continue + } + + // If we are here we have to process the interior messages of this blk. + if err := mb.loadMsgsWithLock(); err != nil { + mb.mu.Unlock() + break + } + + // Walk messages and remove if expired. + for seq := mb.first.seq; seq <= mb.last.seq; seq++ { + sm, err := mb.cacheLookupWithLock(seq) + // Process interior deleted msgs. + if err == errDeletedMsg { + // Update dmap. + if len(mb.dmap) > 0 { + delete(mb.dmap, seq) + if len(mb.dmap) == 0 { + mb.dmap = nil + } + } + continue + } + // Break on other errors. + if err != nil || sm == nil { + break + } + + // No error and sm != nil from here onward. + + // Check for done. + if sm.ts > minAge { + mb.first.seq = sm.seq + mb.first.ts = sm.ts + break + } + + // Delete the message here. + sz := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg) + mb.bytes -= sz + bytes += sz + mb.msgs-- + purged++ + // Update fss + mb.removeSeqPerSubject(sm.subj, seq) + } + + // Check if empty after processing, could happen if tail of messages are all deleted. + isEmpty := mb.msgs == 0 + if isEmpty { + mb.dirtyCloseWithRemove(true) + // Update fs first here as well. + fs.state.FirstSeq = mb.last.seq + 1 + fs.state.FirstTime = time.Time{} + deleted++ + } else { + // Update fs first seq and time. 
+ fs.state.FirstSeq = mb.first.seq + fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC() + } + mb.mu.Unlock() + + if !isEmpty { + // Make sure to write out our index info. + mb.writeIndexInfo() + } + break + } + + if deleted > 0 { + // Update blks slice. + fs.blks = append(fs.blks[:0:0], fs.blks[deleted:]...) + } + // Update top level accounting. + fs.state.Msgs -= purged + fs.state.Bytes -= bytes +} + // GetSeqFromTime looks for the first sequence number that has // the message with >= timestamp. // FIXME(dlc) - inefficient, and dumb really. Make this better. @@ -738,106 +1013,251 @@ func (fs *fileStore) GetSeqFromTime(t time.Time) uint64 { return 0 } -type seqSlice []uint64 - -func (x seqSlice) Len() int { return len(x) } -func (x seqSlice) Less(i, j int) bool { return x[i] < x[j] } -func (x seqSlice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x seqSlice) Search(n uint64) int { - return sort.Search(len(x), func(i int) bool { return x[i] >= n }) +// This will traverse a message block and generate the filtered pending. +func (mb *msgBlock) filteredPending(subj string, wc bool, seq uint64) (total, first, last uint64) { + mb.mu.Lock() + defer mb.mu.Unlock() + return mb.filteredPendingLocked(subj, wc, seq) } -type simpleState struct { - msgs, first, last uint64 -} +// This will traverse a message block and generate the filtered pending. +// Lock should be held. +func (mb *msgBlock) filteredPendingLocked(subj string, wc bool, seq uint64) (total, first, last uint64) { + if mb.fss == nil { + return 0, 0, 0 + } + + subs := []string{subj} + // If we have a wildcard match against all tracked subjects we know about. + if wc { + subs = subs[:0] + for fsubj := range mb.fss { + if subjectIsSubsetMatch(fsubj, subj) { + subs = append(subs, fsubj) + } + } + } + // If we load the cache for a linear scan we want to expire that cache upon exit. 
+ var shouldExpire bool -// This will generate an index for us on startup to determine num pending for -// filtered consumers easier. -func (fs *fileStore) genFilterIndex() { - fs.mu.Lock() - defer fs.mu.Unlock() + update := func(ss *SimpleState) { + total += ss.Msgs + if first == 0 || ss.First < first { + first = ss.First + } + if ss.Last > last { + last = ss.Last + } + } - fsi := make(map[string]seqSlice) + for i, subj := range subs { + // If the starting seq is less then or equal that means we want all and we do not need to load any messages. + ss := mb.fss[subj] + if ss == nil { + continue + } - for _, mb := range fs.blks { - mb.loadMsgs() - mb.mu.Lock() - fseq, lseq := mb.first.seq, mb.last.seq - for seq := fseq; seq <= lseq; seq++ { - if sm, err := mb.cacheLookupWithLock(seq); sm != nil && err == nil { - fsi[sm.subj] = append(fsi[sm.subj], seq) + // If the seq we are starting at is less then the simple state's first sequence we can just return the total msgs. + if seq <= ss.First { + update(ss) + continue + } + + // We may need to scan this one block since we have a partial set to consider. + // If we are all inclusive then we can do simple math and avoid the scan. + if allInclusive := ss.Msgs == ss.Last-ss.First+1; allInclusive { + update(ss) + // Make sure to compensate for the diff from the head. + if seq > ss.First { + first, total = seq, total-(seq-ss.First) } + continue } - // Expire this cache before moving on. - mb.llts = 0 - mb.expireCacheLocked() - mb.mu.Unlock() - } - fs.fsi = fsi - fs.fsis = &simpleState{fs.state.Msgs, fs.state.FirstSeq, fs.state.LastSeq} -} + // We need to scan this block to compute the correct number of pending for this block. + // We want to only do this once so we will adjust subs and test against them all here. -// Clears out the filter index. 
-func (fs *fileStore) clearFilterIndex() { - fs.mu.Lock() - fs.fsi, fs.fsis = nil, nil - fs.mu.Unlock() -} + if !mb.cacheAlreadyLoaded() { + mb.loadMsgsWithLock() + shouldExpire = true + } -// Fetch our num filtered pending from our index. -// Lock should be held. -func (fs *fileStore) getNumFilteredPendingFromIndex(sseq uint64, subj string) (uint64, error) { - cstate := simpleState{fs.state.Msgs, fs.state.FirstSeq, fs.state.LastSeq} - if fs.fsis == nil || *fs.fsis != cstate { - fs.fsi, fs.fsis = nil, nil - return 0, errors.New("state changed, index not valid") - } - var total uint64 - for tsubj, seqs := range fs.fsi { - if subjectIsSubsetMatch(tsubj, subj) { - total += uint64(len(seqs[seqs.Search(sseq):])) + subs = subs[i:] + var all, lseq uint64 + // Grab last applicable sequence as a union of all applicable subjects. + for _, subj := range subs { + if ss := mb.fss[subj]; ss != nil { + all += ss.Msgs + if ss.Last > lseq { + lseq = ss.Last + } + } + } + numScanIn, numScanOut := lseq-seq, seq-mb.first.seq + + isMatch := func(seq uint64) bool { + if sm, _ := mb.cacheLookupWithLock(seq); sm != nil { + if len(subs) == 1 && sm.subj == subs[0] { + return true + } + for _, subj := range subs { + if sm.subj == subj { + return true + } + } + } + return false + } + + // Decide on whether to scan those included or those excluded based on which scan amount is less. + if numScanIn < numScanOut { + for tseq := seq; tseq <= lseq; tseq++ { + if isMatch(tseq) { + total++ + if first == 0 || tseq < first { + first = tseq + } + last = tseq + } + } + } else { + // Here its more efficient to scan the out nodes. + var discard uint64 + for tseq := mb.first.seq; tseq < seq; tseq++ { + if isMatch(tseq) { + discard++ + } + } + total += (all - discard) + // Now make sure we match our first + for tseq := seq; tseq <= lseq; tseq++ { + if isMatch(tseq) { + first = tseq + break + } + } } + // We can bail since we scanned all remaining in this pass. 
+ break } - return total, nil + + // If we loaded this block for this operation go ahead and expire it here. + if shouldExpire { + mb.expireCacheLocked() + } + + return total, first, last } -// Returns number of messages matching the subject starting at sequence sseq. -func (fs *fileStore) NumFilteredPending(sseq uint64, subj string) (total uint64) { +// FilteredState will return the SimpleState associated with the filtered subject and a proposed starting sequence. +func (fs *fileStore) FilteredState(sseq uint64, subj string) SimpleState { fs.mu.RLock() lseq := fs.state.LastSeq if sseq < fs.state.FirstSeq { sseq = fs.state.FirstSeq } - if fs.fsi != nil { - if np, err := fs.getNumFilteredPendingFromIndex(sseq, subj); err == nil { - fs.mu.RUnlock() - return np - } - } fs.mu.RUnlock() - if subj == _EMPTY_ { - if sseq <= lseq { - return lseq - sseq + var ss SimpleState + + // If past the end no results. + if sseq > lseq { + return ss + } + + // If subj is empty or we are not tracking multiple subjects. + if subj == _EMPTY_ || subj == fwcs || !fs.tms { + total := lseq - sseq + 1 + if state := fs.State(); len(state.Deleted) > 0 { + for _, dseq := range state.Deleted { + if dseq >= sseq && dseq <= lseq { + total-- + } + } } - return 0 + ss.Msgs, ss.First, ss.Last = total, sseq, lseq + return ss } - var eq func(string, string) bool - if subjectHasWildcard(subj) { - eq = subjectIsSubsetMatch + wc := subjectHasWildcard(subj) + // Are we tracking multiple subject states? + if fs.tms { + for _, mb := range fs.blks { + // Skip blocks that are less than our starting sequence. + if sseq > atomic.LoadUint64(&mb.last.seq) { + continue + } + t, f, l := mb.filteredPending(subj, wc, sseq) + ss.Msgs += t + if ss.First == 0 || (f > 0 && f < ss.First) { + ss.First = f + } + if l > ss.Last { + ss.Last = l + } + } } else { - eq = func(a, b string) bool { return a == b } + // Fallback to linear scan. 
+ eq := compareFn(subj) + for seq := sseq; seq <= lseq; seq++ { + if sm, _ := fs.msgForSeq(seq); sm != nil && eq(sm.subj, subj) { + ss.Msgs++ + if ss.First == 0 { + ss.First = seq + } + ss.Last = seq + } + } } - for seq := sseq; seq <= lseq; seq++ { - if sm, _ := fs.msgForSeq(seq); sm != nil && eq(sm.subj, subj) { - total++ + return ss +} + +// Will gather complete filtered state for the subject. +// Lock should be held. +func (fs *fileStore) perSubjectState(subj string) (total, first, last uint64) { + if !fs.tms { + return + } + wc := subjectHasWildcard(subj) + for _, mb := range fs.blks { + t, f, l := mb.filteredPending(subj, wc, 1) + total += t + if first == 0 || (f > 0 && f < first) { + first = f + } + if l > last { + last = l } } - return total + return total, first, last +} + +// SubjectsState returns a map of SimpleState for all matching subjects. +func (fs *fileStore) SubjectsState(subject string) map[string]SimpleState { + fs.mu.RLock() + defer fs.mu.RUnlock() + + if !fs.tms || fs.state.Msgs == 0 { + return nil + } + + fss := make(map[string]SimpleState) + for _, mb := range fs.blks { + mb.mu.RLock() + for subj, ss := range mb.fss { + oss := fss[subj] + if oss.First == 0 { // New + fss[subj] = *ss + } else { + // Merge here. + oss.Last, oss.Msgs = ss.Last, oss.Msgs+ss.Msgs + fss[subj] = oss + } + } + mb.mu.RUnlock() + } + return fss } // RegisterStorageUpdates registers a callback for updates to storage changes. @@ -864,9 +1284,7 @@ func (mb *msgBlock) setupWriteCache(buf []byte) { if mb.cache != nil { return } - if buf != nil { - buf = buf[:0] - } + // Setup simple cache. mb.cache = &cache{buf: buf} // Make sure we set the proper cache offset if we have existing data. var fi os.FileInfo @@ -884,8 +1302,9 @@ func (mb *msgBlock) setupWriteCache(buf []byte) { // This rolls to a new append msg block. // Lock should be held. 
func (fs *fileStore) newMsgBlockForWrite() (*msgBlock, error) { - var mbuf []byte index := uint64(1) + var rbuf []byte + if lmb := fs.lmb; lmb != nil { index = lmb.index + 1 @@ -894,8 +1313,15 @@ func (fs *fileStore) newMsgBlockForWrite() (*msgBlock, error) { // Reset write timestamp and see if we can expire this cache. lmb.mu.Lock() lmb.closeFDsLocked() - lmb.lwts = 0 - mbuf = lmb.expireCacheLocked() + if lmb.cache != nil { + lmb.lwts = 0 + buf, llts := lmb.cache.buf, lmb.llts + lmb.expireCacheLocked() + // We could check for a certain time since last load, but to be safe just reuse if no loads. + if llts == 0 && (lmb.cache == nil || lmb.cache.buf == nil) { + rbuf = buf + } + } lmb.mu.Unlock() } } @@ -904,7 +1330,10 @@ func (fs *fileStore) newMsgBlockForWrite() (*msgBlock, error) { // Lock should be held to quiet race detector. mb.mu.Lock() - mb.setupWriteCache(mbuf) + mb.setupWriteCache(rbuf) + if fs.tms { + mb.fss = make(map[string]*SimpleState) + } mb.mu.Unlock() // Now do local hash. @@ -932,11 +1361,21 @@ func (fs *fileStore) newMsgBlockForWrite() (*msgBlock, error) { } mb.ifd = ifd + // For subject based info. + mb.sfn = path.Join(mdir, fmt.Sprintf(fssScan, mb.index)) + + // Check if encryption is enabled. + if fs.prf != nil { + if err := fs.genEncryptionKeysForBlock(mb); err != nil { + return nil, err + } + } + // Set cache time to creation time to start. ts := time.Now().UnixNano() // Race detector wants these protected. mb.mu.Lock() - mb.llts, mb.lwts = ts, ts + mb.llts, mb.lwts = 0, ts mb.mu.Unlock() // Remember our last sequence number. @@ -955,6 +1394,27 @@ func (fs *fileStore) newMsgBlockForWrite() (*msgBlock, error) { return mb, nil } +// Generate the keys for this message block and write them out. 
+func (fs *fileStore) genEncryptionKeysForBlock(mb *msgBlock) error { + if mb == nil { + return nil + } + key, bek, seed, encrypted, err := fs.genEncryptionKeys(fmt.Sprintf("%s:%d", fs.cfg.Name, mb.index)) + if err != nil { + return err + } + mb.aek, mb.bek, mb.seed, mb.nonce = key, bek, seed, encrypted[:key.NonceSize()] + mdir := path.Join(fs.fcfg.StoreDir, msgDir) + keyFile := path.Join(mdir, fmt.Sprintf(keyScan, mb.index)) + if _, err := os.Stat(keyFile); err != nil && !os.IsNotExist(err) { + return err + } + if err := ioutil.WriteFile(keyFile, encrypted, defaultFilePerms); err != nil { + return err + } + return nil +} + // Make sure we can write to the last message block. // Lock should be held. func (fs *fileStore) enableLastMsgBlockForWriting() error { @@ -994,6 +1454,11 @@ func (fs *fileStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts in if fs.cfg.MaxBytes > 0 && fs.state.Bytes+uint64(len(msg)+len(hdr)) >= uint64(fs.cfg.MaxBytes) { return ErrMaxBytes } + if fs.cfg.MaxMsgsPer > 0 && len(subj) > 0 { + if msgs, _, _ := fs.perSubjectState(subj); msgs >= uint64(fs.cfg.MaxMsgsPer) { + return ErrMaxMsgsPerSubject + } + } } // Check sequence. @@ -1022,6 +1487,11 @@ func (fs *fileStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts in fs.state.LastSeq = seq fs.state.LastTime = now + // Enforce per message limits. + if fs.cfg.MaxMsgsPer > 0 && len(subj) > 0 { + fs.enforcePerSubjectLimit(subj) + } + // Limits checks and enforcement. // If they do any deletions they will update the // byte count on their own, so no need to compensate. @@ -1033,11 +1503,6 @@ func (fs *fileStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts in fs.startAgeChk() } - // If we had an index cache wipe that out. - if fs.fsi != nil { - fs.fsi, fs.fsis = nil, nil - } - return nil } @@ -1144,6 +1609,23 @@ func (fs *fileStore) rebuildFirst() { } } +// Will check the msg limit for this tracked subject. +// Lock should be held. 
+func (fs *fileStore) enforcePerSubjectLimit(subj string) { + if fs.closed || fs.sips > 0 || fs.cfg.MaxMsgsPer < 0 || !fs.tms { + return + } + for { + msgs, first, _ := fs.perSubjectState(subj) + if msgs <= uint64(fs.cfg.MaxMsgsPer) { + return + } + if ok, _ := fs.removeMsg(first, false, false); !ok { + break + } + } +} + // Will check the msg limit and drop firstSeq msg if needed. // Lock should be held. func (fs *fileStore) enforceMsgLimit() { @@ -1176,39 +1658,53 @@ func (fs *fileStore) enforceBytesLimit() { func (fs *fileStore) deleteFirstMsg() (bool, error) { fs.mu.Unlock() defer fs.mu.Lock() - return fs.removeMsg(fs.state.FirstSeq, false) + return fs.removeMsg(fs.state.FirstSeq, false, true) } // RemoveMsg will remove the message from this store. // Will return the number of bytes removed. func (fs *fileStore) RemoveMsg(seq uint64) (bool, error) { - return fs.removeMsg(seq, false) + return fs.removeMsg(seq, false, true) } func (fs *fileStore) EraseMsg(seq uint64) (bool, error) { - return fs.removeMsg(seq, true) + return fs.removeMsg(seq, true, true) } // Remove a message, optionally rewriting the mb file. -func (fs *fileStore) removeMsg(seq uint64, secure bool) (bool, error) { - fs.mu.Lock() +func (fs *fileStore) removeMsg(seq uint64, secure, needFSLock bool) (bool, error) { + fsLock := func() { + if needFSLock { + fs.mu.Lock() + } + } + fsUnlock := func() { + if needFSLock { + fs.mu.Unlock() + } + } + + fsLock() if fs.closed { - fs.mu.Unlock() + fsUnlock() return false, ErrStoreClosed } if fs.sips > 0 { - fs.mu.Unlock() + fsUnlock() return false, ErrStoreSnapshotInProgress } - + // If in encrypted mode negate secure rewrite here. 
+ if secure && fs.prf != nil { + secure = false + } mb := fs.selectMsgBlock(seq) if mb == nil { var err = ErrStoreEOF if seq <= fs.state.LastSeq { err = ErrStoreMsgNotFound } - fs.mu.Unlock() + fsUnlock() return false, err } @@ -1227,18 +1723,18 @@ func (fs *fileStore) removeMsg(seq uint64, secure bool) (bool, error) { // Check cache. This should be very rare. if mb.cache == nil || mb.cache.idx == nil || seq < mb.cache.fseq && mb.cache.off > 0 { mb.mu.Unlock() - fs.mu.Unlock() + fsUnlock() if err := mb.loadMsgs(); err != nil { return false, err } - fs.mu.Lock() + fsLock() mb.mu.Lock() } // See if the sequence numbers is still relevant. Check first and cache first. if seq < mb.first.seq || seq < mb.cache.fseq || (seq-mb.cache.fseq) >= uint64(len(mb.cache.idx)) { mb.mu.Unlock() - fs.mu.Unlock() + fsUnlock() return false, nil } @@ -1246,7 +1742,7 @@ func (fs *fileStore) removeMsg(seq uint64, secure bool) (bool, error) { if mb.dmap != nil { if _, ok := mb.dmap[seq]; ok { mb.mu.Unlock() - fs.mu.Unlock() + fsUnlock() return false, nil } } @@ -1263,15 +1759,23 @@ func (fs *fileStore) removeMsg(seq uint64, secure bool) (bool, error) { fs.state.Msgs-- fs.state.Bytes -= msz - // If we had an index cache wipe that out. - if fs.fsi != nil { - fs.fsi, fs.fsis = nil, nil - } - // Now local mb updates. mb.msgs-- mb.bytes -= msz + // If we are tracking multiple subjects here make sure we update that accounting. 
+ if mb.fss != nil { + if sm == nil { + if !mb.cacheAlreadyLoaded() { + mb.loadMsgsWithLock() + } + sm, _ = mb.cacheLookupWithLock(seq) + } + if sm != nil { + mb.removeSeqPerSubject(sm.subj, seq) + } + } + var shouldWriteIndex, firstSeqNeedsUpdate bool if secure { @@ -1302,8 +1806,7 @@ func (fs *fileStore) removeMsg(seq uint64, secure bool) (bool, error) { var qch, fch chan struct{} if shouldWriteIndex { - qch = mb.qch - fch = mb.fch + qch, fch = mb.qch, mb.fch } cb := fs.scb mb.mu.Unlock() @@ -1344,6 +1847,10 @@ func (fs *fileStore) removeMsg(seq uint64, secure bool) (bool, error) { cb(-1, -delta, seq, subj) } + if !needFSLock { + fs.mu.Lock() + } + return true, nil } @@ -1722,19 +2229,19 @@ func (mb *msgBlock) expireCache() { mb.expireCacheLocked() } -func (mb *msgBlock) expireCacheLocked() []byte { +func (mb *msgBlock) expireCacheLocked() { if mb.cache == nil { if mb.ctmr != nil { mb.ctmr.Stop() mb.ctmr = nil } - return nil + return } // Can't expire if we are flushing or still have pending. if mb.cache.flush || (len(mb.cache.buf)-int(mb.cache.wp) > 0) { mb.resetCacheExpireTimer(mb.cexp) - return nil + return } // Grab timestamp to compare. @@ -1749,12 +2256,11 @@ func (mb *msgBlock) expireCacheLocked() []byte { // Check for activity on the cache that would prevent us from expiring. if tns-bufts <= int64(mb.cexp) { mb.resetCacheExpireTimer(mb.cexp - time.Duration(tns-bufts)) - return nil + return } // If we are here we will at least expire the core msg buffer. // We need to capture offset in case we do a write next before a full load. - buf := mb.cache.buf mb.cache.off += len(mb.cache.buf) mb.cache.buf = nil mb.cache.wp = 0 @@ -1766,8 +2272,6 @@ func (mb *msgBlock) expireCacheLocked() []byte { } else { mb.resetCacheExpireTimer(mb.cexp) } - - return buf[:0] } func (fs *fileStore) startAgeChk() { @@ -1776,13 +2280,6 @@ func (fs *fileStore) startAgeChk() { } } -// Lock should be held. 
-func (fs *fileStore) expireMsgsLocked() { - fs.mu.Unlock() - fs.expireMsgs() - fs.mu.Lock() -} - // Lock should be held. func (fs *fileStore) resetAgeChk(delta int64) { fireIn := fs.cfg.MaxAge @@ -1811,7 +2308,7 @@ func (fs *fileStore) expireMsgs() { var sm *fileStoredMsg minAge := time.Now().UnixNano() - int64(fs.cfg.MaxAge) for sm, _ = fs.msgForSeq(0); sm != nil && sm.ts <= minAge; sm, _ = fs.msgForSeq(0) { - fs.removeMsg(sm.seq, false) + fs.removeMsg(sm.seq, false, true) } fs.mu.Lock() @@ -1920,11 +2417,21 @@ func (mb *msgBlock) writeMsgRecord(rl, seq uint64, subj string, mhdr, msg []byte // Set cache timestamp for last store. mb.lwts = ts // Decide if we write index info if flushing in place. - writeIndex := ts-mb.lwits > int64(time.Second) + writeIndex := ts-mb.lwits > int64(2*time.Second) // Accounting mb.updateAccounting(seq, ts, rl) + // Check if we are tracking per subject for our simple state. + if len(subj) > 0 && mb.fss != nil { + if ss := mb.fss[subj]; ss != nil { + ss.Msgs++ + ss.Last = seq + } else { + mb.fss[subj] = &SimpleState{Msgs: 1, First: seq, Last: seq} + } + } + fch, werr := mb.fch, mb.werr mb.mu.Unlock() @@ -2091,15 +2598,6 @@ func (fs *fileStore) syncBlocks() { } } - fs.mu.RLock() - cfs := append([]*consumerFileStore(nil), fs.cfs...) - fs.mu.RUnlock() - - // Do consumers. - for _, o := range cfs { - o.syncStateFile() - } - fs.mu.Lock() fs.syncTmr = time.AfterFunc(fs.fcfg.SyncInterval, fs.syncBlocks) fs.mu.Unlock() @@ -2179,11 +2677,10 @@ func (mb *msgBlock) indexCacheBuf(buf []byte) error { // Clear any headers bit that could be set. rl &^= hbit - dlen := int(rl) - msgHdrSize // Do some quick sanity checks here. - if dlen < 0 || int(slen) > dlen || dlen > int(rl) { + if dlen < 0 || int(slen) > dlen || dlen > int(rl) || rl > 32*1024*1024 { // This means something is off. // TODO(dlc) - Add into bad list? 
return errCorruptState @@ -2273,7 +2770,22 @@ func (mb *msgBlock) flushPendingMsgs() error { mfd := mb.mfd mb.mu.Unlock() - var n, tn int + var n int + + // Check if we need to encrypt. + if mb.bek != nil && lob > 0 { + const rsz = 32 * 1024 // 32k + var rdst [rsz]byte + var dst []byte + if lob > rsz { + dst = make([]byte, lob) + } else { + dst = rdst[:lob] + } + // Need to leave original alone. + mb.bek.XORKeyStream(dst, buf) + buf = dst + } // Append new data to the message block file. for lbb := lob; lbb > 0; lbb = len(buf) { @@ -2291,10 +2803,6 @@ func (mb *msgBlock) flushPendingMsgs() error { } return err } - - woff += int64(n) - tn += n - // Partial write. if n != lbb { buf = buf[n:] @@ -2304,6 +2812,9 @@ func (mb *msgBlock) flushPendingMsgs() error { } } + // Update our write offset. + woff += int64(lob) + // We did a successful write. // Re-acquire lock to update. mb.mu.Lock() @@ -2323,10 +2834,10 @@ func (mb *msgBlock) flushPendingMsgs() error { // Decide what we want to do with the buffer in hand. If we have load interest // we will hold onto the whole thing, otherwise empty the buffer, possibly reusing it. if ts := time.Now().UnixNano(); ts < mb.llts || (ts-mb.llts) <= int64(mb.cexp) { - mb.cache.wp += tn + mb.cache.wp += lob } else { - if cap(buf) <= maxBufReuse { - buf = buf[:0] + if cap(mb.cache.buf) <= maxBufReuse { + buf = mb.cache.buf[:0] } else { buf = nil } @@ -2359,7 +2870,42 @@ func (mb *msgBlock) loadMsgs() error { // We hold the lock here the whole time by design. mb.mu.Lock() defer mb.mu.Unlock() + return mb.loadMsgsWithLock() +} +// Lock should be held. +func (mb *msgBlock) cacheAlreadyLoaded() bool { + return mb.cache != nil && len(mb.cache.idx) == int(mb.msgs) && mb.cache.off == 0 && len(mb.cache.buf) > 0 +} + +// Used to load in the block contents. +// Lock should be held and all conditionals satisfied prior. 
+func (mb *msgBlock) loadBlock(buf []byte) ([]byte, error) { + f, err := os.Open(mb.mfn) + if err != nil { + return nil, err + } + defer f.Close() + + var sz int + if info, err := f.Stat(); err == nil { + sz64 := info.Size() + if int64(int(sz64)) == sz64 { + sz = int(sz64) + } + } + + if sz > cap(buf) { + buf = make([]byte, sz) + } else { + buf = buf[:sz] + } + + n, err := io.ReadFull(f, buf) + return buf[:n], err +} + +func (mb *msgBlock) loadMsgsWithLock() error { // Check to see if we are loading already. if mb.loading { return nil @@ -2378,11 +2924,10 @@ checkCache: } // Check to see if we have a full cache. - if mb.cache != nil && len(mb.cache.idx) == int(mb.msgs) && mb.cache.off == 0 && len(mb.cache.buf) > 0 { + if mb.cacheAlreadyLoaded() { return nil } - mfn := mb.mfn mb.llts = time.Now().UnixNano() // FIXME(dlc) - We could be smarter here. @@ -2398,7 +2943,7 @@ checkCache: // Load in the whole block. We want to hold the mb lock here to avoid any changes to // state. - buf, err := ioutil.ReadFile(mfn) + buf, err := mb.loadBlock(nil) if err != nil { return err } @@ -2407,6 +2952,15 @@ checkCache: // Make sure this is cleared in case we had a partial when we started. mb.clearCacheAndOffset() + // Check if we need to decrypt. + if mb.bek != nil && len(buf) > 0 { + rbek, err := chacha20.NewUnauthenticatedCipher(mb.seed, mb.nonce) + if err != nil { + return err + } + rbek.XORKeyStream(buf, buf) + } + if err := mb.indexCacheBuf(buf); err != nil { if err == errCorruptState { fs := mb.fs @@ -2461,6 +3015,8 @@ var ( errFlushRunning = errors.New("flush is already running") errCorruptState = errors.New("corrupt state file") errPendingData = errors.New("pending data still present") + errNoEncryption = errors.New("encryption not enabled") + errBadKeySize = errors.New("encryption bad key size") ) // Used for marking messages that have had their checksums checked. 
@@ -2655,6 +3211,21 @@ func (fs *fileStore) LoadMsg(seq uint64) (string, []byte, []byte, int64, error) return _EMPTY_, nil, nil, 0, err } +// LoadLastMsg will return the last message we have that matches a given subject. +// The subject can be a wildcard. +func (fs *fileStore) LoadLastMsg(subject string) (subj string, seq uint64, hdr, msg []byte, ts int64, err error) { + var sm *fileStoredMsg + if subject == _EMPTY_ || subject == fwcs { + sm, _ = fs.msgForSeq(fs.lastSeq()) + } else if ss := fs.FilteredState(1, subject); ss.Msgs > 0 { + sm, _ = fs.msgForSeq(ss.Last) + } + if sm == nil { + return _EMPTY_, 0, nil, nil, 0, ErrStoreMsgNotFound + } + return sm.subj, sm.seq, sm.hdr, sm.msg, sm.ts, nil +} + // Type returns the type of the underlying store. func (fs *fileStore) Type() StorageType { return FileStorage @@ -2719,8 +3290,9 @@ func fileStoreMsgSizeEstimate(slen, maxPayload int) uint64 { } // Write index info to the appropriate file. +// Lock should be held. func (mb *msgBlock) writeIndexInfo() error { - // HEADER: magic version msgs bytes fseq fts lseq lts checksum + // HEADER: magic version msgs bytes fseq fts lseq lts ndel checksum var hdr [indexHdrSize]byte // Write header @@ -2755,6 +3327,11 @@ func (mb *msgBlock) writeIndexInfo() error { mb.lwits = time.Now().UnixNano() + // Encrypt if needed. + if mb.aek != nil { + buf = mb.aek.Seal(buf[:0], mb.nonce, buf, nil) + } + if n, err = mb.ifd.WriteAt(buf, 0); err == nil { mb.liwsz = int64(n) mb.werr = nil @@ -2772,6 +3349,14 @@ func (mb *msgBlock) readIndexInfo() error { return err } + // Decrypt if needed. + if mb.aek != nil { + buf, err = mb.aek.Open(buf[:0], mb.nonce, buf, nil) + if err != nil { + return err + } + } + if err := checkHeader(buf); err != nil { defer os.Remove(mb.ifn) return fmt.Errorf("bad index file") @@ -2904,6 +3489,129 @@ func (fs *fileStore) dmapEntries() int { return total } +// Fixed helper for iterating. 
+func subjectsEqual(a, b string) bool { + return a == b +} + +func subjectsAll(a, b string) bool { + return true +} + +func compareFn(subject string) func(string, string) bool { + if subject == _EMPTY_ || subject == fwcs { + return subjectsAll + } + if subjectHasWildcard(subject) { + return subjectIsSubsetMatch + } + return subjectsEqual +} + +// PurgeEx will remove messages based on subject filters, sequence and number of messages to keep. +// Will return the number of purged messages. +func (fs *fileStore) PurgeEx(subject string, sequence, keep uint64) (purged uint64, err error) { + if subject == _EMPTY_ || subject == fwcs { + if keep == 0 && (sequence == 0 || sequence == 1) { + return fs.Purge() + } + if sequence > 1 { + return fs.Compact(sequence) + } else if keep > 0 { + fs.mu.RLock() + msgs, lseq := fs.state.Msgs, fs.state.LastSeq + fs.mu.RUnlock() + if keep >= msgs { + return 0, nil + } + return fs.Compact(lseq - keep + 1) + } + return 0, nil + } + + eq, wc := compareFn(subject), subjectHasWildcard(subject) + var firstSeqNeedsUpdate bool + + // If we have a "keep" designation need to get full filtered state so we know how many to purge. + var maxp uint64 + if keep > 0 { + ss := fs.FilteredState(1, subject) + if keep >= ss.Msgs { + return 0, nil + } + maxp = ss.Msgs - keep + } + + fs.mu.Lock() + for _, mb := range fs.blks { + mb.mu.Lock() + t, f, l := mb.filteredPendingLocked(subject, wc, mb.first.seq) + if t == 0 { + mb.mu.Unlock() + continue + } + + var shouldExpire bool + if !mb.cacheAlreadyLoaded() { + mb.loadMsgsWithLock() + shouldExpire = true + } + if sequence > 0 && sequence <= l { + l = sequence - 1 + } + for seq := f; seq <= l; seq++ { + if sm, _ := mb.cacheLookupWithLock(seq); sm != nil && eq(sm.subj, subject) { + rl := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg) + // Do fast in place remove. + // Stats + fs.state.Msgs-- + fs.state.Bytes -= rl + mb.msgs-- + mb.bytes -= rl + // FSS updates. 
+ mb.removeSeqPerSubject(sm.subj, seq) + // Check for first message. + if seq == mb.first.seq { + mb.selectNextFirst() + if mb.isEmpty() { + fs.removeMsgBlock(mb) + firstSeqNeedsUpdate = seq == fs.state.FirstSeq + } else if seq == fs.state.FirstSeq { + fs.state.FirstSeq = mb.first.seq // new one. + fs.state.FirstTime = time.Unix(0, mb.first.ts).UTC() + } + } else { + // Out of order delete. + if mb.dmap == nil { + mb.dmap = make(map[uint64]struct{}) + } + mb.dmap[seq] = struct{}{} + } + purged++ + if maxp > 0 && purged >= maxp { + break + } + } + } + // Expire if we were responsible for loading. + if shouldExpire { + // Expire this cache before moving on. + mb.llts = 0 + mb.expireCacheLocked() + } + + mb.mu.Unlock() + // Update our index info on disk. + mb.writeIndexInfo() + } + if firstSeqNeedsUpdate { + fs.selectNextFirst() + } + + fs.mu.Unlock() + return purged, nil +} + // Purge will remove all messages from this store. // Will return the number of purged messages. func (fs *fileStore) Purge() (uint64, error) { @@ -2980,8 +3688,7 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { return fs.purge(seq) } - var purged uint64 - var bytes uint64 + var purged, bytes uint64 // We have to delete interior messages. fs.mu.Lock() @@ -2996,9 +3703,9 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { } // All msgblocks up to this one can be thrown away. - for i, mb := range fs.blks { + var deleted int + for _, mb := range fs.blks { if mb == smb { - fs.blks = append(fs.blks[:0:0], fs.blks[i:]...) break } mb.mu.Lock() @@ -3006,35 +3713,62 @@ func (fs *fileStore) Compact(seq uint64) (uint64, error) { bytes += mb.bytes mb.dirtyCloseWithRemove(true) mb.mu.Unlock() + deleted++ } smb.mu.Lock() for mseq := smb.first.seq; mseq < seq; mseq++ { - if sm, _ := smb.cacheLookupWithLock(mseq); sm != nil && smb.msgs > 0 { + sm, err := smb.cacheLookupWithLock(mseq) + if err == errDeletedMsg { + // Update dmap. 
+ if len(smb.dmap) > 0 { + delete(smb.dmap, seq) + if len(smb.dmap) == 0 { + smb.dmap = nil + } + } + } else if sm != nil { sz := fileStoreMsgSize(sm.subj, sm.hdr, sm.msg) smb.bytes -= sz bytes += sz smb.msgs-- purged++ + // Update fss + smb.removeSeqPerSubject(sm.subj, mseq) } } - // Update first entry. - sm, _ := smb.cacheLookupWithLock(seq) - if sm != nil { - smb.first.seq = sm.seq - smb.first.ts = sm.ts + // Check if empty after processing, could happen if tail of messages are all deleted. + isEmpty := smb.msgs == 0 + if isEmpty { + smb.dirtyCloseWithRemove(true) + // Update fs first here as well. + fs.state.FirstSeq = smb.last.seq + 1 + fs.state.FirstTime = time.Time{} + deleted++ + } else { + // Update fs first seq and time. + smb.first.seq = seq - 1 // Just for start condition for selectNextFirst. + smb.selectNextFirst() + fs.state.FirstSeq = smb.first.seq + fs.state.FirstTime = time.Unix(0, smb.first.ts).UTC() } smb.mu.Unlock() - if sm != nil { - // Reset our version of first. - fs.state.FirstSeq = sm.seq - fs.state.FirstTime = time.Unix(0, sm.ts).UTC() - fs.state.Msgs -= purged - fs.state.Bytes -= bytes + if !isEmpty { + // Make sure to write out our index info. + smb.writeIndexInfo() + } + + if deleted > 0 { + // Update blks slice. + fs.blks = append(fs.blks[:0:0], fs.blks[deleted:]...) } + // Update top level accounting. + fs.state.Msgs -= purged + fs.state.Bytes -= bytes + cb := fs.scb fs.mu.Unlock() @@ -3201,6 +3935,156 @@ func (mb *msgBlock) dirtyCloseWithRemove(remove bool) { } } +// Remove a seq from the fss and select new first. +// Lock should be held. +func (mb *msgBlock) removeSeqPerSubject(subj string, seq uint64) { + ss := mb.fss[subj] + if ss == nil { + return + } + if ss.Msgs == 1 { + delete(mb.fss, subj) + return + } + + ss.Msgs-- + if seq != ss.First { + return + } + // TODO(dlc) - Might want to optimize this. 
+ for tseq := seq + 1; tseq <= ss.Last; tseq++ { + if sm, _ := mb.cacheLookupWithLock(tseq); sm != nil { + if sm.subj == subj { + ss.First = tseq + return + } + } + } +} + +// generatePerSubjectInfo will generate the per subject info via the raw msg block. +func (mb *msgBlock) generatePerSubjectInfo() error { + mb.mu.Lock() + defer mb.mu.Unlock() + + var shouldExpire bool + if !mb.cacheAlreadyLoaded() { + mb.loadMsgsWithLock() + shouldExpire = true + } + if mb.fss == nil { + mb.fss = make(map[string]*SimpleState) + } + fseq, lseq := mb.first.seq, mb.last.seq + for seq := fseq; seq <= lseq; seq++ { + if sm, _ := mb.cacheLookupWithLock(seq); sm != nil && len(sm.subj) > 0 { + if ss := mb.fss[sm.subj]; ss != nil { + ss.Msgs++ + ss.Last = seq + } else { + mb.fss[sm.subj] = &SimpleState{Msgs: 1, First: seq, Last: seq} + } + } + } + if shouldExpire { + // Expire this cache before moving on. + mb.llts = 0 + mb.expireCacheLocked() + } + return nil +} + +// readPerSubjectInfo will attempt to restore the per subject information. +func (mb *msgBlock) readPerSubjectInfo() error { + // Remove after processing regardless. + defer os.Remove(mb.sfn) + + const ( + fileHashIndex = 16 + mbHashIndex = 8 + minFileSize = 24 + ) + + buf, err := ioutil.ReadFile(mb.sfn) + + if err != nil || len(buf) < minFileSize || checkHeader(buf) != nil { + return mb.generatePerSubjectInfo() + } + + // Check that we did not have any bit flips. 
+ mb.hh.Reset() + mb.hh.Write(buf[0 : len(buf)-fileHashIndex]) + fhash := buf[len(buf)-fileHashIndex : len(buf)-mbHashIndex] + if checksum := mb.hh.Sum(nil); !bytes.Equal(checksum, fhash) { + return mb.generatePerSubjectInfo() + } + + if !bytes.Equal(buf[len(buf)-mbHashIndex:], mb.lchk[:]) { + return mb.generatePerSubjectInfo() + } + + fss := make(map[string]*SimpleState) + + bi := hdrLen + readU64 := func() uint64 { + if bi < 0 { + return 0 + } + num, n := binary.Uvarint(buf[bi:]) + if n <= 0 { + bi = -1 + return 0 + } + bi += n + return num + } + + for i, numEntries := uint64(0), readU64(); i < numEntries; i++ { + lsubj := readU64() + subj := buf[bi : bi+int(lsubj)] + bi += int(lsubj) + msgs, first, last := readU64(), readU64(), readU64() + fss[string(subj)] = &SimpleState{Msgs: msgs, First: first, Last: last} + } + mb.mu.Lock() + mb.fss = fss + mb.mu.Unlock() + return nil +} + +// writePerSubjectInfo will write out per subject information if we are tracking per subject. +// Lock should be held. +func (mb *msgBlock) writePerSubjectInfo() error { + // Raft groups do not have any subjects. + if len(mb.fss) == 0 { + return nil + } + var scratch [4 * binary.MaxVarintLen64]byte + var b bytes.Buffer + b.WriteByte(magic) + b.WriteByte(version) + n := binary.PutUvarint(scratch[0:], uint64(len(mb.fss))) + b.Write(scratch[0:n]) + for subj, ss := range mb.fss { + n := binary.PutUvarint(scratch[0:], uint64(len(subj))) + b.Write(scratch[0:n]) + b.WriteString(subj) + // Encode all three parts of our simple state into same scratch buffer. + n = binary.PutUvarint(scratch[0:], ss.Msgs) + n += binary.PutUvarint(scratch[n:], ss.First) + n += binary.PutUvarint(scratch[n:], ss.Last) + b.Write(scratch[0:n]) + } + // Calculate hash for this information. + mb.hh.Reset() + mb.hh.Write(b.Bytes()) + b.Write(mb.hh.Sum(nil)) + // Now copy over checksum from the block itself, this allows us to know if we are in sync. 
+ b.Write(mb.lchk[:]) + + return ioutil.WriteFile(mb.sfn, b.Bytes(), defaultFilePerms) +} + func (mb *msgBlock) close(sync bool) { if mb == nil { return @@ -3213,6 +4097,11 @@ func (mb *msgBlock) close(sync bool) { } mb.closed = true + // Check if we are tracking by subject. + if mb.fss != nil { + mb.writePerSubjectInfo() + } + // Close cache mb.clearCacheAndOffset() // Quit our loops. @@ -3339,24 +4228,17 @@ func (fs *fileStore) streamSnapshot(w io.WriteCloser, state *StreamState, includ fs.mu.Lock() blks := fs.blks - // Write our general meta data. - if err := fs.writeStreamMeta(); err != nil { - fs.mu.Unlock() - writeErr(fmt.Sprintf("Could not write stream meta file: %v", err)) - return - } - meta, err := ioutil.ReadFile(path.Join(fs.fcfg.StoreDir, JetStreamMetaFile)) - if err != nil { - fs.mu.Unlock() - writeErr(fmt.Sprintf("Could not read stream meta file: %v", err)) - return - } - sum, err := ioutil.ReadFile(path.Join(fs.fcfg.StoreDir, JetStreamMetaFileSum)) + // Grab our general meta data. + // We do this now instead of pulling from files since they could be encrypted. + meta, err := json.Marshal(fs.cfg) if err != nil { fs.mu.Unlock() - writeErr(fmt.Sprintf("Could not read stream checksum file: %v", err)) + writeErr(fmt.Sprintf("Could not gather stream meta file: %v", err)) return } + fs.hh.Reset() + fs.hh.Write(meta) + sum := []byte(hex.EncodeToString(fs.hh.Sum(nil))) fs.mu.Unlock() // Meta first. @@ -3370,6 +4252,8 @@ func (fs *fileStore) streamSnapshot(w io.WriteCloser, state *StreamState, includ // Can't use join path here, tar only recognizes relative paths with forward slashes. msgPre := msgDir + "/" + var bbuf []byte + // Now do messages themselves. 
for _, mb := range blks { if mb.pendingWriteSize() > 0 { @@ -3380,25 +4264,43 @@ func (fs *fileStore) streamSnapshot(w io.WriteCloser, state *StreamState, includ buf, err := ioutil.ReadFile(mb.ifn) if err != nil { mb.mu.Unlock() - writeErr(fmt.Sprintf("Could not read message block [%d] meta file: %v", mb.index, err)) + writeErr(fmt.Sprintf("Could not read message block [%d] index file: %v", mb.index, err)) return } + // Check for encryption. + if mb.aek != nil && len(buf) > 0 { + buf, err = mb.aek.Open(buf[:0], mb.nonce, buf, nil) + if err != nil { + mb.mu.Unlock() + writeErr(fmt.Sprintf("Could not decrypt message block [%d] index file: %v", mb.index, err)) + return + } + } if writeFile(msgPre+fmt.Sprintf(indexScan, mb.index), buf) != nil { mb.mu.Unlock() return } // We could stream but don't want to hold the lock and prevent changes, so just read in and // release the lock for now. - // TODO(dlc) - Maybe reuse buffer? - buf, err = ioutil.ReadFile(mb.mfn) + bbuf, err = mb.loadBlock(bbuf) if err != nil { mb.mu.Unlock() writeErr(fmt.Sprintf("Could not read message block [%d]: %v", mb.index, err)) return } + // Check for encryption. + if mb.bek != nil && len(bbuf) > 0 { + rbek, err := chacha20.NewUnauthenticatedCipher(mb.seed, mb.nonce) + if err != nil { + mb.mu.Unlock() + writeErr(fmt.Sprintf("Could not create encryption key for message block [%d]: %v", mb.index, err)) + return + } + rbek.XORKeyStream(bbuf, bbuf) + } mb.mu.Unlock() // Do this one unlocked. 
- if writeFile(msgPre+fmt.Sprintf(blkScan, mb.index), buf) != nil { + if writeFile(msgPre+fmt.Sprintf(blkScan, mb.index), bbuf) != nil { return } } @@ -3415,18 +4317,17 @@ func (fs *fileStore) streamSnapshot(w io.WriteCloser, state *StreamState, includ for _, o := range cfs { o.mu.Lock() - meta, err := ioutil.ReadFile(path.Join(o.odir, JetStreamMetaFile)) - if err != nil { - o.mu.Unlock() - writeErr(fmt.Sprintf("Could not read consumer meta file for %q: %v", o.name, err)) - return - } - sum, err := ioutil.ReadFile(path.Join(o.odir, JetStreamMetaFileSum)) + // Grab our general meta data. + // We do this now instead of pulling from files since they could be encrypted. + meta, err := json.Marshal(o.cfg) if err != nil { o.mu.Unlock() - writeErr(fmt.Sprintf("Could not read consumer checksum file for %q: %v", o.name, err)) + writeErr(fmt.Sprintf("Could not gather consumer meta file for %q: %v", o.name, err)) return } + o.hh.Reset() + o.hh.Write(meta) + sum := []byte(hex.EncodeToString(o.hh.Sum(nil))) // We can have the running state directly encoded now. state, err := o.encodeState() @@ -3502,17 +4403,18 @@ type consumerFileStore struct { mu sync.Mutex fs *fileStore cfg *FileConsumerInfo + prf keyGen + aek cipher.AEAD name string odir string ifn string - ifd *os.File - lwsz int64 hh hash.Hash64 state ConsumerState fch chan struct{} qch chan struct{} flusher bool writing bool + dirty bool closed bool } @@ -3534,6 +4436,7 @@ func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerSt o := &consumerFileStore{ fs: fs, cfg: csi, + prf: fs.prf, name: name, odir: odir, ifn: path.Join(odir, consumerState), @@ -3545,6 +4448,25 @@ func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerSt } o.hh = hh + // Check for encryption. + if o.prf != nil { + if ekey, err := ioutil.ReadFile(path.Join(odir, JetStreamMetaFileKey)); err == nil { + // Recover key encryption key. 
+ kek, err := chacha20poly1305.NewX(fs.prf([]byte(fs.cfg.Name + tsep + o.name))) + if err != nil { + return nil, err + } + ns := kek.NonceSize() + seed, err := kek.Open(nil, ekey[:ns], ekey[ns:], nil) + if err != nil { + return nil, err + } + if o.aek, err = chacha20poly1305.NewX(seed); err != nil { + return nil, err + } + } + } + // Write our meta data iff does not exist. meta := path.Join(odir, JetStreamMetaFile) if _, err := os.Stat(meta); err != nil && os.IsNotExist(err) { @@ -3554,6 +4476,25 @@ func (fs *fileStore) ConsumerStore(name string, cfg *ConsumerConfig) (ConsumerSt } } + // If we expect to be encrypted check that what we are restoring is not plaintext. + // This can happen on snapshot restores or conversions. + if o.prf != nil { + keyFile := path.Join(odir, JetStreamMetaFileKey) + if _, err := os.Stat(keyFile); err != nil && os.IsNotExist(err) { + if err := o.writeConsumerMeta(); err != nil { + return nil, err + } + // Redo the state file as well here if we have one and we can tell it was plaintext. + if buf, err := ioutil.ReadFile(o.ifn); err == nil { + if _, err := decodeConsumerState(buf); err == nil { + if err := ioutil.WriteFile(o.ifn, o.encryptState(buf), defaultFilePerms); err != nil { + return nil, err + } + } + } + } + } + // Create channels to control our flush go routine. o.fch = make(chan struct{}, 1) o.qch = make(chan struct{}) @@ -3575,6 +4516,7 @@ func (o *consumerFileStore) kickFlusher() { default: } } + o.dirty = true } // Set in flusher status @@ -3607,14 +4549,35 @@ func (o *consumerFileStore) flushLoop() { o.setInFlusher() defer o.clearInFlusher() + // Maintain approximately 10 updates per second per consumer under load. 
+ const minTime = 100 * time.Millisecond + var lastWrite time.Time + var dt *time.Timer + + setDelayTimer := func(addWait time.Duration) { + if dt == nil { + dt = time.NewTimer(addWait) + return + } + if !dt.Stop() { + select { + case <-dt.C: + default: + } + } + dt.Reset(addWait) + } + for { select { case <-fch: - time.Sleep(10 * time.Millisecond) - select { - case <-qch: - return - default: + if ts := time.Since(lastWrite); ts < minTime { + setDelayTimer(minTime - ts) + select { + case <-dt.C: + case <-qch: + return + } } o.mu.Lock() if o.closed { @@ -3628,6 +4591,7 @@ func (o *consumerFileStore) flushLoop() { } // TODO(dlc) - if we error should start failing upwards. o.writeState(buf) + lastWrite = time.Now() case <-qch: return } @@ -3840,12 +4804,24 @@ func (o *consumerFileStore) Update(state *ConsumerState) error { o.state.AckFloor = state.AckFloor o.state.Pending = pending o.state.Redelivered = redelivered + o.kickFlusher() o.mu.Unlock() - o.kickFlusher() return nil } +// Will encrypt the state with our asset key. Will be a no-op if encryption not enabled. +// Lock should be held. +func (o *consumerFileStore) encryptState(buf []byte) []byte { + if o.aek == nil { + return buf + } + // TODO(dlc) - Optimize on space usage a bit? + nonce := make([]byte, o.aek.NonceSize(), o.aek.NonceSize()+len(buf)+o.aek.Overhead()) + rand.Read(nonce) + return o.aek.Seal(nonce, nonce, buf, nil) +} + func (o *consumerFileStore) writeState(buf []byte) error { // Check if we have the index file open. o.mu.Lock() @@ -3853,19 +4829,23 @@ func (o *consumerFileStore) writeState(buf []byte) error { o.mu.Unlock() return nil } - if err := o.ensureStateFileOpen(); err != nil { - o.mu.Unlock() - return err + + // Check on encryption. + if o.aek != nil { + buf = o.encryptState(buf) } + o.writing = true - ifd := o.ifd + o.dirty = false + ifn := o.ifn o.mu.Unlock() - n, err := ifd.WriteAt(buf, 0) + // Lock not held here. 
+ err := ioutil.WriteFile(ifn, buf, defaultFilePerms) o.mu.Lock() - if err == nil { - o.lwsz = int64(n) + if err != nil { + o.dirty = true } o.writing = false o.mu.Unlock() @@ -3885,13 +4865,37 @@ func (o *consumerFileStore) updateConfig(cfg ConsumerConfig) error { // Lock should be held. func (cfs *consumerFileStore) writeConsumerMeta() error { meta := path.Join(cfs.odir, JetStreamMetaFile) - if _, err := os.Stat(meta); (err != nil && !os.IsNotExist(err)) || err == nil { + if _, err := os.Stat(meta); err != nil && !os.IsNotExist(err) { return err } + + if cfs.prf != nil && cfs.aek == nil { + fs := cfs.fs + key, _, _, encrypted, err := fs.genEncryptionKeys(fs.cfg.Name + tsep + cfs.name) + if err != nil { + return err + } + cfs.aek = key + keyFile := path.Join(cfs.odir, JetStreamMetaFileKey) + if _, err := os.Stat(keyFile); err != nil && !os.IsNotExist(err) { + return err + } + if err := ioutil.WriteFile(keyFile, encrypted, defaultFilePerms); err != nil { + return err + } + } + b, err := json.Marshal(cfs.cfg) if err != nil { return err } + // Encrypt if needed. + if cfs.aek != nil { + nonce := make([]byte, cfs.aek.NonceSize(), cfs.aek.NonceSize()+len(b)+cfs.aek.Overhead()) + rand.Read(nonce) + b = cfs.aek.Seal(nonce, nonce, b, nil) + } + if err := ioutil.WriteFile(meta, b, defaultFilePerms); err != nil { return err } @@ -3905,28 +4909,6 @@ func (cfs *consumerFileStore) writeConsumerMeta() error { return nil } -func (o *consumerFileStore) syncStateFile() { - // FIXME(dlc) - Hold last error? - o.mu.Lock() - if o.ifd != nil { - o.ifd.Sync() - o.ifd.Truncate(o.lwsz) - } - o.mu.Unlock() -} - -// Lock should be held. -func (o *consumerFileStore) ensureStateFileOpen() error { - if o.ifd == nil { - ifd, err := os.OpenFile(o.ifn, os.O_CREATE|os.O_RDWR, defaultFilePerms) - if err != nil { - return err - } - o.ifd = ifd - } - return nil -} - // Make sure the header is correct. 
func checkHeader(hdr []byte) error { if hdr == nil || len(hdr) < 2 || hdr[0] != magic || hdr[1] != version { @@ -3935,6 +4917,7 @@ func checkHeader(hdr []byte) error { return nil } +// Consumer version. func checkConsumerHeader(hdr []byte) (uint8, error) { if hdr == nil || len(hdr) < 2 || hdr[0] != magic { return 0, errCorruptState @@ -3995,6 +4978,15 @@ func (o *consumerFileStore) State() (*ConsumerState, error) { return state, nil } + // Check on encryption. + if o.aek != nil { + ns := o.aek.NonceSize() + buf, err = o.aek.Open(nil, buf[:ns], buf[ns:], nil) + if err != nil { + return nil, err + } + } + state, err = decodeConsumerState(buf) if err != nil { return nil, err @@ -4127,33 +5119,56 @@ func (o *consumerFileStore) Stop() error { } var err error - if !o.writing { - if err = o.ensureStateFileOpen(); err == nil { - var buf []byte - // Make sure to write this out.. - if buf, err = o.encodeState(); err == nil && len(buf) > 0 { - _, err = o.ifd.WriteAt(buf, 0) + var buf []byte + + if o.dirty { + // Make sure to write this out.. + if buf, err = o.encodeState(); err == nil && len(buf) > 0 { + if o.aek != nil { + buf = o.encryptState(buf) } } } - o.ifd, o.odir = nil, _EMPTY_ - fs, ifd := o.fs, o.ifd + o.odir = _EMPTY_ o.closed = true + ifn, fs := o.ifn, o.fs o.mu.Unlock() - if ifd != nil { - ifd.Close() - } - fs.removeConsumer(o) + + if len(buf) > 0 { + o.waitOnFlusher() + err = ioutil.WriteFile(ifn, buf, defaultFilePerms) + } return err } +func (o *consumerFileStore) waitOnFlusher() { + if !o.inFlusher() { + return + } + + timeout := time.Now().Add(100 * time.Millisecond) + for time.Now().Before(timeout) { + if !o.inFlusher() { + return + } + time.Sleep(10 * time.Millisecond) + } +} + // Delete the consumer. 
func (o *consumerFileStore) Delete() error { - o.mu.Lock() + return o.delete(false) +} + +func (o *consumerFileStore) StreamDelete() error { + return o.delete(true) +} +func (o *consumerFileStore) delete(streamDeleted bool) error { + o.mu.Lock() if o.closed { o.mu.Unlock() return nil @@ -4163,20 +5178,20 @@ func (o *consumerFileStore) Delete() error { o.qch = nil } - if o.ifd != nil { - o.ifd.Close() - o.ifd = nil - } var err error - if o.odir != _EMPTY_ { + // If our stream was deleted it will remove the directories. + if o.odir != _EMPTY_ && !streamDeleted { err = os.RemoveAll(o.odir) } - o.ifd, o.odir = nil, _EMPTY_ + o.odir = _EMPTY_ o.closed = true fs := o.fs o.mu.Unlock() - fs.removeConsumer(o) + if !streamDeleted { + fs.removeConsumer(o) + } + return err } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/fuzz.go b/vendor/github.com/nats-io/nats-server/v2/server/fuzz.go index 1f626983..03f98ad1 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/fuzz.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/fuzz.go @@ -15,7 +15,7 @@ package server -var defaultServerOptions = Options{ +var defaultFuzzServerOptions = Options{ Host: "127.0.0.1", Trace: true, Debug: true, @@ -24,15 +24,15 @@ var defaultServerOptions = Options{ NoSigs: true, } -func dummyClient() *client { - return &client{srv: New(&defaultServerOptions), msubs: -1, mpay: -1, mcl: MAX_CONTROL_LINE_SIZE} +func dummyFuzzClient() *client { + return &client{srv: New(&defaultFuzzServerOptions), msubs: -1, mpay: -1, mcl: MAX_CONTROL_LINE_SIZE} } func FuzzClient(data []byte) int { if len(data) < 100 { return -1 } - c := dummyClient() + c := dummyFuzzClient() err := c.parse(data[:50]) if err != nil { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/gateway.go b/vendor/github.com/nats-io/nats-server/v2/server/gateway.go index d34e666e..f84633c9 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/gateway.go +++ 
b/vendor/github.com/nats-io/nats-server/v2/server/gateway.go @@ -2552,7 +2552,7 @@ func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgr sub.nm, sub.max = 0, 0 sub.client = gwc sub.subject = subject - didDeliver = c.deliverMsg(sub, subject, mreply, mh, msg, false) || didDeliver + didDeliver = c.deliverMsg(sub, acc, subject, mreply, mh, msg, false) || didDeliver } // Done with subscription, put back to pool. We don't need // to reset content since we explicitly set when using it. @@ -2718,11 +2718,11 @@ func getSubjectFromGWRoutedReply(reply []byte, isOldPrefix bool) []byte { // This should be invoked only from processInboundGatewayMsg() or // processInboundRoutedMsg() and is checking if the subject -// (c.pa.subject) has the $GNR prefix. If so, this is processed +// (c.pa.subject) has the _GR_ prefix. If so, this is processed // as a GW reply and `true` is returned to indicate to the caller // that it should stop processing. // If gateway is not enabled on this server or if the subject -// does not start with $GR, `false` is returned and caller should +// does not start with _GR_, `false` is returned and caller should // process message as usual. func (c *client) handleGatewayReply(msg []byte) (processed bool) { // Do not handle GW prefixed messages if this server does not have @@ -2830,7 +2830,18 @@ func (c *client) handleGatewayReply(msg []byte) (processed bool) { buf = append(buf, c.pa.reply...) buf = append(buf, ' ') } - buf = append(buf, c.pa.szb...) + szb := c.pa.szb + if c.pa.hdr >= 0 { + if route.headers { + buf[0] = 'H' + buf = append(buf, c.pa.hdb...) + buf = append(buf, ' ') + } else { + szb = []byte(strconv.Itoa(c.pa.size - c.pa.hdr)) + msg = msg[c.pa.hdr:] + } + } + buf = append(buf, szb...) mhEnd := len(buf) buf = append(buf, _CRLF_...) buf = append(buf, msg...) 
diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go index ff132ce6..a4836a7a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream.go @@ -14,10 +14,12 @@ package server import ( + "crypto/hmac" "crypto/sha256" "encoding/binary" "encoding/hex" "encoding/json" + "errors" "fmt" "io/ioutil" "math" @@ -34,6 +36,7 @@ import ( "github.com/nats-io/nats-server/v2/server/sysmem" "github.com/nats-io/nkeys" "github.com/nats-io/nuid" + "golang.org/x/crypto/chacha20poly1305" ) // JetStreamConfig determines this server's configuration. @@ -81,6 +84,7 @@ type jetStream struct { memReserved int64 storeReserved int64 apiCalls int64 + apiErrors int64 memTotal int64 storeTotal int64 mu sync.RWMutex @@ -156,7 +160,7 @@ func (s *Server) EnableJetStream(config *JetStreamConfig) error { config.Domain = domain } s.Debugf("JetStream creating dynamic configuration - %s memory, %s disk", friendlyBytes(config.MaxMemory), friendlyBytes(config.MaxStore)) - } else if config != nil && config.StoreDir != _EMPTY_ { + } else if config.StoreDir != _EMPTY_ { config.StoreDir = filepath.Join(config.StoreDir, JetStreamStoreDir) } @@ -171,9 +175,60 @@ func (s *Server) EnableJetStream(config *JetStreamConfig) error { return err } + if ek := s.getOpts().JetStreamKey; ek != _EMPTY_ { + s.Warnf("JetStream Encryption is Beta") + } + return s.enableJetStream(cfg) } +// Function signature to generate a key encryption key. +type keyGen func(context []byte) []byte + +// Return a key generation function or nil if encryption not enabled. 
+// keyGen defined in filestore.go - keyGen func(iv, context []byte) []byte +func (s *Server) jsKeyGen(info string) keyGen { + if ek := s.getOpts().JetStreamKey; ek != _EMPTY_ { + return func(context []byte) []byte { + h := hmac.New(sha256.New, []byte(ek)) + h.Write([]byte(info)) + h.Write(context) + return h.Sum(nil) + } + } + return nil +} + +// Decode the encrypted metafile. +func (s *Server) decryptMeta(ekey, buf []byte, acc, context string) ([]byte, error) { + if len(ekey) != metaKeySize { + return nil, errors.New("bad encryption key") + } + prf := s.jsKeyGen(acc) + if prf == nil { + return nil, errNoEncryption + } + kek, err := chacha20poly1305.NewX(prf([]byte(context))) + if err != nil { + return nil, err + } + ns := kek.NonceSize() + seed, err := kek.Open(nil, ekey[:ns], ekey[ns:], nil) + if err != nil { + return nil, err + } + aek, err := chacha20poly1305.NewX(seed[:]) + if err != nil { + return nil, err + } + ns = kek.NonceSize() + plain, err := aek.Open(nil, buf[:ns], buf[ns:], nil) + if err != nil { + return nil, err + } + return plain, nil +} + // Check to make sure directory has the jetstream directory. // We will have it properly configured here now regardless, so need to look inside. func (s *Server) checkStoreDir(cfg *JetStreamConfig) error { @@ -613,20 +668,57 @@ func (s *Server) configAllJetStreamAccounts() error { return nil } -// JetStreamEnabled reports if jetstream is enabled. +func (js *jetStream) isEnabled() bool { + if js == nil { + return false + } + js.mu.RLock() + defer js.mu.RUnlock() + return !js.disabled +} + +// JetStreamEnabled reports if jetstream is enabled for this server. func (s *Server) JetStreamEnabled() bool { + var js *jetStream s.mu.Lock() - js := s.js + js = s.js s.mu.Unlock() + return js.isEnabled() +} - var enabled bool - if js != nil { - js.mu.RLock() - enabled = !js.disabled - js.mu.RUnlock() +// JetStreamEnabledForDomain will report if any servers have JetStream enabled within this domain. 
+func (s *Server) JetStreamEnabledForDomain() bool { + if s.JetStreamEnabled() { + return true } - return enabled + var jsFound bool + // If we are here we do not have JetStream enabled for ourselves, but we need to check all connected servers. + // TODO(dlc) - Could optimize and memoize this. + s.nodeToInfo.Range(func(k, v interface{}) bool { + // This should not be dependent on online status, so only check js. + if v.(nodeInfo).js { + jsFound = true + return false + } + return true + }) + + return jsFound +} + +// Helper to see if we have a non-empty domain defined in any server we know about. +func (s *Server) jetStreamHasDomainConfigured() bool { + var found bool + s.nodeToInfo.Range(func(k, v interface{}) bool { + if v.(nodeInfo).domain != _EMPTY_ { + found = true + return false + } + return true + }) + + return found } // Will migrate off ephemerals if possible. @@ -775,7 +867,7 @@ func (s *Server) JetStreamNumAccounts() int { func (s *Server) JetStreamReservedResources() (int64, int64, error) { js := s.getJetStream() if js == nil { - return -1, -1, ErrJetStreamNotEnabled + return -1, -1, ApiErrors[JSNotEnabledForAccountErr] } js.mu.RLock() defer js.mu.RUnlock() @@ -810,24 +902,24 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { sendq := s.sys.sendq s.mu.Unlock() - js := s.getJetStream() + // No limits means we dynamically set up limits. + // We also place limits here so we know that the account is configured for JetStream. + if limits == nil { + limits = dynamicJSAccountLimits + } + js := s.getJetStream() if js == nil { - // Place limits here so we know that the account is configured for JetStream. - if limits == nil { - limits = dynamicJSAccountLimits - } a.assignJetStreamLimits(limits) - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledErr] } if s.SystemAccount() == a { return fmt.Errorf("jetstream can not be enabled on the system account") } - // No limits means we dynamically set up limits. 
if limits == nil { - limits = js.dynamicAccountLimits() + limits = dynamicJSAccountLimits } a.assignJetStreamLimits(limits) @@ -879,10 +971,14 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { if err := os.MkdirAll(sdir, defaultDirPerms); err != nil { return fmt.Errorf("could not create storage streams directory - %v", err) } - } + // Just need to make sure we can write to the directory. + // Remove the directory will create later if needed. + os.RemoveAll(sdir) - // Restore any state here. - s.Debugf("Recovering JetStream state for account %q", a.Name) + } else { + // Restore any state here. + s.Debugf("Recovering JetStream state for account %q", a.Name) + } // Check templates first since messsage sets will need proper ownership. // FIXME(dlc) - Make this consistent. @@ -943,7 +1039,7 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { metafile := path.Join(mdir, JetStreamMetaFile) metasum := path.Join(mdir, JetStreamMetaFileSum) if _, err := os.Stat(metafile); os.IsNotExist(err) { - s.Warnf(" Missing Stream metafile for %q", metafile) + s.Warnf(" Missing stream metafile for %q", metafile) continue } buf, err := ioutil.ReadFile(metafile) @@ -952,7 +1048,7 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { continue } if _, err := os.Stat(metasum); os.IsNotExist(err) { - s.Warnf(" Missing Stream checksum for %q", metasum) + s.Warnf(" Missing stream checksum for %q", metasum) continue } sum, err := ioutil.ReadFile(metasum) @@ -967,20 +1063,34 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { continue } + // Check if we are encrypted. + if key, err := ioutil.ReadFile(path.Join(mdir, JetStreamMetaFileKey)); err == nil { + s.Debugf(" Stream metafile is encrypted, reading encrypted keyfile") + if len(key) != metaKeySize { + s.Warnf(" Bad stream encryption key length of %d", len(key)) + continue + } + // Decode the buffer before proceeding. 
+ if buf, err = s.decryptMeta(key, buf, a.Name, fi.Name()); err != nil { + s.Warnf(" Error decrypting our stream metafile: %v", err) + continue + } + } + var cfg FileStreamInfo if err := json.Unmarshal(buf, &cfg); err != nil { - s.Warnf(" Error unmarshalling Stream metafile: %v", err) + s.Warnf(" Error unmarshalling stream metafile: %v", err) continue } if cfg.Template != _EMPTY_ { if err := jsa.addStreamNameToTemplate(cfg.Template, cfg.Name); err != nil { - s.Warnf(" Error adding Stream %q to Template %q: %v", cfg.Name, cfg.Template, err) + s.Warnf(" Error adding stream %q to template %q: %v", cfg.Name, cfg.Template, err) } } mset, err := a.addStream(&cfg.StreamConfig) if err != nil { - s.Warnf(" Error recreating Stream %q: %v", cfg.Name, err) + s.Warnf(" Error recreating stream %q: %v", cfg.Name, err) continue } if !cfg.Created.IsZero() { @@ -988,19 +1098,19 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { } state := mset.state() - s.Noticef(" Restored %s messages for Stream %q", comma(int64(state.Msgs)), fi.Name()) + s.Noticef(" Restored %s messages for stream %q", comma(int64(state.Msgs)), fi.Name()) // Now do the consumers. 
odir := path.Join(sdir, fi.Name(), consumerDir) ofis, _ := ioutil.ReadDir(odir) if len(ofis) > 0 { - s.Noticef(" Recovering %d Consumers for Stream - %q", len(ofis), fi.Name()) + s.Noticef(" Recovering %d consumers for stream - %q", len(ofis), fi.Name()) } for _, ofi := range ofis { metafile := path.Join(odir, ofi.Name(), JetStreamMetaFile) metasum := path.Join(odir, ofi.Name(), JetStreamMetaFileSum) if _, err := os.Stat(metafile); os.IsNotExist(err) { - s.Warnf(" Missing Consumer Metafile %q", metafile) + s.Warnf(" Missing consumer metafile %q", metafile) continue } buf, err := ioutil.ReadFile(metafile) @@ -1009,12 +1119,23 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { continue } if _, err := os.Stat(metasum); os.IsNotExist(err) { - s.Warnf(" Missing Consumer checksum for %q", metasum) + s.Warnf(" Missing consumer checksum for %q", metasum) continue } + + // Check if we are encrypted. + if key, err := ioutil.ReadFile(path.Join(odir, ofi.Name(), JetStreamMetaFileKey)); err == nil { + s.Debugf(" Consumer metafile is encrypted, reading encrypted keyfile") + // Decode the buffer before proceeding. 
+ if buf, err = s.decryptMeta(key, buf, a.Name, fi.Name()+tsep+ofi.Name()); err != nil { + s.Warnf(" Error decrypting our consumer metafile: %v", err) + continue + } + } + var cfg FileConsumerInfo if err := json.Unmarshal(buf, &cfg); err != nil { - s.Warnf(" Error unmarshalling Consumer metafile: %v", err) + s.Warnf(" Error unmarshalling consumer metafile: %v", err) continue } isEphemeral := !isDurableConsumer(&cfg.ConsumerConfig) @@ -1025,7 +1146,7 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { } obs, err := mset.addConsumer(&cfg.ConsumerConfig) if err != nil { - s.Warnf(" Error adding Consumer: %v", err) + s.Warnf(" Error adding consumer: %v", err) continue } if isEphemeral { @@ -1038,10 +1159,9 @@ func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error { err = obs.readStoredState() obs.mu.Unlock() if err != nil { - s.Warnf(" Error restoring Consumer state: %v", err) + s.Warnf(" Error restoring consumer state: %v", err) } } - mset.clearFilterIndex() } // Make sure to cleanup and old remaining snapshots. @@ -1107,14 +1227,14 @@ func (a *Account) lookupStream(name string) (*stream, error) { a.mu.RUnlock() if jsa == nil { - return nil, ErrJetStreamNotEnabled + return nil, ApiErrors[JSNotEnabledForAccountErr] } jsa.mu.Lock() defer jsa.mu.Unlock() mset, ok := jsa.streams[name] if !ok { - return nil, ErrJetStreamStreamNotFound + return nil, ApiErrors[JSStreamNotFoundErr] } return mset, nil } @@ -1131,14 +1251,14 @@ func (a *Account) UpdateJetStreamLimits(limits *JetStreamAccountLimits) error { } js := s.getJetStream() if js == nil { - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledErr] } if jsa == nil { - return ErrJetStreamNotEnabledForAccount + return ApiErrors[JSNotEnabledForAccountErr] } if limits == nil { - limits = js.dynamicAccountLimits() + limits = dynamicJSAccountLimits } // Calculate the delta between what we have and what we want. 
@@ -1212,30 +1332,10 @@ func (a *Account) JetStreamUsage() JetStreamAccountStats { // DisableJetStream will disable JetStream for this account. func (a *Account) DisableJetStream() error { - a.mu.Lock() - s := a.srv - a.js = nil - a.mu.Unlock() - - if s == nil { - return fmt.Errorf("jetstream account not registered") - } - - js := s.getJetStream() - if js == nil { - return ErrJetStreamNotEnabled - } - - // Remove service imports. - for _, export := range allJsExports { - a.removeServiceImport(export) - } - - return js.disableJetStream(js.lookupAccount(a)) + return a.removeJetStream() } -// removeJetStream is called when JetStream has been disabled for this -// server. +// removeJetStream is called when JetStream has been disabled for this account. func (a *Account) removeJetStream() error { a.mu.Lock() s := a.srv @@ -1248,7 +1348,7 @@ func (a *Account) removeJetStream() error { js := s.getJetStream() if js == nil { - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledForAccountErr] } return js.disableJetStream(js.lookupAccount(a)) @@ -1257,7 +1357,7 @@ func (a *Account) removeJetStream() error { // Disable JetStream for the account. func (js *jetStream) disableJetStream(jsa *jsAccount) error { if jsa == nil || jsa.account == nil { - return ErrJetStreamNotEnabledForAccount + return ApiErrors[JSNotEnabledForAccountErr] } js.mu.Lock() @@ -1273,6 +1373,9 @@ func (js *jetStream) disableJetStream(jsa *jsAccount) error { // jetStreamConfigured reports whether the account has JetStream configured, regardless of this // servers JetStream status. 
func (a *Account) jetStreamConfigured() bool { + if a == nil { + return false + } a.mu.RLock() defer a.mu.RUnlock() return a.jsLimits != nil @@ -1289,7 +1392,7 @@ func (a *Account) JetStreamEnabled() bool { return enabled } -func (jsa *jsAccount) remoteUpdateUsage(sub *subscription, c *client, subject, _ string, msg []byte) { +func (jsa *jsAccount) remoteUpdateUsage(sub *subscription, c *client, _ *Account, subject, _ string, msg []byte) { const usageSize = 32 jsa.mu.Lock() @@ -1428,16 +1531,16 @@ func (jsa *jsAccount) limitsExceeded(storeType StorageType) bool { // Lock should be held. func (jsa *jsAccount) checkLimits(config *StreamConfig) error { if jsa.limits.MaxStreams > 0 && len(jsa.streams) >= jsa.limits.MaxStreams { - return fmt.Errorf("maximum number of streams reached") + return ApiErrors[JSMaximumStreamsLimitErr] } // Check MaxConsumers if config.MaxConsumers > 0 && jsa.limits.MaxConsumers > 0 && config.MaxConsumers > jsa.limits.MaxConsumers { - return fmt.Errorf("maximum consumers exceeds account limit") + return ApiErrors[JSMaximumConsumersLimitErr] } // Check storage, memory or disk. if config.MaxBytes > 0 { - return jsa.checkBytesLimits(config.MaxBytes*int64(config.Replicas), config.Storage) + return jsa.checkBytesLimits(config.MaxBytes, config.Storage, config.Replicas) } return nil } @@ -1445,32 +1548,35 @@ func (jsa *jsAccount) checkLimits(config *StreamConfig) error { // Check if additional bytes will exceed our account limits. // This should account for replicas. // Lock should be held. -func (jsa *jsAccount) checkBytesLimits(addBytes int64, storage StorageType) error { +func (jsa *jsAccount) checkBytesLimits(addBytes int64, storage StorageType, replicas int) error { + if replicas < 1 { + replicas = 1 + } + js, totalBytes := jsa.js, addBytes*int64(replicas) + switch storage { case MemoryStorage: // Account limits defined. 
if jsa.limits.MaxMemory > 0 { - if jsa.memReserved+addBytes > jsa.limits.MaxMemory { - return ErrMemoryResourcesExceeded + if jsa.memReserved+totalBytes > jsa.limits.MaxMemory { + return ApiErrors[JSMemoryResourcesExceededErr] } } else { // Account is unlimited, check if this server can handle request. - js := jsa.js if js.memReserved+addBytes > js.config.MaxMemory { - return ErrMemoryResourcesExceeded + return ApiErrors[JSMemoryResourcesExceededErr] } } case FileStorage: // Account limits defined. if jsa.limits.MaxStore > 0 { - if jsa.storeReserved+addBytes > jsa.limits.MaxStore { - return ErrStorageResourcesExceeded + if jsa.storeReserved+totalBytes > jsa.limits.MaxStore { + return ApiErrors[JSStorageResourcesExceededErr] } } else { // Account is unlimited, check if this server can handle request. - js := jsa.js if js.storeReserved+addBytes > js.config.MaxStore { - return ErrStorageResourcesExceeded + return ApiErrors[JSStorageResourcesExceededErr] } } } @@ -1529,39 +1635,16 @@ func (js *jetStream) lookupAccount(a *Account) *jsAccount { return jsa } -// Will dynamically create limits for this account. -func (js *jetStream) dynamicAccountLimits() *JetStreamAccountLimits { - js.mu.RLock() - // For now use all resources. Mostly meant for $G in non-account mode. - limits := &JetStreamAccountLimits{js.config.MaxMemory, js.config.MaxStore, -1, -1} - js.mu.RUnlock() - return limits -} - // Report on JetStream stats and usage for this server. func (js *jetStream) usageStats() *JetStreamStats { var stats JetStreamStats - - var _jsa [512]*jsAccount - accounts := _jsa[:0] - js.mu.RLock() - for _, jsa := range js.accounts { - accounts = append(accounts, jsa) - } + stats.Accounts = len(js.accounts) js.mu.RUnlock() - - stats.Accounts = len(accounts) - - // Collect account information. 
- for _, jsa := range accounts { - jsa.mu.RLock() - stats.Memory += uint64(jsa.usage.mem) - stats.Store += uint64(jsa.usage.store) - stats.API.Total += jsa.usage.api - stats.API.Errors += jsa.usage.err - jsa.mu.RUnlock() - } + stats.API.Total = (uint64)(atomic.LoadInt64(&js.apiCalls)) + stats.API.Errors = (uint64)(atomic.LoadInt64(&js.apiErrors)) + stats.Memory = (uint64)(atomic.LoadInt64(&js.memTotal)) + stats.Store = (uint64)(atomic.LoadInt64(&js.storeTotal)) return &stats } @@ -1573,10 +1656,10 @@ func (js *jetStream) sufficientResources(limits *JetStreamAccountLimits) error { } if js.memReserved+limits.MaxMemory > js.config.MaxMemory { - return ErrMemoryResourcesExceeded + return ApiErrors[JSMemoryResourcesExceededErr] } if js.storeReserved+limits.MaxStore > js.config.MaxStore { - return ErrStorageResourcesExceeded + return ApiErrors[JSStorageResourcesExceededErr] } return nil } @@ -1664,7 +1747,7 @@ func (a *Account) checkForJetStream() (*Server, *jsAccount, error) { a.mu.RUnlock() if s == nil || jsa == nil { - return nil, nil, ErrJetStreamNotEnabledForAccount + return nil, nil, ApiErrors[JSNotEnabledForAccountErr] } return s, jsa, nil @@ -1780,7 +1863,7 @@ func (t *streamTemplate) createTemplateSubscriptions() error { return nil } -func (t *streamTemplate) processInboundTemplateMsg(_ *subscription, pc *client, subject, reply string, msg []byte) { +func (t *streamTemplate) processInboundTemplateMsg(_ *subscription, pc *client, acc *Account, subject, reply string, msg []byte) { if t == nil || t.jsa == nil { return } @@ -1793,7 +1876,6 @@ func (t *streamTemplate) processInboundTemplateMsg(_ *subscription, pc *client, jsa.mu.Unlock() return } - acc := jsa.account jsa.mu.Unlock() // Check if we are at the maximum and grab some variables. @@ -1824,7 +1906,7 @@ func (t *streamTemplate) processInboundTemplateMsg(_ *subscription, pc *client, } // Process this message directly by invoking mset. 
- mset.processInboundJetStreamMsg(nil, pc, subject, reply, msg) + mset.processInboundJetStreamMsg(nil, pc, acc, subject, reply, msg) } // lookupStreamTemplate looks up the names stream template. @@ -1875,7 +1957,7 @@ func (t *streamTemplate) delete() error { t.mu.Unlock() if jsa == nil { - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledForAccountErr] } jsa.mu.Lock() @@ -1919,7 +2001,7 @@ func (t *streamTemplate) delete() error { func (a *Account) deleteStreamTemplate(name string) error { t, err := a.lookupStreamTemplate(name) if err != nil { - return err + return ApiErrors[JSStreamTemplateNotFoundErr] } return t.delete() } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go index c1cce472..561a1270 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go @@ -39,10 +39,9 @@ const ( // For constructing JetStream domain prefixes. jsDomainAPI = "$JS.%s.API.>" - // JSApiPrefix JSApiPrefix = "$JS.API" - // JSApiInfo is for obtaining general information about JetStream for this account. + // JSApiAccountInfo is for obtaining general information about JetStream for this account. // Will return JSON response. JSApiAccountInfo = "$JS.API.INFO" @@ -91,7 +90,7 @@ const ( JSApiStreamDelete = "$JS.API.STREAM.DELETE.*" JSApiStreamDeleteT = "$JS.API.STREAM.DELETE.%s" - // JSApiPurgeStream is the endpoint to purge streams. + // JSApiStreamPurge is the endpoint to purge streams. // Will return JSON response. JSApiStreamPurge = "$JS.API.STREAM.PURGE.*" JSApiStreamPurgeT = "$JS.API.STREAM.PURGE.%s" @@ -104,11 +103,11 @@ const ( JSApiStreamSnapshotT = "$JS.API.STREAM.SNAPSHOT.%s" // JSApiStreamRestore is the endpoint to restore a stream from a snapshot. - // Caller should resond to each chunk with a nil body response. + // Caller should respond to each chunk with a nil body response. 
JSApiStreamRestore = "$JS.API.STREAM.RESTORE.*" JSApiStreamRestoreT = "$JS.API.STREAM.RESTORE.%s" - // JSApiDeleteMsg is the endpoint to delete messages from a stream. + // JSApiMsgDelete is the endpoint to delete messages from a stream. // Will return JSON response. JSApiMsgDelete = "$JS.API.STREAM.MSG.DELETE.*" JSApiMsgDeleteT = "$JS.API.STREAM.MSG.DELETE.%s" @@ -134,15 +133,14 @@ const ( JSApiConsumersT = "$JS.API.CONSUMER.NAMES.%s" // JSApiConsumerList is the endpoint that will return all detailed consumer information - JSApiConsumerList = "$JS.API.CONSUMER.LIST.*" - JSApiConsumerListT = "$JS.API.CONSUMER.LIST.%s" + JSApiConsumerList = "$JS.API.CONSUMER.LIST.*" // JSApiConsumerInfo is for obtaining general information about a consumer. // Will return JSON response. JSApiConsumerInfo = "$JS.API.CONSUMER.INFO.*.*" JSApiConsumerInfoT = "$JS.API.CONSUMER.INFO.%s.%s" - // JSApiDeleteConsumer is the endpoint to delete consumers. + // JSApiConsumerDelete is the endpoint to delete consumers. // Will return JSON response. JSApiConsumerDelete = "$JS.API.CONSUMER.DELETE.*.*" JSApiConsumerDeleteT = "$JS.API.CONSUMER.DELETE.%s.%s" @@ -150,6 +148,9 @@ const ( // JSApiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode. JSApiRequestNextT = "$JS.API.CONSUMER.MSG.NEXT.%s.%s" + // jsRequestNextPre + jsRequestNextPre = "$JS.API.CONSUMER.MSG.NEXT." + // For snapshots and restores. The ack will have additional tokens. jsSnapshotAckT = "$JS.SNAPSHOT.ACK.%s.%s" jsRestoreDeliverT = "$JS.SNAPSHOT.RESTORE.%s.%s" @@ -231,13 +232,13 @@ const ( // JSAdvisoryStreamRestoreCompletePre notification that a restore was completed. JSAdvisoryStreamRestoreCompletePre = "$JS.EVENT.ADVISORY.STREAM.RESTORE_COMPLETE" - // JSAdvisoryStreamLeaderElectPre notification that a replicated stream has elected a leader. + // JSAdvisoryStreamLeaderElectedPre notification that a replicated stream has elected a leader. 
JSAdvisoryStreamLeaderElectedPre = "$JS.EVENT.ADVISORY.STREAM.LEADER_ELECTED" // JSAdvisoryStreamQuorumLostPre notification that a stream and its consumers are stalled. JSAdvisoryStreamQuorumLostPre = "$JS.EVENT.ADVISORY.STREAM.QUORUM_LOST" - // JSAdvisoryConsumerLeaderElectPre notification that a replicated consumer has elected a leader. + // JSAdvisoryConsumerLeaderElectedPre notification that a replicated consumer has elected a leader. JSAdvisoryConsumerLeaderElectedPre = "$JS.EVENT.ADVISORY.CONSUMER.LEADER_ELECTED" // JSAdvisoryConsumerQuorumLostPre notification that a consumer is stalled. @@ -254,24 +255,29 @@ const ( JSAuditAdvisory = "$JS.EVENT.ADVISORY.API" ) -// Maximum name lengths for streams, consumers and templates. +// JSMaxDescription is the maximum description length for streams and consumers. +const JSMaxDescriptionLen = 4 * 1024 + +// JSMaxNameLen is the maximum name lengths for streams, consumers and templates. const JSMaxNameLen = 256 // Responses for API calls. -// ApiError is included in all responses if there was an error. -// TODO(dlc) - Move to more generic location. -type ApiError struct { - Code int `json:"code"` - Description string `json:"description,omitempty"` -} - // ApiResponse is a standard response from the JetStream JSON API type ApiResponse struct { Type string `json:"type"` Error *ApiError `json:"error,omitempty"` } +// ToError checks if the response has a error and if it does converts it to an error avoiding the pitfalls described by https://yourbasic.org/golang/gotcha-why-nil-error-not-equal-nil/ +func (r *ApiResponse) ToError() error { + if r.Error == nil { + return nil + } + + return r.Error +} + const JSApiOverloadedType = "io.nats.jetstream.api.v1.system_overloaded" // ApiPaged includes variables used to create paged responses from the JSON API @@ -310,12 +316,10 @@ type JSApiStreamDeleteResponse struct { const JSApiStreamDeleteResponseType = "io.nats.jetstream.api.v1.stream_delete_response" -// JSApiStreamInfoRequest. 
type JSApiStreamInfoRequest struct { DeletedDetails bool `json:"deleted_details,omitempty"` } -// JSApiStreamInfoResponse. type JSApiStreamInfoResponse struct { ApiResponse *StreamInfo @@ -323,7 +327,7 @@ type JSApiStreamInfoResponse struct { const JSApiStreamInfoResponseType = "io.nats.jetstream.api.v1.stream_info_response" -// Maximum entries we will return for streams or consumers lists. +// JSApiNamesLimit is the maximum entries we will return for streams or consumers lists. // TODO(dlc) - with header or request support could request chunked response. const JSApiNamesLimit = 1024 const JSApiListLimit = 256 @@ -354,7 +358,20 @@ type JSApiStreamListResponse struct { const JSApiStreamListResponseType = "io.nats.jetstream.api.v1.stream_list_response" -// JSApiStreamPurgeResponse. +// JSApiStreamPurgeRequest is optional request information to the purge API. +// Subject will filter the purge request to only messages that match the subject, which can have wildcards. +// Sequence will purge up to but not including this sequence and can be combined with subject filtering. +// Keep will specify how many messages to keep. This can also be combined with subject filtering. +// Note that Sequence and Keep are mutually exclusive, so both can not be set at the same time. +type JSApiStreamPurgeRequest struct { + // Purge up to but not including sequence. + Sequence uint64 `json:"seq,omitempty"` + // Subject to match against messages for the purge command. + Subject string `json:"filter,omitempty"` + // Number of messages to keep. + Keep uint64 `json:"keep,omitempty"` +} + type JSApiStreamPurgeResponse struct { ApiResponse Success bool `json:"success,omitempty"` @@ -377,7 +394,6 @@ type JSApiMsgDeleteRequest struct { NoErase bool `json:"no_erase,omitempty"` } -// JSApiMsgDeleteResponse. 
type JSApiMsgDeleteResponse struct { ApiResponse Success bool `json:"success,omitempty"` @@ -416,8 +432,6 @@ type JSApiStreamRestoreRequest struct { State StreamState `json:"state"` } -const JSApiStreamRestoreRequestType = "io.nats.jetstream.api.v1.stream_restore_request" - // JSApiStreamRestoreResponse is the direct response to the restore request. type JSApiStreamRestoreResponse struct { ApiResponse @@ -486,10 +500,10 @@ const JSApiMetaServerRemoveResponseType = "io.nats.jetstream.api.v1.meta_server_ // JSApiMsgGetRequest get a message request. type JSApiMsgGetRequest struct { - Seq uint64 `json:"seq"` + Seq uint64 `json:"seq,omitempty"` + LastFor string `json:"last_by_subj,omitempty"` } -// JSApiMsgGetResponse. type JSApiMsgGetResponse struct { ApiResponse Message *StoredMsg `json:"message,omitempty"` @@ -500,7 +514,6 @@ const JSApiMsgGetResponseType = "io.nats.jetstream.api.v1.stream_msg_get_respons // JSWaitQueueDefaultMax is the default max number of outstanding requests for pull consumers. const JSWaitQueueDefaultMax = 512 -// JSApiConsumerCreateResponse. type JSApiConsumerCreateResponse struct { ApiResponse *ConsumerInfo @@ -508,7 +521,6 @@ type JSApiConsumerCreateResponse struct { const JSApiConsumerCreateResponseType = "io.nats.jetstream.api.v1.consumer_create_response" -// JSApiConsumerDeleteResponse. type JSApiConsumerDeleteResponse struct { ApiResponse Success bool `json:"success,omitempty"` @@ -516,7 +528,6 @@ type JSApiConsumerDeleteResponse struct { const JSApiConsumerDeleteResponseType = "io.nats.jetstream.api.v1.consumer_delete_response" -// JSApiConsumerInfoResponse. type JSApiConsumerInfoResponse struct { ApiResponse *ConsumerInfo @@ -524,12 +535,10 @@ type JSApiConsumerInfoResponse struct { const JSApiConsumerInfoResponseType = "io.nats.jetstream.api.v1.consumer_info_response" -// JSApiConsumersRequest type JSApiConsumersRequest struct { ApiPagedRequest } -// JSApiConsumerNamesResponse. 
type JSApiConsumerNamesResponse struct { ApiResponse ApiPaged @@ -538,7 +547,6 @@ type JSApiConsumerNamesResponse struct { const JSApiConsumerNamesResponseType = "io.nats.jetstream.api.v1.consumer_names_response" -// JSApiConsumerListResponse. type JSApiConsumerListResponse struct { ApiResponse ApiPaged @@ -562,7 +570,6 @@ type JSApiStreamTemplateCreateResponse struct { const JSApiStreamTemplateCreateResponseType = "io.nats.jetstream.api.v1.stream_template_create_response" -// JSApiStreamTemplateDeleteResponse type JSApiStreamTemplateDeleteResponse struct { ApiResponse Success bool `json:"success,omitempty"` @@ -578,7 +585,6 @@ type JSApiStreamTemplateInfoResponse struct { const JSApiStreamTemplateInfoResponseType = "io.nats.jetstream.api.v1.stream_template_info_response" -// JSApiStreamTemplatesRequest type JSApiStreamTemplatesRequest struct { ApiPagedRequest } @@ -592,64 +598,13 @@ type JSApiStreamTemplateNamesResponse struct { const JSApiStreamTemplateNamesResponseType = "io.nats.jetstream.api.v1.stream_template_names_response" -var ( - jsNotEnabledErr = &ApiError{Code: 503, Description: "JetStream not enabled for account"} - jsBadRequestErr = &ApiError{Code: 400, Description: "bad request"} - jsNotEmptyRequestErr = &ApiError{Code: 400, Description: "expected an empty request payload"} - jsInvalidJSONErr = &ApiError{Code: 400, Description: "invalid JSON"} - jsInsufficientErr = &ApiError{Code: 503, Description: "insufficient resources"} - jsNoConsumerErr = &ApiError{Code: 404, Description: "consumer not found"} - jsStreamMismatchErr = &ApiError{Code: 400, Description: "stream name in subject does not match request"} - jsNoClusterSupportErr = &ApiError{Code: 503, Description: "not currently supported in clustered mode"} - jsClusterNotAvailErr = &ApiError{Code: 503, Description: "JetStream system temporarily unavailable"} - jsClusterRequiredErr = &ApiError{Code: 503, Description: "JetStream clustering support required"} - jsPeerNotMemberErr = &ApiError{Code: 400, 
Description: "peer not a member"} - jsPeerRemapErr = &ApiError{Code: 503, Description: "peer remap failed"} - jsClusterIncompleteErr = &ApiError{Code: 503, Description: "incomplete results"} - jsClusterTagsErr = &ApiError{Code: 400, Description: "tags placement not supported for operation"} - jsClusterNoPeersErr = &ApiError{Code: 400, Description: "no suitable peers for placement"} - jsServerNotMemberErr = &ApiError{Code: 400, Description: "server is not a member of the cluster"} - jsNoMessageFoundErr = &ApiError{Code: 404, Description: "no message found"} - jsNoAccountErr = &ApiError{Code: 503, Description: "account not found"} -) - -// For easier handling of exports and imports. -var allJsExports = []string{ - JSApiAccountInfo, - JSApiTemplateCreate, - JSApiTemplates, - JSApiTemplateInfo, - JSApiTemplateDelete, - JSApiStreamCreate, - JSApiStreamUpdate, - JSApiStreams, - JSApiStreamList, - JSApiStreamInfo, - JSApiStreamDelete, - JSApiStreamPurge, - JSApiStreamSnapshot, - JSApiStreamRestore, - JSApiStreamRemovePeer, - JSApiStreamLeaderStepDown, - JSApiConsumerLeaderStepDown, - JSApiMsgDelete, - JSApiMsgGet, - JSApiConsumerCreate, - JSApiDurableCreate, - JSApiConsumers, - JSApiConsumerList, - JSApiConsumerInfo, - JSApiConsumerDelete, -} - -func (js *jetStream) apiDispatch(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (js *jetStream) apiDispatch(sub *subscription, c *client, acc *Account, subject, reply string, rmsg []byte) { hdr, _ := c.msgParts(rmsg) if len(getHeader(ClientInfoHdr, hdr)) == 0 { return } js.mu.RLock() - s := js.srv - rr := js.apiSubs.Match(subject) + s, rr := js.srv, js.apiSubs.Match(subject) js.mu.RUnlock() // Shortcircuit. @@ -667,7 +622,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, subject, reply st // If this is directly from a client connection ok to do in place. 
if c.kind == CLIENT { - jsub.icb(sub, c, subject, reply, rmsg) + jsub.icb(sub, c, acc, subject, reply, rmsg) return } @@ -678,7 +633,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, subject, reply st atomic.AddInt64(&js.apiCalls, -1) ci, acc, _, msg, err := s.getRequestInfo(c, rmsg) if err == nil { - resp := &ApiResponse{Type: JSApiOverloadedType, Error: jsInsufficientErr} + resp := &ApiResponse{Type: JSApiOverloadedType, Error: ApiErrors[JSInsufficientResourcesErr]} s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } else { s.Warnf(badAPIRequestT, rmsg) @@ -699,7 +654,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, subject, reply st // Dispatch the API call to its own Go routine. go func() { - jsub.icb(sub, client, subject, reply, rmsg) + jsub.icb(sub, client, acc, subject, reply, rmsg) atomic.AddInt64(&js.apiCalls, -1) }() } @@ -707,7 +662,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, subject, reply st func (s *Server) setJetStreamExportSubs() error { js := s.getJetStream() if js == nil { - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledErr] } // This is the catch all now for all JetStream API calls. @@ -854,7 +809,9 @@ func (a *Account) trackAPIErr() { jsa.usage.err++ jsa.apiErrors++ jsa.sendClusterUsageUpdate() + js := jsa.js jsa.mu.Unlock() + atomic.AddInt64(&js.apiErrors, 1) } } @@ -866,14 +823,15 @@ const badAPIRequestT = "Malformed JetStream API Request: %q" func (a *Account) checkJetStream() (enabled, shouldError bool) { a.mu.RLock() defer a.mu.RUnlock() - return a.js != nil, a.nleafs == 0 + return a.js != nil, a.nleafs+a.nrleafs == 0 } // Request for current usage and limits for this account. 
-func (s *Server) jsAccountInfoRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsAccountInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } + ci, acc, _, msg, err := s.getRequestInfo(c, rmsg) if err != nil { s.Warnf(badAPIRequestT, msg) @@ -889,7 +847,7 @@ func (s *Server) jsAccountInfoRequest(sub *subscription, c *client, subject, rep return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -903,7 +861,7 @@ func (s *Server) jsAccountInfoRequest(sub *subscription, c *client, subject, rep if !doErr { return } - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] } else { stats := acc.JetStreamUsage() resp.JetStreamAccountStats = &stats @@ -912,6 +870,7 @@ func (s *Server) jsAccountInfoRequest(sub *subscription, c *client, subject, rep if err != nil { return } + s.sendAPIResponse(ci, acc, subject, reply, string(msg), string(b)) } @@ -929,7 +888,7 @@ func consumerNameFromSubject(subject string) string { } // Request to create a new template. -func (s *Server) jsTemplateCreateRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsTemplateCreateRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil { return } @@ -941,34 +900,34 @@ func (s *Server) jsTemplateCreateRequest(sub *subscription, c *client, subject, var resp = JSApiStreamTemplateCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamTemplateCreateResponseType}} if !acc.JetStreamEnabled() { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } // Not supported for now. 
if s.JetStreamIsClustered() { - resp.Error = jsNoClusterSupportErr + resp.Error = ApiErrors[JSClusterUnSupportFeatureErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } var cfg StreamTemplateConfig if err := json.Unmarshal(msg, &cfg); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } templateName := templateNameFromSubject(subject) if templateName != cfg.Name { - resp.Error = &ApiError{Code: 400, Description: "template name in subject does not match request"} + resp.Error = ApiErrors[JSTemplateNameNotMatchSubjectErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } t, err := acc.addStreamTemplate(&cfg) if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamTemplateCreateErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -984,7 +943,7 @@ func (s *Server) jsTemplateCreateRequest(sub *subscription, c *client, subject, } // Request for the list of all template names. -func (s *Server) jsTemplateNamesRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsTemplateNamesRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil { return } @@ -996,14 +955,14 @@ func (s *Server) jsTemplateNamesRequest(sub *subscription, c *client, subject, r var resp = JSApiStreamTemplateNamesResponse{ApiResponse: ApiResponse{Type: JSApiStreamTemplateNamesResponseType}} if !acc.JetStreamEnabled() { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } // Not supported for now. 
if s.JetStreamIsClustered() { - resp.Error = jsNoClusterSupportErr + resp.Error = ApiErrors[JSClusterUnSupportFeatureErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1012,7 +971,7 @@ func (s *Server) jsTemplateNamesRequest(sub *subscription, c *client, subject, r if !isEmptyRequest(msg) { var req JSApiStreamTemplatesRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1048,7 +1007,7 @@ func (s *Server) jsTemplateNamesRequest(sub *subscription, c *client, subject, r } // Request for information about a stream template. -func (s *Server) jsTemplateInfoRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsTemplateInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil { return } @@ -1060,19 +1019,19 @@ func (s *Server) jsTemplateInfoRequest(sub *subscription, c *client, subject, re var resp = JSApiStreamTemplateInfoResponse{ApiResponse: ApiResponse{Type: JSApiStreamTemplateInfoResponseType}} if !acc.JetStreamEnabled() { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if !isEmptyRequest(msg) { - resp.Error = jsNotEmptyRequestErr + resp.Error = ApiErrors[JSNotEmptyRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } name := templateNameFromSubject(subject) t, err := acc.lookupStreamTemplate(name) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamTemplateNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1089,7 +1048,7 @@ func (s *Server) jsTemplateInfoRequest(sub *subscription, c *client, subject, 
re } // Request to delete a stream template. -func (s *Server) jsTemplateDeleteRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsTemplateDeleteRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil { return } @@ -1101,19 +1060,19 @@ func (s *Server) jsTemplateDeleteRequest(sub *subscription, c *client, subject, var resp = JSApiStreamTemplateDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamTemplateDeleteResponseType}} if !acc.JetStreamEnabled() { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if !isEmptyRequest(msg) { - resp.Error = jsNotEmptyRequestErr + resp.Error = ApiErrors[JSNotEmptyRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } name := templateNameFromSubject(subject) err = acc.deleteStreamTemplate(name) if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamTemplateDeleteErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1130,22 +1089,8 @@ func (s *Server) jsonResponse(v interface{}) string { return string(b) } -func jsError(err error) *ApiError { - return &ApiError{ - Code: 500, - Description: err.Error(), - } -} - -func jsNotFoundError(err error) *ApiError { - return &ApiError{ - Code: 404, - Description: err.Error(), - } -} - // Request to create a stream. 
-func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, a *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -1164,7 +1109,7 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1176,20 +1121,20 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } var cfg StreamConfig if err := json.Unmarshal(msg, &cfg); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } streamName := streamNameFromSubject(subject) if streamName != cfg.Name { - resp.Error = jsStreamMismatchErr + resp.Error = ApiErrors[JSStreamMismatchErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1223,22 +1168,22 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re // Do some pre-checking for mirror config to avoid cycles in clustered mode. 
if cfg.Mirror != nil { if len(cfg.Subjects) > 0 { - resp.Error = &ApiError{Code: 400, Description: "stream mirrors can not also contain subjects"} + resp.Error = ApiErrors[JSMirrorWithSubjectsErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if len(cfg.Sources) > 0 { - resp.Error = &ApiError{Code: 400, Description: "stream mirrors can not also contain other sources"} + resp.Error = ApiErrors[JSMirrorWithSourcesErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if cfg.Mirror.FilterSubject != _EMPTY_ { - resp.Error = &ApiError{Code: 400, Description: "stream mirrors can not contain filtered subjects"} + resp.Error = ApiErrors[JSMirrorWithSubjectFiltersErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if cfg.Mirror.OptStartSeq > 0 && cfg.Mirror.OptStartTime != nil { - resp.Error = &ApiError{Code: 400, Description: "stream mirrors can not have both start seq and start time configured"} + resp.Error = ApiErrors[JSMirrorWithStartSeqAndTimeErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1253,7 +1198,7 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re streamSubs = append(streamSubs, subs...) 
} if exists && cfg.MaxMsgSize > 0 && maxMsgSize > 0 && cfg.MaxMsgSize < maxMsgSize { - resp.Error = &ApiError{Code: 400, Description: "stream mirror must have max message size >= source"} + resp.Error = ApiErrors[JSMirrorMaxMessageSizeTooBigErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1282,7 +1227,7 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re apiPrefixes = append(apiPrefixes, src.External.ApiPrefix) } if exists && cfg.MaxMsgSize > 0 && maxMsgSize > 0 && cfg.MaxMsgSize < maxMsgSize { - resp.Error = &ApiError{Code: 400, Description: "stream source must have max message size >= target"} + resp.Error = ApiErrors[JSSourceMaxMessageSizeTooBigErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1291,13 +1236,13 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re // check prefix overlap with subjects for _, pfx := range deliveryPrefixes { if !IsValidPublishSubject(pfx) { - resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("stream external delivery prefix %q must be a valid subject without wildcards", pfx)} + resp.Error = ApiErrors[JSStreamInvalidExternalDeliverySubjErrF].NewT("{prefix}", pfx) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } for _, sub := range streamSubs { if SubjectsCollide(sub, fmt.Sprintf("%s.%s", pfx, sub)) { - resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("stream external delivery prefix %q overlaps with stream subject %q", pfx, sub)} + resp.Error = ApiErrors[JSStreamExternalDelPrefixOverlapsErrF].NewT("{prefix}", pfx, "{subject}", sub) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1311,7 +1256,7 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re return } if SubjectsCollide(apiPfx, JSApiPrefix) { - resp.Error = &ApiError{Code: 400, 
Description: fmt.Sprintf("stream external api prefix %q must not overlap with %s", apiPfx, JSApiPrefix)} + resp.Error = ApiErrors[JSStreamExternalApiOverlapErrF].NewT("{prefix}", apiPfx, "{subject}", JSApiPrefix) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1324,7 +1269,7 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re mset, err := acc.addStream(&cfg) if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamCreateErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1333,7 +1278,7 @@ func (s *Server) jsStreamCreateRequest(sub *subscription, c *client, subject, re } // Request to update a stream. -func (s *Server) jsStreamUpdateRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamUpdateRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -1353,7 +1298,7 @@ func (s *Server) jsStreamUpdateRequest(sub *subscription, c *client, subject, re return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1365,28 +1310,28 @@ func (s *Server) jsStreamUpdateRequest(sub *subscription, c *client, subject, re if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } var ncfg StreamConfig if err := json.Unmarshal(msg, &ncfg); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } cfg, err := checkStreamCfg(&ncfg) if err != nil 
{ - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamInvalidConfigF].NewT("{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } streamName := streamNameFromSubject(subject) if streamName != cfg.Name { - resp.Error = jsStreamMismatchErr + resp.Error = ApiErrors[JSStreamMismatchErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1398,13 +1343,13 @@ func (s *Server) jsStreamUpdateRequest(sub *subscription, c *client, subject, re mset, err := acc.lookupStream(streamName) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if err := mset.update(&cfg); err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamUpdateErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1416,7 +1361,7 @@ func (s *Server) jsStreamUpdateRequest(sub *subscription, c *client, subject, re } // Request for the list of all stream names. 
-func (s *Server) jsStreamNamesRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamNamesRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -1435,7 +1380,7 @@ func (s *Server) jsStreamNamesRequest(sub *subscription, c *client, subject, rep return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1447,7 +1392,7 @@ func (s *Server) jsStreamNamesRequest(sub *subscription, c *client, subject, rep if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return @@ -1459,7 +1404,7 @@ func (s *Server) jsStreamNamesRequest(sub *subscription, c *client, subject, rep if !isEmptyRequest(msg) { var req JSApiStreamNamesRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1480,7 +1425,7 @@ func (s *Server) jsStreamNamesRequest(sub *subscription, c *client, subject, rep } js.mu.RLock() for stream, sa := range cc.streams[acc.Name] { - if sa.err == ErrJetStreamNotAssigned { + if sa.err == ApiErrors[JSClusterNotAssignedErr] { continue } if filter != _EMPTY_ { @@ -1540,7 +1485,7 @@ func (s *Server) jsStreamNamesRequest(sub *subscription, c *client, subject, rep // Request for the list of all detailed stream info. 
// TODO(dlc) - combine with above long term -func (s *Server) jsStreamListRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamListRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -1562,7 +1507,7 @@ func (s *Server) jsStreamListRequest(sub *subscription, c *client, subject, repl return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1574,7 +1519,7 @@ func (s *Server) jsStreamListRequest(sub *subscription, c *client, subject, repl if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return @@ -1584,7 +1529,7 @@ func (s *Server) jsStreamListRequest(sub *subscription, c *client, subject, repl if !isEmptyRequest(msg) { var req JSApiStreamNamesRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1630,7 +1575,7 @@ func (s *Server) jsStreamListRequest(sub *subscription, c *client, subject, repl } // Request for information about a stream. -func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, a *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -1660,18 +1605,18 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, subject, repl // We can't find the stream, so mimic what would be the errors below. 
if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } // No stream present. - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } else if sa == nil { if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] // Delaying an error response gives the leader a chance to respond before us s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil) } @@ -1684,7 +1629,7 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, subject, repl // We have the stream assigned and a leader, so only the stream leader should answer. if !acc.JetStreamIsStreamLeader(streamName) && !isLeaderless { if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] // Delaying an error response gives the leader a chance to respond before us s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), sa.Group) } @@ -1694,7 +1639,7 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, subject, repl if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return @@ -1704,7 +1649,7 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, subject, repl if !isEmptyRequest(msg) { var req JSApiStreamInfoRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) 
return } @@ -1713,7 +1658,7 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, subject, repl mset, err := acc.lookupStream(streamName) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1742,7 +1687,7 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, subject, repl } // Request to have a stream leader stepdown. -func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -1759,7 +1704,7 @@ func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, sub // If we are not in clustered mode this is a failed request. if !s.JetStreamIsClustered() { - resp.Error = jsClusterRequiredErr + resp.Error = ApiErrors[JSClusterRequiredErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1770,7 +1715,7 @@ func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, sub return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1780,7 +1725,7 @@ func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, sub js.mu.RUnlock() if isLeader && sa == nil { - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } else if sa == nil { @@ -1789,20 +1734,20 @@ func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, sub if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr 
{ - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } if !isEmptyRequest(msg) { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } // Check to see if we are a member of the group and if the group has no leader. if js.isGroupLeaderless(sa.Group) { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1814,7 +1759,7 @@ func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, sub mset, err := acc.lookupStream(name) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1831,7 +1776,7 @@ func (s *Server) jsStreamLeaderStepDownRequest(sub *subscription, c *client, sub } // Request to have a consumer leader stepdown. -func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -1845,7 +1790,7 @@ func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, s // If we are not in clustered mode this is a failed request. 
if !s.JetStreamIsClustered() { - resp.Error = jsClusterRequiredErr + resp.Error = ApiErrors[JSClusterRequiredErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1856,7 +1801,7 @@ func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, s return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1870,7 +1815,7 @@ func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, s js.mu.RUnlock() if isLeader && sa == nil { - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } else if sa == nil { @@ -1881,13 +1826,13 @@ func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, s ca = sa.consumers[consumer] } if ca == nil { - resp.Error = jsNoConsumerErr + resp.Error = ApiErrors[JSConsumerNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } // Check to see if we are a member of the group and if the group has no leader. 
if js.isGroupLeaderless(ca.Group) { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1898,26 +1843,26 @@ func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, s if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } if !isEmptyRequest(msg) { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } mset, err := acc.lookupStream(stream) if err != nil { - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } o := mset.lookupConsumer(consumer) if o == nil { - resp.Error = jsNoConsumerErr + resp.Error = ApiErrors[JSConsumerNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1930,7 +1875,7 @@ func (s *Server) jsConsumerLeaderStepDownRequest(sub *subscription, c *client, s } // Request to remove a peer from a clustered stream. -func (s *Server) jsStreamRemovePeerRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamRemovePeerRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -1947,7 +1892,7 @@ func (s *Server) jsStreamRemovePeerRequest(sub *subscription, c *client, subject // If we are not in clustered mode this is a failed request. 
if !s.JetStreamIsClustered() { - resp.Error = jsClusterRequiredErr + resp.Error = ApiErrors[JSClusterRequiredErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1958,7 +1903,7 @@ func (s *Server) jsStreamRemovePeerRequest(sub *subscription, c *client, subject return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -1974,32 +1919,32 @@ func (s *Server) jsStreamRemovePeerRequest(sub *subscription, c *client, subject if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } if isEmptyRequest(msg) { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } var req JSApiStreamRemovePeerRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if req.Peer == _EMPTY_ { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if sa == nil { // No stream present. - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2015,14 +1960,14 @@ func (s *Server) jsStreamRemovePeerRequest(sub *subscription, c *client, subject // Make sure we are a member. 
if !isMember { - resp.Error = jsPeerNotMemberErr + resp.Error = ApiErrors[JSClusterPeerNotMemberErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } // If we are here we have a valid peer member set for removal. if !js.removePeerFromStream(sa, nodeName) { - resp.Error = jsPeerRemapErr + resp.Error = ApiErrors[JSPeerRemapErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2032,7 +1977,7 @@ func (s *Server) jsStreamRemovePeerRequest(sub *subscription, c *client, subject } // Request to have the metaleader remove a peer from the system. -func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -2060,14 +2005,14 @@ func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, subje var resp = JSApiMetaServerRemoveResponse{ApiResponse: ApiResponse{Type: JSApiMetaServerRemoveResponseType}} if isEmptyRequest(msg) { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } var req JSApiMetaServerRemoveRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2084,7 +2029,7 @@ func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, subje js.mu.RUnlock() if found == _EMPTY_ { - resp.Error = jsServerNotMemberErr + resp.Error = ApiErrors[JSClusterServerNotMemberErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2100,7 +2045,7 @@ func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, subje 
// Request to have the meta leader stepdown. // These will only be received the the meta leaders, so less checking needed. -func (s *Server) jsLeaderStepDownRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsLeaderStepDownRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -2131,13 +2076,13 @@ func (s *Server) jsLeaderStepDownRequest(sub *subscription, c *client, subject, if !isEmptyRequest(msg) { var req JSApiLeaderStepdownRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if len(req.Placement.Tags) > 0 { // Tags currently not supported. - resp.Error = jsClusterTagsErr + resp.Error = ApiErrors[JSClusterTagsErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2153,7 +2098,7 @@ func (s *Server) jsLeaderStepDownRequest(sub *subscription, c *client, subject, } } if len(peers) == 0 { - resp.Error = jsClusterNoPeersErr + resp.Error = ApiErrors[JSClusterNoPeersErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2166,9 +2111,8 @@ func (s *Server) jsLeaderStepDownRequest(sub *subscription, c *client, subject, // Call actual stepdown. err = cc.meta.StepDown(preferredLeader) - if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSRaftGeneralErrF].ErrOrNewT(err, "{err}", err) } else { resp.Success = true } @@ -2195,7 +2139,7 @@ func isEmptyRequest(req []byte) bool { } // Request to delete a stream. 
-func (s *Server) jsStreamDeleteRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamDeleteRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -2214,7 +2158,7 @@ func (s *Server) jsStreamDeleteRequest(sub *subscription, c *client, subject, re return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2226,14 +2170,14 @@ func (s *Server) jsStreamDeleteRequest(sub *subscription, c *client, subject, re if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } if !isEmptyRequest(msg) { - resp.Error = jsNotEmptyRequestErr + resp.Error = ApiErrors[JSNotEmptyRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2247,13 +2191,13 @@ func (s *Server) jsStreamDeleteRequest(sub *subscription, c *client, subject, re mset, err := acc.lookupStream(stream) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if err := mset.delete(); err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamDeleteErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2263,7 +2207,7 @@ func (s *Server) jsStreamDeleteRequest(sub *subscription, c *client, subject, re // Request to delete a message. // This expects a stream sequence number as the msg body. 
-func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -2285,7 +2229,7 @@ func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2298,13 +2242,13 @@ func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply // We can't find the stream, so mimic what would be the errors below. if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } // No stream present. - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } else if sa == nil { @@ -2313,7 +2257,7 @@ func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply // Check to see if we are a member of the group and if the group has no leader. 
if js.isGroupLeaderless(sa.Group) { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2326,26 +2270,26 @@ func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } if isEmptyRequest(msg) { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } var req JSApiMsgDeleteRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } mset, err := acc.lookupStream(stream) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2362,9 +2306,9 @@ func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply removed, err = mset.eraseMsg(req.Seq) } if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamMsgDeleteFailedF].ErrOrNewT(err, "{err}", err) } else if !removed { - resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("sequence [%d] not found", req.Seq)} + resp.Error = ApiErrors[JSSequenceNotFoundErrF].NewT("{seq}", req.Seq) } else { resp.Success = true } @@ -2372,7 +2316,7 @@ func (s *Server) jsMsgDeleteRequest(sub *subscription, c *client, subject, reply } // Request to get a raw stream message. 
-func (s *Server) jsMsgGetRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsMsgGetRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -2394,7 +2338,7 @@ func (s *Server) jsMsgGetRequest(sub *subscription, c *client, subject, reply st return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2407,13 +2351,13 @@ func (s *Server) jsMsgGetRequest(sub *subscription, c *client, subject, reply st // We can't find the stream, so mimic what would be the errors below. if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } // No stream present. - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } else if sa == nil { @@ -2422,7 +2366,7 @@ func (s *Server) jsMsgGetRequest(sub *subscription, c *client, subject, reply st // Check to see if we are a member of the group and if the group has no leader. 
if js.isGroupLeaderless(sa.Group) { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2435,39 +2379,55 @@ func (s *Server) jsMsgGetRequest(sub *subscription, c *client, subject, reply st if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } if isEmptyRequest(msg) { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } var req JSApiMsgGetRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] + s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) + return + } + + // Check that we do not have both options set. 
+ if req.Seq > 0 && req.LastFor != _EMPTY_ || req.Seq == 0 && req.LastFor == _EMPTY_ { + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } mset, err := acc.lookupStream(stream) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } - subj, hdr, msg, ts, err := mset.store.LoadMsg(req.Seq) + var subj string + var hdr []byte + var ts int64 + seq := req.Seq + + if req.Seq > 0 { + subj, hdr, msg, ts, err = mset.store.LoadMsg(req.Seq) + } else { + subj, seq, hdr, msg, ts, err = mset.store.LoadLastMsg(req.LastFor) + } if err != nil { - resp.Error = jsNoMessageFoundErr + resp.Error = ApiErrors[JSNoMessageFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } resp.Message = &StoredMsg{ Subject: subj, - Sequence: req.Seq, + Sequence: seq, Header: hdr, Data: msg, Time: time.Unix(0, ts).UTC(), @@ -2476,7 +2436,7 @@ func (s *Server) jsMsgGetRequest(sub *subscription, c *client, subject, reply st } // Request to purge a stream. -func (s *Server) jsStreamPurgeRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamPurgeRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -2506,18 +2466,18 @@ func (s *Server) jsStreamPurgeRequest(sub *subscription, c *client, subject, rep // We can't find the stream, so mimic what would be the errors below. if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } // No stream present. 
- resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } else if sa == nil { if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return @@ -2525,7 +2485,7 @@ func (s *Server) jsStreamPurgeRequest(sub *subscription, c *client, subject, rep // Check to see if we are a member of the group and if the group has no leader. if js.isGroupLeaderless(sa.Group) { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2533,7 +2493,7 @@ func (s *Server) jsStreamPurgeRequest(sub *subscription, c *client, subject, rep // We have the stream assigned and a leader, so only the stream leader should answer. if !acc.JetStreamIsStreamLeader(stream) { if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return @@ -2542,32 +2502,43 @@ func (s *Server) jsStreamPurgeRequest(sub *subscription, c *client, subject, rep if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } + var purgeRequest *JSApiStreamPurgeRequest if !isEmptyRequest(msg) { - resp.Error = jsNotEmptyRequestErr - s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) - return + var req JSApiStreamPurgeRequest + if err := json.Unmarshal(msg, &req); err != nil { + resp.Error = ApiErrors[JSInvalidJSONErr] + s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) + return + 
} + if req.Sequence > 0 && req.Keep > 0 { + resp.Error = ApiErrors[JSBadRequestErr] + s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) + return + } + purgeRequest = &req } + mset, err := acc.lookupStream(stream) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if s.JetStreamIsClustered() { - s.jsClusteredStreamPurgeRequest(ci, acc, mset, stream, subject, reply, rmsg) + s.jsClusteredStreamPurgeRequest(ci, acc, mset, stream, subject, reply, rmsg, purgeRequest) return } - purged, err := mset.purge() + purged, err := mset.purge(purgeRequest) if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamGeneralErrorF].ErrOrNewT(err, "{err}", err) } else { resp.Purged = purged resp.Success = true @@ -2576,7 +2547,7 @@ func (s *Server) jsStreamPurgeRequest(sub *subscription, c *client, subject, rep } // Request to restore a stream. 
-func (s *Server) jsStreamRestoreRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamRestoreRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamIsLeader() { return } @@ -2588,19 +2559,19 @@ func (s *Server) jsStreamRestoreRequest(sub *subscription, c *client, subject, r var resp = JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}} if !acc.JetStreamEnabled() { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if isEmptyRequest(msg) { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } var req JSApiStreamRestoreRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -2617,14 +2588,14 @@ func (s *Server) jsStreamRestoreRequest(sub *subscription, c *client, subject, r } if _, err := acc.lookupStream(stream); err == nil { - resp.Error = jsError(ErrJetStreamStreamAlreadyUsed) + resp.Error = ApiErrors[JSStreamNameExistErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return @@ -2649,8 +2620,8 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC tfile, err := ioutil.TempFile(snapDir, "js-restore-") if err != nil { - resp.Error = &ApiError{Code: 500, Description: "JetStream unable to open temp storage for restore"} - 
s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) + resp.Error = ApiErrors[JSTempStorageFailedErr] + s.sendAPIErrResponse(ci, acc, subject, reply, msg, s.jsonResponse(&resp)) return nil } @@ -2685,7 +2656,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC var total int // FIXM(dlc) - Probably take out of network path eventually due to disk I/O? - processChunk := func(sub *subscription, c *client, subject, reply string, msg []byte) { + processChunk := func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { // We require reply subjects to communicate back failures, flow etc. If they do not have one log and cancel. if reply == _EMPTY_ { sub.client.processUnsub(sub.sid) @@ -2719,7 +2690,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC total += len(msg) if js.wouldExceedLimits(FileStorage, total) { s.resourcesExeededError() - resultCh <- result{ErrJetStreamResourcesExceeded, reply} + resultCh <- result{ApiErrors[JSInsufficientResourcesErr], reply} return } @@ -2741,14 +2712,14 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC if err != nil { tfile.Close() os.Remove(tfile.Name()) - resp.Error = &ApiError{Code: 500, Description: "JetStream unable to subscribe to restore snapshot " + restoreSubj + ": " + err.Error()} - s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) + resp.Error = ApiErrors[JSRestoreSubscribeFailedErrF].NewT("{subject}", restoreSubj, "{err}", err) + s.sendAPIErrResponse(ci, acc, subject, reply, msg, s.jsonResponse(&resp)) return nil } // Mark the subject so the end user knows where to send the snapshot chunks. 
resp.DeliverSubject = restoreSubj - s.sendAPIResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(resp)) + s.sendAPIResponse(ci, acc, subject, reply, msg, s.jsonResponse(resp)) doneCh := make(chan error, 1) @@ -2804,7 +2775,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC var resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}} if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamRestoreErrF].ErrOrNewT(err, "{err}", err) s.Warnf("Restore failed for %s for stream '%s > %s' in %v", friendlyBytes(int64(total)), streamName, acc.Name, end.Sub(start)) } else { @@ -2833,7 +2804,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC } // Process a snapshot request. -func (s *Server) jsStreamSnapshotRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsStreamSnapshotRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -2853,26 +2824,26 @@ func (s *Server) jsStreamSnapshotRequest(sub *subscription, c *client, subject, var resp = JSApiStreamSnapshotResponse{ApiResponse: ApiResponse{Type: JSApiStreamSnapshotResponseType}} if !acc.JetStreamEnabled() { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, smsg, s.jsonResponse(&resp)) return } if isEmptyRequest(msg) { - resp.Error = jsBadRequestErr + resp.Error = ApiErrors[JSBadRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, smsg, s.jsonResponse(&resp)) return } mset, err := acc.lookupStream(stream) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, smsg, s.jsonResponse(&resp)) return } if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr 
+ resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, smsg, s.jsonResponse(&resp)) } return @@ -2880,12 +2851,12 @@ func (s *Server) jsStreamSnapshotRequest(sub *subscription, c *client, subject, var req JSApiStreamSnapshotRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, smsg, s.jsonResponse(&resp)) return } if !IsValidSubject(req.DeliverSubject) { - resp.Error = &ApiError{Code: 400, Description: "deliver subject not valid"} + resp.Error = ApiErrors[JSSnapshotDeliverSubjectInvalidErr] s.sendAPIErrResponse(ci, acc, subject, reply, smsg, s.jsonResponse(&resp)) return } @@ -2904,7 +2875,7 @@ func (s *Server) jsStreamSnapshotRequest(sub *subscription, c *client, subject, sr, err := mset.snapshot(0, req.CheckMsgs, !req.NoConsumers) if err != nil { s.Warnf("Snapshot of stream '%s > %s' failed: %v", mset.jsa.account.Name, mset.name(), err) - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamSnapshotErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, smsg, s.jsonResponse(&resp)) return } @@ -2991,7 +2962,7 @@ func (s *Server) streamSnapshot(ci *ClientInfo, acc *Account, mset *stream, sr * // We will place sequence number and size of chunk sent in the reply. ackSubj := fmt.Sprintf(jsSnapshotAckT, mset.name(), nuid.Next()) - ackSub, _ := mset.subscribeInternalUnlocked(ackSubj+".>", func(_ *subscription, _ *client, subject, _ string, _ []byte) { + ackSub, _ := mset.subscribeInternalUnlocked(ackSubj+".>", func(_ *subscription, _ *client, _ *Account, subject, _ string, _ []byte) { cs, _ := strconv.Atoi(tokenAt(subject, 6)) // This is very crude and simple, but ok for now. // This only matters when sending multiple chunks. @@ -3038,16 +3009,16 @@ done: } // Request to create a durable consumer. 
-func (s *Server) jsDurableCreateRequest(sub *subscription, c *client, subject, reply string, msg []byte) { - s.jsConsumerCreate(sub, c, subject, reply, msg, true) +func (s *Server) jsDurableCreateRequest(sub *subscription, c *client, acc *Account, subject, reply string, msg []byte) { + s.jsConsumerCreate(sub, c, acc, subject, reply, msg, true) } // Request to create a consumer. -func (s *Server) jsConsumerCreateRequest(sub *subscription, c *client, subject, reply string, msg []byte) { - s.jsConsumerCreate(sub, c, subject, reply, msg, false) +func (s *Server) jsConsumerCreateRequest(sub *subscription, c *client, acc *Account, subject, reply string, msg []byte) { + s.jsConsumerCreate(sub, c, acc, subject, reply, msg, false) } -func (s *Server) jsConsumerCreate(sub *subscription, c *client, subject, reply string, rmsg []byte, expectDurable bool) { +func (s *Server) jsConsumerCreate(sub *subscription, c *client, a *Account, subject, reply string, rmsg []byte, expectDurable bool) { if c == nil || !s.JetStreamEnabled() { return } @@ -3069,7 +3040,7 @@ func (s *Server) jsConsumerCreate(sub *subscription, c *client, subject, reply s var req CreateConsumerRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3087,7 +3058,7 @@ func (s *Server) jsConsumerCreate(sub *subscription, c *client, subject, reply s return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3100,43 +3071,43 @@ func (s *Server) jsConsumerCreate(sub *subscription, c *client, subject, reply s if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, 
string(msg), s.jsonResponse(&resp)) } return } if streamName != req.Stream { - resp.Error = jsStreamMismatchErr + resp.Error = ApiErrors[JSStreamMismatchErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if expectDurable { if numTokens(subject) != 7 { - resp.Error = &ApiError{Code: 400, Description: "consumer expected to be durable but no durable name set in subject"} + resp.Error = ApiErrors[JSConsumerDurableNameNotInSubjectErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } // Now check on requirements for durable request. if req.Config.Durable == _EMPTY_ { - resp.Error = &ApiError{Code: 400, Description: "consumer expected to be durable but a durable name was not set"} + resp.Error = ApiErrors[JSConsumerDurableNameNotSetErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } consumerName := tokenAt(subject, 7) if consumerName != req.Config.Durable { - resp.Error = &ApiError{Code: 400, Description: "consumer name in subject does not match durable name in request"} + resp.Error = ApiErrors[JSConsumerDurableNameNotMatchSubjectErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } } else { if numTokens(subject) != 5 { - resp.Error = &ApiError{Code: 400, Description: "consumer expected to be ephemeral but detected a durable name set in subject"} + resp.Error = ApiErrors[JSConsumerEphemeralWithDurableInSubjectErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if req.Config.Durable != _EMPTY_ { - resp.Error = &ApiError{Code: 400, Description: "consumer expected to be ephemeral but a durable name was set in request"} + resp.Error = ApiErrors[JSConsumerEphemeralWithDurableNameErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3149,14 +3120,14 @@ func (s *Server) jsConsumerCreate(sub *subscription, c *client, 
subject, reply s stream, err := acc.lookupStream(req.Stream) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } o, err := stream.addConsumer(&req.Config) if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSConsumerCreateErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3165,7 +3136,7 @@ func (s *Server) jsConsumerCreate(sub *subscription, c *client, subject, reply s } // Request for the list of all consumer names. -func (s *Server) jsConsumerNamesRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsConsumerNamesRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -3187,7 +3158,7 @@ func (s *Server) jsConsumerNamesRequest(sub *subscription, c *client, subject, r return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3199,7 +3170,7 @@ func (s *Server) jsConsumerNamesRequest(sub *subscription, c *client, subject, r if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return @@ -3209,7 +3180,7 @@ func (s *Server) jsConsumerNamesRequest(sub *subscription, c *client, subject, r if !isEmptyRequest(msg) { var req JSApiConsumersRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3229,14 +3200,14 @@ func (s 
*Server) jsConsumerNamesRequest(sub *subscription, c *client, subject, r sas := cc.streams[acc.Name] if sas == nil { js.mu.RUnlock() - resp.Error = jsNotFoundError(ErrJetStreamNotEnabled) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } sa := sas[streamName] if sa == nil || sa.err != nil { js.mu.RUnlock() - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3256,7 +3227,7 @@ func (s *Server) jsConsumerNamesRequest(sub *subscription, c *client, subject, r } else { mset, err := acc.lookupStream(streamName) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3285,7 +3256,7 @@ func (s *Server) jsConsumerNamesRequest(sub *subscription, c *client, subject, r } // Request for the list of all detailed consumer information. 
-func (s *Server) jsConsumerListRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsConsumerListRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -3308,7 +3279,7 @@ func (s *Server) jsConsumerListRequest(sub *subscription, c *client, subject, re return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3320,7 +3291,7 @@ func (s *Server) jsConsumerListRequest(sub *subscription, c *client, subject, re if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return @@ -3330,7 +3301,7 @@ func (s *Server) jsConsumerListRequest(sub *subscription, c *client, subject, re if !isEmptyRequest(msg) { var req JSApiConsumersRequest if err := json.Unmarshal(msg, &req); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3350,7 +3321,7 @@ func (s *Server) jsConsumerListRequest(sub *subscription, c *client, subject, re mset, err := acc.lookupStream(streamName) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3378,7 +3349,7 @@ func (s *Server) jsConsumerListRequest(sub *subscription, c *client, subject, re } // Request for information about an consumer. 
-func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -3407,29 +3378,30 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, subject, re if ca != nil && ca.pending { ca = nil } + ourID := cc.meta.ID() js.mu.RUnlock() if isLeader && ca == nil { // We can't find the consumer, so mimic what would be the errors below. if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } if sa == nil { - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } // If we are here the consumer is not present. - resp.Error = jsNoConsumerErr + resp.Error = ApiErrors[JSConsumerNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } else if ca == nil { if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] // Delaying an error response gives the leader a chance to respond before us s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil) } @@ -3438,7 +3410,7 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, subject, re // Check to see if we are a member of the group and if the group has no leader. 
if js.isGroupLeaderless(ca.Group) { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3446,14 +3418,16 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, subject, re // We have the consumer assigned and a leader, so only the consumer leader should answer. if !acc.JetStreamIsConsumerLeader(streamName, consumerName) { if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] // Delaying an error response gives the leader a chance to respond before us s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), ca.Group) - } else if rg := ca.Group; rg != nil && rg.node != nil && rg.isMember(cc.meta.ID()) { - // Check here if we are a member and this is just a new consumer that does not have a leader yet. - if rg.node.GroupLeader() == _EMPTY_ && !rg.node.HadPreviousLeader() { - resp.Error = jsNoConsumerErr - s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil) + } else if ca != nil { + if rg := ca.Group; rg != nil && rg.node != nil && rg.isMember(ourID) { + // Check here if we are a member and this is just a new consumer that does not have a leader yet. 
+ if rg.node.GroupLeader() == _EMPTY_ && !rg.node.HadPreviousLeader() { + resp.Error = ApiErrors[JSConsumerNotFoundErr] + s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil) + } } } return @@ -3461,26 +3435,26 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, subject, re } if !acc.JetStreamEnabled() { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if !isEmptyRequest(msg) { - resp.Error = jsNotEmptyRequestErr + resp.Error = ApiErrors[JSNotEmptyRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } mset, err := acc.lookupStream(streamName) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } obs := mset.lookupConsumer(consumerName) if obs == nil { - resp.Error = jsNoConsumerErr + resp.Error = ApiErrors[JSConsumerNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3489,7 +3463,7 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, subject, re } // Request to delete an Consumer. 
-func (s *Server) jsConsumerDeleteRequest(sub *subscription, c *client, subject, reply string, rmsg []byte) { +func (s *Server) jsConsumerDeleteRequest(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { if c == nil || !s.JetStreamEnabled() { return } @@ -3508,7 +3482,7 @@ func (s *Server) jsConsumerDeleteRequest(sub *subscription, c *client, subject, return } if js.isLeaderless() { - resp.Error = jsClusterNotAvailErr + resp.Error = ApiErrors[JSClusterNotAvailErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3520,13 +3494,13 @@ func (s *Server) jsConsumerDeleteRequest(sub *subscription, c *client, subject, if hasJS, doErr := acc.checkJetStream(); !hasJS { if doErr { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) } return } if !isEmptyRequest(msg) { - resp.Error = jsNotEmptyRequestErr + resp.Error = ApiErrors[JSNotEmptyRequestErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } @@ -3540,19 +3514,19 @@ func (s *Server) jsConsumerDeleteRequest(sub *subscription, c *client, subject, mset, err := acc.lookupStream(stream) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } obs := mset.lookupConsumer(consumer) if obs == nil { - resp.Error = jsNoConsumerErr + resp.Error = ApiErrors[JSConsumerNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } if err := obs.delete(); err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamGeneralErrorF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp)) return } diff --git 
a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go index 1f715187..96536aaa 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go @@ -128,11 +128,12 @@ type consumerAssignment struct { // streamPurge is what the stream leader will replicate when purging a stream. type streamPurge struct { - Client *ClientInfo `json:"client,omitempty"` - Stream string `json:"stream"` - LastSeq uint64 `json:"last_seq"` - Subject string `json:"subject"` - Reply string `json:"reply"` + Client *ClientInfo `json:"client,omitempty"` + Stream string `json:"stream"` + LastSeq uint64 `json:"last_seq"` + Subject string `json:"subject"` + Reply string `json:"reply"` + Request *JSApiStreamPurgeRequest `json:"request,omitempty"` } // streamMsgDelete is what the stream leader will replicate when deleting a message. @@ -204,7 +205,7 @@ func (s *Server) JetStreamIsCurrent() bool { func (s *Server) JetStreamSnapshotMeta() error { js := s.getJetStream() if js == nil { - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledErr] } js.mu.RLock() cc := js.cluster @@ -222,10 +223,10 @@ func (s *Server) JetStreamSnapshotMeta() error { func (s *Server) JetStreamStepdownStream(account, stream string) error { js, cc := s.getJetStreamCluster() if js == nil { - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledErr] } if cc == nil { - return ErrJetStreamNotClustered + return ApiErrors[JSClusterNotActiveErr] } // Grab account acc, err := s.LookupAccount(account) @@ -248,10 +249,10 @@ func (s *Server) JetStreamStepdownStream(account, stream string) error { func (s *Server) JetStreamSnapshotStream(account, stream string) error { js, cc := s.getJetStreamCluster() if js == nil { - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledForAccountErr] } if cc == nil { - return ErrJetStreamNotClustered + 
return ApiErrors[JSClusterNotActiveErr] } // Grab account acc, err := s.LookupAccount(account) @@ -267,7 +268,7 @@ func (s *Server) JetStreamSnapshotStream(account, stream string) error { mset.mu.RLock() if !mset.node.Leader() { mset.mu.RUnlock() - return ErrJetStreamNotLeader + return ApiErrors[JSNotEnabledForAccountErr] } n := mset.node mset.mu.RUnlock() @@ -291,7 +292,7 @@ func (s *Server) JetStreamClusterPeers() []string { var nodes []string for _, p := range peers { si, ok := s.nodeToInfo.Load(p.ID) - if !ok || si.(nodeInfo).offline { + if !ok || si.(nodeInfo).offline || !si.(nodeInfo).js { continue } nodes = append(nodes, si.(nodeInfo).name) @@ -435,7 +436,7 @@ func (s *Server) enableJetStreamClustering() error { } js := s.getJetStream() if js == nil { - return ErrJetStreamNotEnabled + return ApiErrors[JSNotEnabledForAccountErr] } // Already set. if js.cluster != nil { @@ -1191,7 +1192,7 @@ func (js *jetStream) createRaftGroup(rg *raftGroup, storage StorageType) error { s, cc := js.srv, js.cluster if cc == nil || cc.meta == nil { - return ErrJetStreamNotClustered + return ApiErrors[JSClusterNotActiveErr] } // If this is a single peer raft group or we are not a member return. @@ -1435,7 +1436,7 @@ func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment) { Stream: sa.Config.Name, Restore: &JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}}, } - result.Restore.Error = jsError(sa.err) + result.Restore.Error = ApiErrors[JSStreamAssignmentErrF].ErrOrNewT(sa.err, "{err}", sa.err) js.mu.Unlock() // Send response to the metadata leader. They will forward to the user as needed. 
s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, result) @@ -1630,10 +1631,10 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco if isLeader && !isRecovering { var resp = JSApiMsgDeleteResponse{ApiResponse: ApiResponse{Type: JSApiMsgDeleteResponseType}} if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamMsgDeleteFailedF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp)) } else if !removed { - resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("sequence [%d] not found", md.Seq)} + resp.Error = ApiErrors[JSSequenceNotFoundErrF].NewT("{seq}", md.Seq) s.sendAPIErrResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp)) } else { resp.Success = true @@ -1655,7 +1656,7 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco } s := js.server() - purged, err := mset.purge() + purged, err := mset.purge(sp.Request) if err != nil { s.Warnf("JetStream cluster failed to purge stream %q for account %q: %v", sp.Stream, sp.Client.serviceAccount(), err) } @@ -1667,7 +1668,7 @@ func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isReco if isLeader && !isRecovering { var resp = JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}} if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamGeneralErrorF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(sp.Client, mset.account(), sp.Subject, sp.Reply, _EMPTY_, s.jsonResponse(resp)) } else { resp.Purged = purged @@ -1779,7 +1780,7 @@ func (js *jetStream) processStreamLeaderChange(mset *stream, isLeader bool) { // Send our response. 
var resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}} if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamCreateErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp)) } else { resp.StreamInfo = &StreamInfo{ @@ -1920,7 +1921,7 @@ func (js *jetStream) processStreamAssignment(sa *streamAssignment) bool { Stream: stream, Response: &JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}, } - result.Response.Error = jsNoAccountErr + result.Response.Error = ApiErrors[JSNoAccountErr] s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, result) s.Warnf(ll) } else { @@ -2071,7 +2072,7 @@ func (js *jetStream) processClusterUpdateStream(acc *Account, sa *streamAssignme Response: &JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}, Update: true, } - result.Response.Error = jsError(err) + result.Response.Error = ApiErrors[JSStreamGeneralErrorF].ErrOrNewT(err, "{err}", err) js.mu.Unlock() // Send response to the metadata leader. They will forward to the user as needed. @@ -2141,7 +2142,7 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme s.Warnf("JetStream cluster error updating stream %q for account %q: %v", sa.Config.Name, acc.Name, err) mset.setStreamAssignment(osa) } - } else if err == ErrJetStreamStreamNotFound { + } else if err == ApiErrors[JSStreamNotFoundErr] { // Add in the stream here. 
mset, err = acc.addStreamWithAssignment(sa.Config, nil, sa) } @@ -2174,7 +2175,7 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme Stream: sa.Config.Name, Response: &JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}, } - result.Response.Error = jsError(err) + result.Response.Error = ApiErrors[JSStreamCreateErrF].ErrOrNewT(err, "{err}", err) } js.mu.Unlock() @@ -2218,7 +2219,7 @@ func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignme Stream: sa.Config.Name, Restore: &JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}}, } - result.Restore.Error = jsError(sa.err) + result.Restore.Error = ApiErrors[JSStreamRestoreErrF].ErrOrNewT(sa.err, "{err}", sa.err) js.mu.Unlock() // Send response to the metadata leader. They will forward to the user as needed. b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines. @@ -2331,7 +2332,7 @@ func (js *jetStream) processClusterDeleteStream(sa *streamAssignment, isMember, // Go ahead and delete the stream. 
mset, err := acc.lookupStream(sa.Config.Name) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) } else if mset != nil { err = mset.stop(true, wasLeader) } @@ -2346,7 +2347,7 @@ func (js *jetStream) processClusterDeleteStream(sa *streamAssignment, isMember, if err != nil { if resp.Error == nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamGeneralErrorF].ErrOrNewT(err, "{err}", err) } s.sendAPIErrResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, s.jsonResponse(resp)) } else { @@ -2386,7 +2387,7 @@ func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) { Consumer: consumer, Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}, } - result.Response.Error = jsNoAccountErr + result.Response.Error = ApiErrors[JSNoAccountErr] s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, result) s.Warnf(ll) } else { @@ -2492,14 +2493,14 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state if err != nil { js.mu.Lock() s.Debugf("Consumer create failed, could not locate stream '%s > %s'", ca.Client.serviceAccount(), ca.Stream) - ca.err = ErrJetStreamStreamNotFound + ca.err = ApiErrors[JSStreamNotFoundErr] result := &consumerAssignmentResult{ Account: ca.Client.serviceAccount(), Stream: ca.Stream, Consumer: ca.Name, Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}, } - result.Response.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + result.Response.Error = ApiErrors[JSStreamNotFoundErr] s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, result) js.mu.Unlock() return @@ -2524,7 +2525,7 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state Consumer: ca.Name, Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}, } - result.Response.Error = 
jsNotFoundError(ErrJetStreamConsumerAlreadyUsed) + result.Response.Error = ApiErrors[JSConsumerNameExistErr] s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, result) js.mu.Unlock() return @@ -2545,7 +2546,10 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state } if err != nil { - s.Warnf("Consumer create failed for '%s > %s > %s': %v\n", ca.Client.serviceAccount(), ca.Stream, ca.Name, err) + if IsNatsErr(err, JSConsumerStoreFailedErrF) { + s.Warnf("Consumer create failed for '%s > %s > %s': %v", ca.Client.serviceAccount(), ca.Stream, ca.Name, err) + } + js.mu.Lock() ca.err = err @@ -2568,7 +2572,7 @@ func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment, state Consumer: ca.Name, Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}, } - result.Response.Error = jsError(err) + result.Response.Error = ApiErrors[JSConsumerCreateErrF].ErrOrNewT(err, "{err}", err) } else if err == errNoInterest { // This is a stranded ephemeral, let's clean this one up. subject := fmt.Sprintf(JSApiConsumerDeleteT, ca.Stream, ca.Name) @@ -2614,12 +2618,12 @@ func (js *jetStream) processClusterDeleteConsumer(ca *consumerAssignment, isMemb // Go ahead and delete the consumer. 
mset, err := acc.lookupStream(ca.Stream) if err != nil { - resp.Error = jsNotFoundError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) } else if mset != nil { if o := mset.lookupConsumer(ca.Name); o != nil { - err = o.stopWithFlags(true, true, wasLeader) + err = o.stopWithFlags(true, false, true, wasLeader) } else { - resp.Error = jsNoConsumerErr + resp.Error = ApiErrors[JSConsumerNotFoundErr] } } @@ -2633,7 +2637,7 @@ func (js *jetStream) processClusterDeleteConsumer(ca *consumerAssignment, isMemb if err != nil { if resp.Error == nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamNotFoundErr].ErrOr(err) } s.sendAPIErrResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(resp)) } else { @@ -2806,7 +2810,7 @@ func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLea } js.mu.RUnlock() if peer := string(e.Data); peer == ourID { - o.stopWithFlags(true, false, false) + o.stopWithFlags(true, false, false, false) } return nil } else { @@ -2848,7 +2852,6 @@ func (o *consumer) processReplicatedAck(dseq, sseq uint64) { o.store.UpdateAcks(dseq, sseq) o.mu.RLock() - mset := o.mset if mset == nil || mset.cfg.Retention == LimitsPolicy { o.mu.RUnlock() @@ -2968,7 +2971,7 @@ func (js *jetStream) processConsumerLeaderChange(o *consumer, isLeader bool) { var resp = JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}} if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSConsumerCreateErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp)) } else { resp.ConsumerInfo = o.info() @@ -3068,7 +3071,7 @@ type streamAssignmentResult struct { // Process error results of stream and consumer assignments. // Success will be handled by stream leader. 
-func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client, subject, reply string, msg []byte) { +func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { var result streamAssignmentResult if err := json.Unmarshal(msg, &result); err != nil { // TODO(dlc) - log @@ -3102,13 +3105,13 @@ func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client // TODO(dlc) - Could have mixed results, should track per peer. // Set sa.err while we are deleting so we will not respond to list/names requests. if !result.Update && time.Since(sa.Created) < 5*time.Second { - sa.err = ErrJetStreamNotAssigned + sa.err = ApiErrors[JSClusterNotAssignedErr] cc.meta.Propose(encodeDeleteStreamAssignment(sa)) } } } -func (js *jetStream) processConsumerAssignmentResults(sub *subscription, c *client, subject, reply string, msg []byte) { +func (js *jetStream) processConsumerAssignmentResults(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { var result consumerAssignmentResult if err := json.Unmarshal(msg, &result); err != nil { // TODO(dlc) - log @@ -3133,7 +3136,7 @@ func (js *jetStream) processConsumerAssignmentResults(sub *subscription, c *clie // TODO(dlc) - Could have mixed results, should track per peer. if result.Response.Error != nil { // So while we are deleting we will not respond to list/names requests. 
- ca.err = ErrJetStreamNotAssigned + ca.err = ApiErrors[JSClusterNotAssignedErr] cc.meta.Propose(encodeDeleteConsumerAssignment(ca)) } } @@ -3319,14 +3322,14 @@ func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, acc *Account, subject, acc.mu.RUnlock() if jsa == nil { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } ccfg, err := checkStreamCfg(config) if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamInvalidConfigF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3343,14 +3346,14 @@ func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, acc *Account, subject, jsa.mu.RUnlock() if exceeded { - resp.Error = jsError(fmt.Errorf("maximum number of streams reached")) + resp.Error = ApiErrors[JSMaximumStreamsLimitErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } // Check for stream limits here before proposing. 
if err := jsa.checkLimits(cfg); err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamLimitsErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3360,7 +3363,7 @@ func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, acc *Account, subject, defer js.mu.Unlock() if sa := js.streamAssignment(acc.Name, cfg.Name); sa != nil { - resp.Error = jsError(ErrJetStreamStreamAlreadyUsed) + resp.Error = ApiErrors[JSStreamNameExistErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3370,7 +3373,7 @@ func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, acc *Account, subject, for _, subj := range sa.Config.Subjects { for _, tsubj := range cfg.Subjects { if SubjectsCollide(tsubj, subj) { - resp.Error = jsError(fmt.Errorf("subjects overlap with an existing stream")) + resp.Error = ApiErrors[JSStreamSubjectOverlapErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3381,7 +3384,7 @@ func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, acc *Account, subject, // Raft group selection and placement. 
rg := cc.createGroupForStream(ci, cfg) if rg == nil { - resp.Error = jsInsufficientErr + resp.Error = ApiErrors[JSInsufficientResourcesErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3407,32 +3410,32 @@ func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, su osa := js.streamAssignment(acc.Name, cfg.Name) if osa == nil { - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } var newCfg *StreamConfig if jsa := js.accounts[acc.Name]; jsa != nil { if ncfg, err := jsa.configUpdateCheck(osa.Config, cfg); err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamUpdateErrF].ErrOrNewT(err, "{err}", err) s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } else { newCfg = ncfg } } else { - resp.Error = jsNotEnabledErr + resp.Error = ApiErrors[JSNotEnabledForAccountErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } // Check for cluster changes that we want to error on. 
if newCfg.Replicas != len(osa.Group.Peers) { - resp.Error = &ApiError{Code: 400, Description: "Replicas configuration can not be updated"} + resp.Error = ApiErrors[JSStreamReplicasNotUpdatableErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } if !reflect.DeepEqual(newCfg.Mirror, osa.Config.Mirror) { - resp.Error = &ApiError{Code: 400, Description: "Mirror configuration can not be updated"} + resp.Error = ApiErrors[JSStreamMirrorNotUpdatableErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3445,7 +3448,7 @@ func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, su for _, subj := range sa.Config.Subjects { for _, tsubj := range newCfg.Subjects { if SubjectsCollide(tsubj, subj) { - resp.Error = jsError(fmt.Errorf("subjects overlap with an existing stream")) + resp.Error = ApiErrors[JSStreamSubjectOverlapErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3469,7 +3472,7 @@ func (s *Server) jsClusteredStreamDeleteRequest(ci *ClientInfo, acc *Account, st osa := js.streamAssignment(acc.Name, stream) if osa == nil { var resp = JSApiStreamDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamDeleteResponseType}} - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3483,7 +3486,16 @@ func (s *Server) jsClusteredStreamDeleteRequest(ci *ClientInfo, acc *Account, st cc.meta.Propose(encodeDeleteStreamAssignment(sa)) } -func (s *Server) jsClusteredStreamPurgeRequest(ci *ClientInfo, acc *Account, mset *stream, stream, subject, reply string, rmsg []byte) { +// Process a clustered purge request. 
+func (s *Server) jsClusteredStreamPurgeRequest( + ci *ClientInfo, + acc *Account, + mset *stream, + stream, subject, reply string, + rmsg []byte, + preq *JSApiStreamPurgeRequest, +) { + js, cc := s.getJetStreamCluster() if js == nil || cc == nil { return @@ -3495,19 +3507,19 @@ func (s *Server) jsClusteredStreamPurgeRequest(ci *ClientInfo, acc *Account, mse sa := js.streamAssignment(acc.Name, stream) if sa == nil { resp := JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}} - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } if n := sa.Group.node; n != nil { - sp := &streamPurge{Stream: stream, LastSeq: mset.state().LastSeq, Subject: subject, Reply: reply, Client: ci} + sp := &streamPurge{Stream: stream, LastSeq: mset.state().LastSeq, Subject: subject, Reply: reply, Client: ci, Request: preq} n.Propose(encodeStreamPurge(sp)) } else if mset != nil { var resp = JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}} - purged, err := mset.purge() + purged, err := mset.purge(preq) if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamGeneralErrorF].ErrOrNewT(err, "{err}", err) } else { resp.Purged = purged resp.Success = true @@ -3529,7 +3541,7 @@ func (s *Server) jsClusteredStreamRestoreRequest(ci *ClientInfo, acc *Account, r resp := JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}} if sa := js.streamAssignment(ci.serviceAccount(), cfg.Name); sa != nil { - resp.Error = jsError(ErrJetStreamStreamAlreadyUsed) + resp.Error = ApiErrors[JSStreamNameExistErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3537,7 +3549,7 @@ func (s *Server) jsClusteredStreamRestoreRequest(ci *ClientInfo, acc *Account, r // Raft group selection and placement. 
rg := cc.createGroupForStream(ci, cfg) if rg == nil { - resp.Error = jsInsufficientErr + resp.Error = ApiErrors[JSInsufficientResourcesErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3617,7 +3629,7 @@ func (s *Server) jsClusteredStreamListRequest(acc *Account, ci *ClientInfo, offs rc := make(chan *StreamInfo, len(streams)) // Store our handler. - s.sys.replies[inbox] = func(sub *subscription, _ *client, subject, _ string, msg []byte) { + s.sys.replies[inbox] = func(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) { var si StreamInfo if err := json.Unmarshal(msg, &si); err != nil { s.Warnf("Error unmarshaling clustered stream info response:%v", err) @@ -3665,7 +3677,7 @@ LOOP: return case <-notActive.C: s.Warnf("Did not receive all stream info results for %q", acc) - resp.Error = jsClusterIncompleteErr + resp.Error = ApiErrors[JSClusterIncompleteErr] break LOOP case si := <-rc: resp.Streams = append(resp.Streams, si) @@ -3748,7 +3760,7 @@ func (s *Server) jsClusteredConsumerListRequest(acc *Account, ci *ClientInfo, of rc := make(chan *ConsumerInfo, len(consumers)) // Store our handler. 
- s.sys.replies[inbox] = func(sub *subscription, _ *client, subject, _ string, msg []byte) { + s.sys.replies[inbox] = func(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) { var ci ConsumerInfo if err := json.Unmarshal(msg, &ci); err != nil { s.Warnf("Error unmarshaling clustered consumer info response:%v", err) @@ -3843,19 +3855,19 @@ func (s *Server) jsClusteredConsumerDeleteRequest(ci *ClientInfo, acc *Account, sa := js.streamAssignment(acc.Name, stream) if sa == nil { - resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } if sa.consumers == nil { - resp.Error = jsNoConsumerErr + resp.Error = ApiErrors[JSConsumerNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } oca := sa.consumers[consumer] if oca == nil { - resp.Error = jsNoConsumerErr + resp.Error = ApiErrors[JSConsumerNotFoundErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -3905,9 +3917,9 @@ func (s *Server) jsClusteredMsgDeleteRequest(ci *ClientInfo, acc *Account, mset } var resp = JSApiMsgDeleteResponse{ApiResponse: ApiResponse{Type: JSApiMsgDeleteResponseType}} if err != nil { - resp.Error = jsError(err) + resp.Error = ApiErrors[JSStreamMsgDeleteFailedF].ErrOrNewT(err, "{err}", err) } else if !removed { - resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("sequence [%d] not found", req.Seq)} + resp.Error = ApiErrors[JSSequenceNotFoundErrF].NewT("{seq}", req.Seq) } else { resp.Success = true } @@ -3966,11 +3978,35 @@ func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subjec // Lookup the stream assignment. 
sa := js.streamAssignment(acc.Name, stream) if sa == nil { - resp.Error = jsError(ErrJetStreamStreamNotFound) + resp.Error = ApiErrors[JSStreamNotFoundErr] + s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) + return + } + + // Check for max consumers here to short circuit if possible. + if maxc := sa.Config.MaxConsumers; maxc > 0 && len(sa.consumers) >= maxc { + resp.Error = ApiErrors[JSMaximumConsumersLimitErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } + // Also short circuit if DeliverLastPerSubject is set with no FilterSubject. + if cfg.DeliverPolicy == DeliverLastPerSubject { + badConfig := cfg.FilterSubject == _EMPTY_ + if !badConfig { + subjects := sa.Config.Subjects + if len(subjects) == 1 && subjects[0] == cfg.FilterSubject && subjectIsLiteral(subjects[0]) { + badConfig = true + } + } + if badConfig { + resp.Error = ApiErrors[JSConsumerInvalidPolicyErrF].NewT("{err}", + "consumer delivery policy is deliver last per subject, but FilterSubject is not set") + s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) + return + } + } + // Setup proper default for ack wait if we are in explicit ack mode. 
if cfg.AckWait == 0 && (cfg.AckPolicy == AckExplicit || cfg.AckPolicy == AckAll) { cfg.AckWait = JsAckWaitDefault @@ -3986,7 +4022,7 @@ func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subjec rg := cc.createGroupForConsumer(sa) if rg == nil { - resp.Error = jsInsufficientErr + resp.Error = ApiErrors[JSInsufficientResourcesErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -4021,7 +4057,7 @@ func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subjec shouldErr = len(rr.psubs)+len(rr.qsubs) != 0 } if shouldErr { - resp.Error = jsError(ErrJetStreamConsumerAlreadyUsed) + resp.Error = ApiErrors[JSConsumerNameExistErr] s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp)) return } @@ -4031,14 +4067,12 @@ func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subjec ca := &consumerAssignment{Group: rg, Stream: stream, Name: oname, Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now().UTC()} eca := encodeAddConsumerAssignment(ca) - // Mark this as pending if a durable. - if isDurableConsumer(cfg) { - if sa.consumers == nil { - sa.consumers = make(map[string]*consumerAssignment) - } - ca.pending = true - sa.consumers[ca.Name] = ca + // Mark this as pending. + if sa.consumers == nil { + sa.consumers = make(map[string]*consumerAssignment) } + ca.pending = true + sa.consumers[ca.Name] = ca // Do formal proposal. cc.meta.Propose(eca) @@ -4215,14 +4249,14 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ if js.limitsExceeded(stype) { s.resourcesExeededError() if canRespond { - b, _ := json.Marshal(&JSPubAckResponse{PubAck: &PubAck{Stream: name}, Error: jsInsufficientErr}) + b, _ := json.Marshal(&JSPubAckResponse{PubAck: &PubAck{Stream: name}, Error: ApiErrors[JSInsufficientResourcesErr]}) outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil}) } // Stepdown regardless. 
if node := mset.raftNode(); node != nil { node.StepDown() } - return 0, ErrJetStreamResourcesExceeded + return 0, ApiErrors[JSInsufficientResourcesErr] } // Check here pre-emptively if we have exceeded our account limits. @@ -4247,7 +4281,7 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ s.Warnf(err.Error()) if canRespond { var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: name}} - resp.Error = &ApiError{Code: 400, Description: "resource limits exceeded for account"} + resp.Error = ApiErrors[JSAccountResourcesExceededErr] response, _ = json.Marshal(resp) outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil}) } @@ -4260,7 +4294,7 @@ func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg [ s.Warnf(err.Error()) if canRespond { var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: name}} - resp.Error = &ApiError{Code: 400, Description: "message size exceeds maximum allowed"} + resp.Error = ApiErrors[JSStreamMessageExceedsMaximumErr] response, _ = json.Marshal(resp) outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil}) } @@ -4457,7 +4491,7 @@ func (mset *stream) processSnapshot(snap *streamSnapshot) { for _, o := range mset.consumers { o.mu.Lock() if o.isLeader() { - o.setInitialPending() + o.setInitialPendingAndStart() } o.mu.Unlock() } @@ -4493,7 +4527,7 @@ RETRY: // Send our catchup request here. reply := syncReplySubject() - sub, err = s.sysSubscribe(reply, func(_ *subscription, _ *client, _, reply string, msg []byte) { + sub, err = s.sysSubscribe(reply, func(_ *subscription, _ *client, _ *Account, _, reply string, msg []byte) { // Make copies - https://github.com/go101/go101/wiki // TODO(dlc) - Since we are using a buffer from the inbound client/route. 
select { @@ -4533,7 +4567,7 @@ RETRY: } else if isOutOfSpaceErr(err) { s.handleOutOfSpace(msetName) return - } else if err == ErrJetStreamResourcesExceeded { + } else if err == ApiErrors[JSInsufficientResourcesErr] { s.resourcesExeededError() return } else { @@ -4569,7 +4603,7 @@ func (mset *stream) processCatchupMsg(msg []byte) (uint64, error) { } if mset.js.limitsExceeded(mset.cfg.Storage) { - return 0, ErrJetStreamResourcesExceeded + return 0, ApiErrors[JSInsufficientResourcesErr] } // Put into our store @@ -4589,7 +4623,7 @@ func (mset *stream) processCatchupMsg(msg []byte) (uint64, error) { return seq, nil } -func (mset *stream) handleClusterSyncRequest(sub *subscription, c *client, subject, reply string, msg []byte) { +func (mset *stream) handleClusterSyncRequest(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { var sreq streamSyncRequest if err := json.Unmarshal(msg, &sreq); err != nil { // Log error. @@ -4646,9 +4680,9 @@ func (js *jetStream) clusterInfo(rg *raftGroup) *ClusterInfo { for _, rp := range peers { if rp.ID != id && rg.isMember(rp.ID) { - lastSeen := now.Sub(rp.Last) - if lastSeen < 0 { - lastSeen = 1 + var lastSeen time.Duration + if now.After(rp.Last) && rp.Last.Unix() != 0 { + lastSeen = now.Sub(rp.Last) } current := rp.Current if current && lastSeen > lostQuorumInterval { @@ -4674,7 +4708,7 @@ func (mset *stream) checkClusterInfo(si *StreamInfo) { } } -func (mset *stream) handleClusterStreamInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) { +func (mset *stream) handleClusterStreamInfoRequest(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { mset.mu.RLock() sysc, js, sa, config := mset.sysc, mset.srv.js, mset.sa, mset.cfg stype := mset.cfg.Storage @@ -4731,7 +4765,7 @@ func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) { // Setup ackReply for flow control. 
ackReply := syncAckSubject() - ackSub, _ := s.sysSubscribe(ackReply, func(sub *subscription, c *client, subject, reply string, msg []byte) { + ackSub, _ := s.sysSubscribe(ackReply, func(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { sz := ackReplySize(subject) atomic.AddInt64(&outb, -sz) atomic.AddInt32(&outm, -1) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors.go new file mode 100644 index 00000000..b00dca75 --- /dev/null +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors.go @@ -0,0 +1,114 @@ +package server + +import ( + "fmt" + "strings" +) + +type ErrorIdentifier uint16 + +// IsNatsErr determines if a error matches ID, if multiple IDs are given if the error matches any of these the function will be true +func IsNatsErr(err error, ids ...ErrorIdentifier) bool { + if err == nil { + return false + } + + ce, ok := err.(*ApiError) + if !ok || ce == nil { + return false + } + + for _, id := range ids { + ae, ok := ApiErrors[id] + if !ok || ae == nil { + continue + } + + if ce.ErrCode == ae.ErrCode { + return true + } + } + + return false +} + +// ApiError is included in all responses if there was an error. 
+type ApiError struct { + Code int `json:"code"` + ErrCode uint16 `json:"err_code,omitempty"` + Description string `json:"description,omitempty"` +} + +// ErrorsData is the source data for generated errors as found in errors.json +type ErrorsData struct { + Constant string `json:"constant"` + Code int `json:"code"` + ErrCode uint16 `json:"error_code"` + Description string `json:"description"` + Comment string `json:"comment"` + Help string `json:"help"` + URL string `json:"url"` + Deprecates string `json:"deprecates"` +} + +func (e *ApiError) Error() string { + return fmt.Sprintf("%s (%d)", e.Description, e.ErrCode) +} + +// ErrOrNewT returns err if it's an ApiError else creates a new error using NewT() +func (e *ApiError) ErrOrNewT(err error, replacements ...interface{}) *ApiError { + if ae, ok := err.(*ApiError); ok { + return ae + } + + return e.NewT(replacements...) +} + +// ErrOr returns err if it's an ApiError else creates a new error +func (e *ApiError) ErrOr(err error) *ApiError { + if ae, ok := err.(*ApiError); ok { + return ae + } + + return e +} + +// NewT creates a new error using strings.Replacer on the Description field, arguments must be an even number like NewT("{err}", err) +func (e *ApiError) NewT(replacements ...interface{}) *ApiError { + ne := &ApiError{ + Code: e.Code, + ErrCode: e.ErrCode, + Description: e.Description, + } + + if len(replacements) == 0 { + return ne + } + + if len(replacements)%2 != 0 { + panic("invalid error replacement") + } + + var ra []string + + var key string + for i, replacement := range replacements { + if i%2 == 0 { + key = replacement.(string) + continue + } + + switch v := replacement.(type) { + case string: + ra = append(ra, key, v) + case error: + ra = append(ra, key, v.Error()) + default: + ra = append(ra, key, fmt.Sprintf("%v", v)) + } + } + + ne.Description = strings.NewReplacer(ra...).Replace(e.Description) + + return ne +} diff --git 
a/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors_generated.go b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors_generated.go new file mode 100644 index 00000000..2c2628f9 --- /dev/null +++ b/vendor/github.com/nats-io/nats-server/v2/server/jetstream_errors_generated.go @@ -0,0 +1,452 @@ +// Generated code, do not edit. See errors.json and run go generate to update + +package server + +const ( + // JSAccountResourcesExceededErr resource limits exceeded for account + JSAccountResourcesExceededErr ErrorIdentifier = 10002 + + // JSBadRequestErr bad request + JSBadRequestErr ErrorIdentifier = 10003 + + // JSClusterIncompleteErr incomplete results + JSClusterIncompleteErr ErrorIdentifier = 10004 + + // JSClusterNoPeersErr no suitable peers for placement + JSClusterNoPeersErr ErrorIdentifier = 10005 + + // JSClusterNotActiveErr JetStream not in clustered mode + JSClusterNotActiveErr ErrorIdentifier = 10006 + + // JSClusterNotAssignedErr JetStream cluster not assigned to this server + JSClusterNotAssignedErr ErrorIdentifier = 10007 + + // JSClusterNotAvailErr JetStream system temporarily unavailable + JSClusterNotAvailErr ErrorIdentifier = 10008 + + // JSClusterNotLeaderErr JetStream cluster can not handle request + JSClusterNotLeaderErr ErrorIdentifier = 10009 + + // JSClusterPeerNotMemberErr peer not a member + JSClusterPeerNotMemberErr ErrorIdentifier = 10040 + + // JSClusterRequiredErr JetStream clustering support required + JSClusterRequiredErr ErrorIdentifier = 10010 + + // JSClusterServerNotMemberErr server is not a member of the cluster + JSClusterServerNotMemberErr ErrorIdentifier = 10044 + + // JSClusterTagsErr tags placement not supported for operation + JSClusterTagsErr ErrorIdentifier = 10011 + + // JSClusterUnSupportFeatureErr not currently supported in clustered mode + JSClusterUnSupportFeatureErr ErrorIdentifier = 10036 + + // JSConsumerBadDurableNameErr durable name can not contain '.', '*', '>' + 
JSConsumerBadDurableNameErr ErrorIdentifier = 10103 + + // JSConsumerConfigRequiredErr consumer config required + JSConsumerConfigRequiredErr ErrorIdentifier = 10078 + + // JSConsumerCreateErrF General consumer creation failure string ({err}) + JSConsumerCreateErrF ErrorIdentifier = 10012 + + // JSConsumerDeliverCycleErr consumer deliver subject forms a cycle + JSConsumerDeliverCycleErr ErrorIdentifier = 10081 + + // JSConsumerDeliverToWildcardsErr consumer deliver subject has wildcards + JSConsumerDeliverToWildcardsErr ErrorIdentifier = 10079 + + // JSConsumerDescriptionTooLongErrF consumer description is too long, maximum allowed is {max} + JSConsumerDescriptionTooLongErrF ErrorIdentifier = 10107 + + // JSConsumerDirectRequiresEphemeralErr consumer direct requires an ephemeral consumer + JSConsumerDirectRequiresEphemeralErr ErrorIdentifier = 10091 + + // JSConsumerDirectRequiresPushErr consumer direct requires a push based consumer + JSConsumerDirectRequiresPushErr ErrorIdentifier = 10090 + + // JSConsumerDurableNameNotInSubjectErr consumer expected to be durable but no durable name set in subject + JSConsumerDurableNameNotInSubjectErr ErrorIdentifier = 10016 + + // JSConsumerDurableNameNotMatchSubjectErr consumer name in subject does not match durable name in request + JSConsumerDurableNameNotMatchSubjectErr ErrorIdentifier = 10017 + + // JSConsumerDurableNameNotSetErr consumer expected to be durable but a durable name was not set + JSConsumerDurableNameNotSetErr ErrorIdentifier = 10018 + + // JSConsumerEphemeralWithDurableInSubjectErr consumer expected to be ephemeral but detected a durable name set in subject + JSConsumerEphemeralWithDurableInSubjectErr ErrorIdentifier = 10019 + + // JSConsumerEphemeralWithDurableNameErr consumer expected to be ephemeral but a durable name was set in request + JSConsumerEphemeralWithDurableNameErr ErrorIdentifier = 10020 + + // JSConsumerExistingActiveErr consumer already exists and is still active + 
JSConsumerExistingActiveErr ErrorIdentifier = 10105 + + // JSConsumerFCRequiresPushErr consumer flow control requires a push based consumer + JSConsumerFCRequiresPushErr ErrorIdentifier = 10089 + + // JSConsumerFilterNotSubsetErr consumer filter subject is not a valid subset of the interest subjects + JSConsumerFilterNotSubsetErr ErrorIdentifier = 10093 + + // JSConsumerHBRequiresPushErr consumer idle heartbeat requires a push based consumer + JSConsumerHBRequiresPushErr ErrorIdentifier = 10088 + + // JSConsumerInvalidPolicyErrF Generic delivery policy error ({err}) + JSConsumerInvalidPolicyErrF ErrorIdentifier = 10094 + + // JSConsumerInvalidSamplingErrF failed to parse consumer sampling configuration: {err} + JSConsumerInvalidSamplingErrF ErrorIdentifier = 10095 + + // JSConsumerMaxPendingAckPolicyRequiredErr consumer requires ack policy for max ack pending + JSConsumerMaxPendingAckPolicyRequiredErr ErrorIdentifier = 10082 + + // JSConsumerMaxWaitingNegativeErr consumer max waiting needs to be positive + JSConsumerMaxWaitingNegativeErr ErrorIdentifier = 10087 + + // JSConsumerNameExistErr consumer name already in use + JSConsumerNameExistErr ErrorIdentifier = 10013 + + // JSConsumerNameTooLongErrF consumer name is too long, maximum allowed is {max} + JSConsumerNameTooLongErrF ErrorIdentifier = 10102 + + // JSConsumerNotFoundErr consumer not found + JSConsumerNotFoundErr ErrorIdentifier = 10014 + + // JSConsumerOnMappedErr consumer direct on a mapped consumer + JSConsumerOnMappedErr ErrorIdentifier = 10092 + + // JSConsumerPullNotDurableErr consumer in pull mode requires a durable name + JSConsumerPullNotDurableErr ErrorIdentifier = 10085 + + // JSConsumerPullRequiresAckErr consumer in pull mode requires explicit ack policy + JSConsumerPullRequiresAckErr ErrorIdentifier = 10084 + + // JSConsumerPullWithRateLimitErr consumer in pull mode can not have rate limit set + JSConsumerPullWithRateLimitErr ErrorIdentifier = 10086 + + // JSConsumerPushMaxWaitingErr consumer 
in push mode can not set max waiting + JSConsumerPushMaxWaitingErr ErrorIdentifier = 10080 + + // JSConsumerReplacementWithDifferentNameErr consumer replacement durable config not the same + JSConsumerReplacementWithDifferentNameErr ErrorIdentifier = 10106 + + // JSConsumerSmallHeartbeatErr consumer idle heartbeat needs to be >= 100ms + JSConsumerSmallHeartbeatErr ErrorIdentifier = 10083 + + // JSConsumerStoreFailedErrF error creating store for consumer: {err} + JSConsumerStoreFailedErrF ErrorIdentifier = 10104 + + // JSConsumerWQConsumerNotDeliverAllErr consumer must be deliver all on workqueue stream + JSConsumerWQConsumerNotDeliverAllErr ErrorIdentifier = 10101 + + // JSConsumerWQConsumerNotUniqueErr filtered consumer not unique on workqueue stream + JSConsumerWQConsumerNotUniqueErr ErrorIdentifier = 10100 + + // JSConsumerWQMultipleUnfilteredErr multiple non-filtered consumers not allowed on workqueue stream + JSConsumerWQMultipleUnfilteredErr ErrorIdentifier = 10099 + + // JSConsumerWQRequiresExplicitAckErr workqueue stream requires explicit ack + JSConsumerWQRequiresExplicitAckErr ErrorIdentifier = 10098 + + // JSInsufficientResourcesErr insufficient resources + JSInsufficientResourcesErr ErrorIdentifier = 10023 + + // JSInvalidJSONErr invalid JSON + JSInvalidJSONErr ErrorIdentifier = 10025 + + // JSMaximumConsumersLimitErr maximum consumers limit reached + JSMaximumConsumersLimitErr ErrorIdentifier = 10026 + + // JSMaximumStreamsLimitErr maximum number of streams reached + JSMaximumStreamsLimitErr ErrorIdentifier = 10027 + + // JSMemoryResourcesExceededErr insufficient memory resources available + JSMemoryResourcesExceededErr ErrorIdentifier = 10028 + + // JSMirrorConsumerSetupFailedErrF Generic mirror consumer setup failure string ({err}) + JSMirrorConsumerSetupFailedErrF ErrorIdentifier = 10029 + + // JSMirrorMaxMessageSizeTooBigErr stream mirror must have max message size >= source + JSMirrorMaxMessageSizeTooBigErr ErrorIdentifier = 10030 + + // 
JSMirrorWithSourcesErr stream mirrors can not also contain other sources + JSMirrorWithSourcesErr ErrorIdentifier = 10031 + + // JSMirrorWithStartSeqAndTimeErr stream mirrors can not have both start seq and start time configured + JSMirrorWithStartSeqAndTimeErr ErrorIdentifier = 10032 + + // JSMirrorWithSubjectFiltersErr stream mirrors can not contain filtered subjects + JSMirrorWithSubjectFiltersErr ErrorIdentifier = 10033 + + // JSMirrorWithSubjectsErr stream mirrors can not also contain subjects + JSMirrorWithSubjectsErr ErrorIdentifier = 10034 + + // JSNoAccountErr account not found + JSNoAccountErr ErrorIdentifier = 10035 + + // JSNoMessageFoundErr no message found + JSNoMessageFoundErr ErrorIdentifier = 10037 + + // JSNotEmptyRequestErr expected an empty request payload + JSNotEmptyRequestErr ErrorIdentifier = 10038 + + // JSNotEnabledErr JetStream not enabled + JSNotEnabledErr ErrorIdentifier = 10076 + + // JSNotEnabledForAccountErr JetStream not enabled for account + JSNotEnabledForAccountErr ErrorIdentifier = 10039 + + // JSPeerRemapErr peer remap failed + JSPeerRemapErr ErrorIdentifier = 10075 + + // JSRaftGeneralErrF General RAFT error string ({err}) + JSRaftGeneralErrF ErrorIdentifier = 10041 + + // JSRestoreSubscribeFailedErrF JetStream unable to subscribe to restore snapshot {subject}: {err} + JSRestoreSubscribeFailedErrF ErrorIdentifier = 10042 + + // JSSequenceNotFoundErrF sequence {seq} not found + JSSequenceNotFoundErrF ErrorIdentifier = 10043 + + // JSSnapshotDeliverSubjectInvalidErr deliver subject not valid + JSSnapshotDeliverSubjectInvalidErr ErrorIdentifier = 10015 + + // JSSourceConsumerSetupFailedErrF General source consumer setup failure string ({err}) + JSSourceConsumerSetupFailedErrF ErrorIdentifier = 10045 + + // JSSourceMaxMessageSizeTooBigErr stream source must have max message size >= target + JSSourceMaxMessageSizeTooBigErr ErrorIdentifier = 10046 + + // JSStorageResourcesExceededErr insufficient storage resources available + 
JSStorageResourcesExceededErr ErrorIdentifier = 10047 + + // JSStreamAssignmentErrF Generic stream assignment error string ({err}) + JSStreamAssignmentErrF ErrorIdentifier = 10048 + + // JSStreamCreateErrF Generic stream creation error string ({err}) + JSStreamCreateErrF ErrorIdentifier = 10049 + + // JSStreamDeleteErrF General stream deletion error string ({err}) + JSStreamDeleteErrF ErrorIdentifier = 10050 + + // JSStreamExternalApiOverlapErrF stream external api prefix {prefix} must not overlap with {subject} + JSStreamExternalApiOverlapErrF ErrorIdentifier = 10021 + + // JSStreamExternalDelPrefixOverlapsErrF stream external delivery prefix {prefix} overlaps with stream subject {subject} + JSStreamExternalDelPrefixOverlapsErrF ErrorIdentifier = 10022 + + // JSStreamGeneralErrorF General stream failure string ({err}) + JSStreamGeneralErrorF ErrorIdentifier = 10051 + + // JSStreamInvalidConfigF Stream configuration validation error string ({err}) + JSStreamInvalidConfigF ErrorIdentifier = 10052 + + // JSStreamInvalidErr stream not valid + JSStreamInvalidErr ErrorIdentifier = 10096 + + // JSStreamInvalidExternalDeliverySubjErrF stream external delivery prefix {prefix} must not contain wildcards + JSStreamInvalidExternalDeliverySubjErrF ErrorIdentifier = 10024 + + // JSStreamLimitsErrF General stream limits exceeded error string ({err}) + JSStreamLimitsErrF ErrorIdentifier = 10053 + + // JSStreamMessageExceedsMaximumErr message size exceeds maximum allowed + JSStreamMessageExceedsMaximumErr ErrorIdentifier = 10054 + + // JSStreamMirrorNotUpdatableErr Mirror configuration can not be updated + JSStreamMirrorNotUpdatableErr ErrorIdentifier = 10055 + + // JSStreamMismatchErr stream name in subject does not match request + JSStreamMismatchErr ErrorIdentifier = 10056 + + // JSStreamMsgDeleteFailedF Generic message deletion failure error string ({err}) + JSStreamMsgDeleteFailedF ErrorIdentifier = 10057 + + // JSStreamNameExistErr stream name already in use + 
JSStreamNameExistErr ErrorIdentifier = 10058 + + // JSStreamNotFoundErr stream not found + JSStreamNotFoundErr ErrorIdentifier = 10059 + + // JSStreamNotMatchErr expected stream does not match + JSStreamNotMatchErr ErrorIdentifier = 10060 + + // JSStreamReplicasNotSupportedErr replicas > 1 not supported in non-clustered mode + JSStreamReplicasNotSupportedErr ErrorIdentifier = 10074 + + // JSStreamReplicasNotUpdatableErr Replicas configuration can not be updated + JSStreamReplicasNotUpdatableErr ErrorIdentifier = 10061 + + // JSStreamRestoreErrF restore failed: {err} + JSStreamRestoreErrF ErrorIdentifier = 10062 + + // JSStreamSequenceNotMatchErr expected stream sequence does not match + JSStreamSequenceNotMatchErr ErrorIdentifier = 10063 + + // JSStreamSnapshotErrF snapshot failed: {err} + JSStreamSnapshotErrF ErrorIdentifier = 10064 + + // JSStreamStoreFailedF Generic error when storing a message failed ({err}) + JSStreamStoreFailedF ErrorIdentifier = 10077 + + // JSStreamSubjectOverlapErr subjects overlap with an existing stream + JSStreamSubjectOverlapErr ErrorIdentifier = 10065 + + // JSStreamTemplateCreateErrF Generic template creation failed string ({err}) + JSStreamTemplateCreateErrF ErrorIdentifier = 10066 + + // JSStreamTemplateDeleteErrF Generic stream template deletion failed error string ({err}) + JSStreamTemplateDeleteErrF ErrorIdentifier = 10067 + + // JSStreamTemplateNotFoundErr template not found + JSStreamTemplateNotFoundErr ErrorIdentifier = 10068 + + // JSStreamUpdateErrF Generic stream update error string ({err}) + JSStreamUpdateErrF ErrorIdentifier = 10069 + + // JSStreamWrongLastMsgIDErrF wrong last msg ID: {id} + JSStreamWrongLastMsgIDErrF ErrorIdentifier = 10070 + + // JSStreamWrongLastSequenceErrF wrong last sequence: {seq} + JSStreamWrongLastSequenceErrF ErrorIdentifier = 10071 + + // JSTempStorageFailedErr JetStream unable to open temp storage for restore + JSTempStorageFailedErr ErrorIdentifier = 10072 + + // 
JSTemplateNameNotMatchSubjectErr template name in subject does not match request + JSTemplateNameNotMatchSubjectErr ErrorIdentifier = 10073 +) + +var ( + ApiErrors = map[ErrorIdentifier]*ApiError{ + JSAccountResourcesExceededErr: {Code: 400, ErrCode: 10002, Description: "resource limits exceeded for account"}, + JSBadRequestErr: {Code: 400, ErrCode: 10003, Description: "bad request"}, + JSClusterIncompleteErr: {Code: 503, ErrCode: 10004, Description: "incomplete results"}, + JSClusterNoPeersErr: {Code: 400, ErrCode: 10005, Description: "no suitable peers for placement"}, + JSClusterNotActiveErr: {Code: 500, ErrCode: 10006, Description: "JetStream not in clustered mode"}, + JSClusterNotAssignedErr: {Code: 500, ErrCode: 10007, Description: "JetStream cluster not assigned to this server"}, + JSClusterNotAvailErr: {Code: 503, ErrCode: 10008, Description: "JetStream system temporarily unavailable"}, + JSClusterNotLeaderErr: {Code: 500, ErrCode: 10009, Description: "JetStream cluster can not handle request"}, + JSClusterPeerNotMemberErr: {Code: 400, ErrCode: 10040, Description: "peer not a member"}, + JSClusterRequiredErr: {Code: 503, ErrCode: 10010, Description: "JetStream clustering support required"}, + JSClusterServerNotMemberErr: {Code: 400, ErrCode: 10044, Description: "server is not a member of the cluster"}, + JSClusterTagsErr: {Code: 400, ErrCode: 10011, Description: "tags placement not supported for operation"}, + JSClusterUnSupportFeatureErr: {Code: 503, ErrCode: 10036, Description: "not currently supported in clustered mode"}, + JSConsumerBadDurableNameErr: {Code: 400, ErrCode: 10103, Description: "durable name can not contain '.', '*', '>'"}, + JSConsumerConfigRequiredErr: {Code: 400, ErrCode: 10078, Description: "consumer config required"}, + JSConsumerCreateErrF: {Code: 500, ErrCode: 10012, Description: "{err}"}, + JSConsumerDeliverCycleErr: {Code: 400, ErrCode: 10081, Description: "consumer deliver subject forms a cycle"}, + 
JSConsumerDeliverToWildcardsErr: {Code: 400, ErrCode: 10079, Description: "consumer deliver subject has wildcards"}, + JSConsumerDescriptionTooLongErrF: {Code: 400, ErrCode: 10107, Description: "consumer description is too long, maximum allowed is {max}"}, + JSConsumerDirectRequiresEphemeralErr: {Code: 400, ErrCode: 10091, Description: "consumer direct requires an ephemeral consumer"}, + JSConsumerDirectRequiresPushErr: {Code: 400, ErrCode: 10090, Description: "consumer direct requires a push based consumer"}, + JSConsumerDurableNameNotInSubjectErr: {Code: 400, ErrCode: 10016, Description: "consumer expected to be durable but no durable name set in subject"}, + JSConsumerDurableNameNotMatchSubjectErr: {Code: 400, ErrCode: 10017, Description: "consumer name in subject does not match durable name in request"}, + JSConsumerDurableNameNotSetErr: {Code: 400, ErrCode: 10018, Description: "consumer expected to be durable but a durable name was not set"}, + JSConsumerEphemeralWithDurableInSubjectErr: {Code: 400, ErrCode: 10019, Description: "consumer expected to be ephemeral but detected a durable name set in subject"}, + JSConsumerEphemeralWithDurableNameErr: {Code: 400, ErrCode: 10020, Description: "consumer expected to be ephemeral but a durable name was set in request"}, + JSConsumerExistingActiveErr: {Code: 400, ErrCode: 10105, Description: "consumer already exists and is still active"}, + JSConsumerFCRequiresPushErr: {Code: 400, ErrCode: 10089, Description: "consumer flow control requires a push based consumer"}, + JSConsumerFilterNotSubsetErr: {Code: 400, ErrCode: 10093, Description: "consumer filter subject is not a valid subset of the interest subjects"}, + JSConsumerHBRequiresPushErr: {Code: 400, ErrCode: 10088, Description: "consumer idle heartbeat requires a push based consumer"}, + JSConsumerInvalidPolicyErrF: {Code: 400, ErrCode: 10094, Description: "{err}"}, + JSConsumerInvalidSamplingErrF: {Code: 400, ErrCode: 10095, Description: "failed to parse consumer 
sampling configuration: {err}"}, + JSConsumerMaxPendingAckPolicyRequiredErr: {Code: 400, ErrCode: 10082, Description: "consumer requires ack policy for max ack pending"}, + JSConsumerMaxWaitingNegativeErr: {Code: 400, ErrCode: 10087, Description: "consumer max waiting needs to be positive"}, + JSConsumerNameExistErr: {Code: 400, ErrCode: 10013, Description: "consumer name already in use"}, + JSConsumerNameTooLongErrF: {Code: 400, ErrCode: 10102, Description: "consumer name is too long, maximum allowed is {max}"}, + JSConsumerNotFoundErr: {Code: 404, ErrCode: 10014, Description: "consumer not found"}, + JSConsumerOnMappedErr: {Code: 400, ErrCode: 10092, Description: "consumer direct on a mapped consumer"}, + JSConsumerPullNotDurableErr: {Code: 400, ErrCode: 10085, Description: "consumer in pull mode requires a durable name"}, + JSConsumerPullRequiresAckErr: {Code: 400, ErrCode: 10084, Description: "consumer in pull mode requires explicit ack policy"}, + JSConsumerPullWithRateLimitErr: {Code: 400, ErrCode: 10086, Description: "consumer in pull mode can not have rate limit set"}, + JSConsumerPushMaxWaitingErr: {Code: 400, ErrCode: 10080, Description: "consumer in push mode can not set max waiting"}, + JSConsumerReplacementWithDifferentNameErr: {Code: 400, ErrCode: 10106, Description: "consumer replacement durable config not the same"}, + JSConsumerSmallHeartbeatErr: {Code: 400, ErrCode: 10083, Description: "consumer idle heartbeat needs to be >= 100ms"}, + JSConsumerStoreFailedErrF: {Code: 500, ErrCode: 10104, Description: "error creating store for consumer: {err}"}, + JSConsumerWQConsumerNotDeliverAllErr: {Code: 400, ErrCode: 10101, Description: "consumer must be deliver all on workqueue stream"}, + JSConsumerWQConsumerNotUniqueErr: {Code: 400, ErrCode: 10100, Description: "filtered consumer not unique on workqueue stream"}, + JSConsumerWQMultipleUnfilteredErr: {Code: 400, ErrCode: 10099, Description: "multiple non-filtered consumers not allowed on workqueue 
stream"}, + JSConsumerWQRequiresExplicitAckErr: {Code: 400, ErrCode: 10098, Description: "workqueue stream requires explicit ack"}, + JSInsufficientResourcesErr: {Code: 503, ErrCode: 10023, Description: "insufficient resources"}, + JSInvalidJSONErr: {Code: 400, ErrCode: 10025, Description: "invalid JSON"}, + JSMaximumConsumersLimitErr: {Code: 400, ErrCode: 10026, Description: "maximum consumers limit reached"}, + JSMaximumStreamsLimitErr: {Code: 400, ErrCode: 10027, Description: "maximum number of streams reached"}, + JSMemoryResourcesExceededErr: {Code: 500, ErrCode: 10028, Description: "insufficient memory resources available"}, + JSMirrorConsumerSetupFailedErrF: {Code: 500, ErrCode: 10029, Description: "{err}"}, + JSMirrorMaxMessageSizeTooBigErr: {Code: 400, ErrCode: 10030, Description: "stream mirror must have max message size >= source"}, + JSMirrorWithSourcesErr: {Code: 400, ErrCode: 10031, Description: "stream mirrors can not also contain other sources"}, + JSMirrorWithStartSeqAndTimeErr: {Code: 400, ErrCode: 10032, Description: "stream mirrors can not have both start seq and start time configured"}, + JSMirrorWithSubjectFiltersErr: {Code: 400, ErrCode: 10033, Description: "stream mirrors can not contain filtered subjects"}, + JSMirrorWithSubjectsErr: {Code: 400, ErrCode: 10034, Description: "stream mirrors can not also contain subjects"}, + JSNoAccountErr: {Code: 503, ErrCode: 10035, Description: "account not found"}, + JSNoMessageFoundErr: {Code: 404, ErrCode: 10037, Description: "no message found"}, + JSNotEmptyRequestErr: {Code: 400, ErrCode: 10038, Description: "expected an empty request payload"}, + JSNotEnabledErr: {Code: 503, ErrCode: 10076, Description: "JetStream not enabled"}, + JSNotEnabledForAccountErr: {Code: 503, ErrCode: 10039, Description: "JetStream not enabled for account"}, + JSPeerRemapErr: {Code: 503, ErrCode: 10075, Description: "peer remap failed"}, + JSRaftGeneralErrF: {Code: 500, ErrCode: 10041, Description: "{err}"}, + 
JSRestoreSubscribeFailedErrF: {Code: 500, ErrCode: 10042, Description: "JetStream unable to subscribe to restore snapshot {subject}: {err}"}, + JSSequenceNotFoundErrF: {Code: 400, ErrCode: 10043, Description: "sequence {seq} not found"}, + JSSnapshotDeliverSubjectInvalidErr: {Code: 400, ErrCode: 10015, Description: "deliver subject not valid"}, + JSSourceConsumerSetupFailedErrF: {Code: 500, ErrCode: 10045, Description: "{err}"}, + JSSourceMaxMessageSizeTooBigErr: {Code: 400, ErrCode: 10046, Description: "stream source must have max message size >= target"}, + JSStorageResourcesExceededErr: {Code: 500, ErrCode: 10047, Description: "insufficient storage resources available"}, + JSStreamAssignmentErrF: {Code: 500, ErrCode: 10048, Description: "{err}"}, + JSStreamCreateErrF: {Code: 500, ErrCode: 10049, Description: "{err}"}, + JSStreamDeleteErrF: {Code: 500, ErrCode: 10050, Description: "{err}"}, + JSStreamExternalApiOverlapErrF: {Code: 400, ErrCode: 10021, Description: "stream external api prefix {prefix} must not overlap with {subject}"}, + JSStreamExternalDelPrefixOverlapsErrF: {Code: 400, ErrCode: 10022, Description: "stream external delivery prefix {prefix} overlaps with stream subject {subject}"}, + JSStreamGeneralErrorF: {Code: 500, ErrCode: 10051, Description: "{err}"}, + JSStreamInvalidConfigF: {Code: 500, ErrCode: 10052, Description: "{err}"}, + JSStreamInvalidErr: {Code: 500, ErrCode: 10096, Description: "stream not valid"}, + JSStreamInvalidExternalDeliverySubjErrF: {Code: 400, ErrCode: 10024, Description: "stream external delivery prefix {prefix} must not contain wildcards"}, + JSStreamLimitsErrF: {Code: 500, ErrCode: 10053, Description: "{err}"}, + JSStreamMessageExceedsMaximumErr: {Code: 400, ErrCode: 10054, Description: "message size exceeds maximum allowed"}, + JSStreamMirrorNotUpdatableErr: {Code: 400, ErrCode: 10055, Description: "Mirror configuration can not be updated"}, + JSStreamMismatchErr: {Code: 400, ErrCode: 10056, Description: "stream name 
in subject does not match request"}, + JSStreamMsgDeleteFailedF: {Code: 500, ErrCode: 10057, Description: "{err}"}, + JSStreamNameExistErr: {Code: 400, ErrCode: 10058, Description: "stream name already in use"}, + JSStreamNotFoundErr: {Code: 404, ErrCode: 10059, Description: "stream not found"}, + JSStreamNotMatchErr: {Code: 400, ErrCode: 10060, Description: "expected stream does not match"}, + JSStreamReplicasNotSupportedErr: {Code: 500, ErrCode: 10074, Description: "replicas > 1 not supported in non-clustered mode"}, + JSStreamReplicasNotUpdatableErr: {Code: 400, ErrCode: 10061, Description: "Replicas configuration can not be updated"}, + JSStreamRestoreErrF: {Code: 500, ErrCode: 10062, Description: "restore failed: {err}"}, + JSStreamSequenceNotMatchErr: {Code: 503, ErrCode: 10063, Description: "expected stream sequence does not match"}, + JSStreamSnapshotErrF: {Code: 500, ErrCode: 10064, Description: "snapshot failed: {err}"}, + JSStreamStoreFailedF: {Code: 503, ErrCode: 10077, Description: "{err}"}, + JSStreamSubjectOverlapErr: {Code: 500, ErrCode: 10065, Description: "subjects overlap with an existing stream"}, + JSStreamTemplateCreateErrF: {Code: 500, ErrCode: 10066, Description: "{err}"}, + JSStreamTemplateDeleteErrF: {Code: 500, ErrCode: 10067, Description: "{err}"}, + JSStreamTemplateNotFoundErr: {Code: 404, ErrCode: 10068, Description: "template not found"}, + JSStreamUpdateErrF: {Code: 500, ErrCode: 10069, Description: "{err}"}, + JSStreamWrongLastMsgIDErrF: {Code: 400, ErrCode: 10070, Description: "wrong last msg ID: {id}"}, + JSStreamWrongLastSequenceErrF: {Code: 400, ErrCode: 10071, Description: "wrong last sequence: {seq}"}, + JSTempStorageFailedErr: {Code: 500, ErrCode: 10072, Description: "JetStream unable to open temp storage for restore"}, + JSTemplateNameNotMatchSubjectErr: {Code: 400, ErrCode: 10073, Description: "template name in subject does not match request"}, + } + // ErrJetStreamNotClustered Deprecated by JSClusterNotActiveErr ApiError, 
use IsNatsError() for comparisons + ErrJetStreamNotClustered = ApiErrors[JSClusterNotActiveErr] + // ErrJetStreamNotAssigned Deprecated by JSClusterNotAssignedErr ApiError, use IsNatsError() for comparisons + ErrJetStreamNotAssigned = ApiErrors[JSClusterNotAssignedErr] + // ErrJetStreamNotLeader Deprecated by JSClusterNotLeaderErr ApiError, use IsNatsError() for comparisons + ErrJetStreamNotLeader = ApiErrors[JSClusterNotLeaderErr] + // ErrJetStreamConsumerAlreadyUsed Deprecated by JSConsumerNameExistErr ApiError, use IsNatsError() for comparisons + ErrJetStreamConsumerAlreadyUsed = ApiErrors[JSConsumerNameExistErr] + // ErrJetStreamResourcesExceeded Deprecated by JSInsufficientResourcesErr ApiError, use IsNatsError() for comparisons + ErrJetStreamResourcesExceeded = ApiErrors[JSInsufficientResourcesErr] + // ErrMemoryResourcesExceeded Deprecated by JSMemoryResourcesExceededErr ApiError, use IsNatsError() for comparisons + ErrMemoryResourcesExceeded = ApiErrors[JSMemoryResourcesExceededErr] + // ErrJetStreamNotEnabled Deprecated by JSNotEnabledErr ApiError, use IsNatsError() for comparisons + ErrJetStreamNotEnabled = ApiErrors[JSNotEnabledErr] + // ErrStorageResourcesExceeded Deprecated by JSStorageResourcesExceededErr ApiError, use IsNatsError() for comparisons + ErrStorageResourcesExceeded = ApiErrors[JSStorageResourcesExceededErr] + // ErrJetStreamStreamAlreadyUsed Deprecated by JSStreamNameExistErr ApiError, use IsNatsError() for comparisons + ErrJetStreamStreamAlreadyUsed = ApiErrors[JSStreamNameExistErr] + // ErrJetStreamStreamNotFound Deprecated by JSStreamNotFoundErr ApiError, use IsNatsError() for comparisons + ErrJetStreamStreamNotFound = ApiErrors[JSStreamNotFoundErr] + // ErrReplicasNotSupported Deprecated by JSStreamReplicasNotSupportedErr ApiError, use IsNatsError() for comparisons + ErrReplicasNotSupported = ApiErrors[JSStreamReplicasNotSupportedErr] +) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/jwt.go 
b/vendor/github.com/nats-io/nats-server/v2/server/jwt.go index 538af9b9..478cf4f5 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/jwt.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/jwt.go @@ -108,7 +108,7 @@ func validateTrustedOperators(o *Options) error { return fmt.Errorf("using nats based account resolver - the system account needs to be specified in configuration or the operator jwt") } } - ver := strings.Split(strings.Split(VERSION, "-")[0], ".RC")[0] + ver := strings.Split(strings.Split(strings.Split(VERSION, "-")[0], ".RC")[0], ".beta")[0] srvMajor, srvMinor, srvUpdate, _ := jwt.ParseServerVersion(ver) for _, opc := range o.TrustedOperators { if major, minor, update, err := jwt.ParseServerVersion(opc.AssertServerVersion); err != nil { diff --git a/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go b/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go index 2e410b1b..d71a57a0 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/leafnode.go @@ -1,4 +1,4 @@ -// Copyright 2019-2020 The NATS Authors +// Copyright 2019-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -112,43 +112,54 @@ func (c *client) isHubLeafNode() bool { // are sharing the system account and wanting to extend the JS domain. // r lock should be held. func (s *Server) addInJSDeny(r *leafNodeCfg) { - var hasDE, hasDI bool + s.addInJSDenyExport(r) + s.addInJSDenyImport(r) +} + +// Will add in the deny export for JetStream on solicited connections if we +// detect we have multiple JetStream domains and we know our local account +// is JetStream enabled. +// r lock should be held. 
+func (s *Server) addInJSDenyExport(r *leafNodeCfg) { for _, dsubj := range r.DenyExports { if dsubj == jsAllAPI { - hasDE = true - break + return } } + + s.Noticef("Adding deny export of %q for leafnode configuration on %q that bridges system account", jsAllAPI, r.LocalAccount) + r.DenyExports = append(r.DenyExports, jsAllAPI) + + // We added in some deny clauses here so need to regenerate the permissions etc. + perms := &Permissions{} + perms.Publish = &SubjectPermission{Deny: r.DenyExports} + if len(r.DenyImports) > 0 { + perms.Subscribe = &SubjectPermission{Deny: r.DenyImports} + } + r.perms = perms +} + +// Will add in the deny import for JetStream on solicited connections if we +// detect we have multiple JetStream domains and we know our local account +// is JetStream enabled. +// r lock should be held. +func (s *Server) addInJSDenyImport(r *leafNodeCfg) { for _, dsubj := range r.DenyImports { if dsubj == jsAllAPI { - hasDI = true - break + return } } - var addedDeny bool - if !hasDE { - s.Noticef("Adding deny export of %q for leafnode configuration on %q that bridges system account", jsAllAPI, r.LocalAccount) - r.DenyExports = append(r.DenyExports, jsAllAPI) - addedDeny = true - } - if !hasDI { - s.Noticef("Adding deny import of %q for leafnode configuration on %q that bridges system account", jsAllAPI, r.LocalAccount) - r.DenyImports = append(r.DenyImports, jsAllAPI) - addedDeny = true - } + s.Noticef("Adding deny import of %q for leafnode configuration on %q that bridges system account", jsAllAPI, r.LocalAccount) + r.DenyImports = append(r.DenyImports, jsAllAPI) // We added in some deny clauses here so need to regenerate the permissions etc. 
- if addedDeny { - perms := &Permissions{} - if len(r.DenyExports) > 0 { - perms.Publish = &SubjectPermission{Deny: r.DenyExports} - } - if len(r.DenyImports) > 0 { - perms.Subscribe = &SubjectPermission{Deny: r.DenyImports} - } - r.perms = perms + perms := &Permissions{} + perms.Subscribe = &SubjectPermission{Deny: r.DenyImports} + if len(r.DenyExports) > 0 { + perms.Publish = &SubjectPermission{Deny: r.DenyExports} } + r.perms = perms } // Used for $SYS accounts when sharing but using separate JS domains. @@ -176,21 +187,24 @@ func (s *Server) addInJSDenyAll(r *leafNodeCfg) { func (s *Server) hasSystemRemoteLeaf() bool { s.mu.Lock() defer s.mu.Unlock() + return s.hasSystemRemoteLeafLocked() != nil +} - var sacc string - if s.sys != nil { - sacc = s.sys.account.Name +func (s *Server) hasSystemRemoteLeafLocked() *leafNodeCfg { + if s.sys == nil { + return nil } + sacc := s.sys.account.Name for _, r := range s.leafRemoteCfgs { r.RLock() lacc := r.LocalAccount r.RUnlock() if lacc == sacc { - return true + return r } } - return false + return nil } // This will spin up go routines to solicit the remote leaf node connections. @@ -220,16 +234,6 @@ func validateLeafNode(o *Options) error { return err } - for _, rem := range o.LeafNode.Remotes { - if rem.NoRandomize { - continue - } - - rand.Shuffle(len(rem.URLs), func(i, j int) { - rem.URLs[i], rem.URLs[j] = rem.URLs[j], rem.URLs[i] - }) - } - // In local config mode, check that leafnode configuration refers to accounts that exist. if len(o.TrustedOperators) == 0 { accNames := map[string]struct{}{} @@ -390,6 +394,12 @@ func newLeafNodeCfg(remote *RemoteLeafOpts) *leafNodeCfg { // Start with the one that is configured. We will add to this // array when receiving async leafnode INFOs. cfg.urls = append(cfg.urls, cfg.URLs...) 
+ // If allowed to randomize, do it on our copy of URLs + if !remote.NoRandomize { + rand.Shuffle(len(cfg.urls), func(i, j int) { + cfg.urls[i], cfg.urls[j] = cfg.urls[j], cfg.urls[i] + }) + } // If we are TLS make sure we save off a proper servername if possible. // Do same for user/password since we may need them to connect to // a bare URL that we get from INFO protocol. @@ -453,6 +463,8 @@ func (s *Server) setLeafNodeNonExportedOptions() { } } +const sharedSysAccDelay = 250 * time.Millisecond + func (s *Server) connectToRemoteLeafNode(remote *leafNodeCfg, firstConnect bool) { defer s.grWG.Done() @@ -466,8 +478,18 @@ func (s *Server) connectToRemoteLeafNode(remote *leafNodeCfg, firstConnect bool) s.mu.Lock() dialTimeout := s.leafNodeOpts.dialTimeout resolver := s.leafNodeOpts.resolver + var isSysAcc bool + if s.eventsEnabled() { + isSysAcc = remote.LocalAccount == s.sys.account.Name + } s.mu.Unlock() + // If we are sharing a system account and we are not standalone delay to gather some info prior. + if firstConnect && isSysAcc && !s.standAloneMode() { + s.Debugf("Will delay first leafnode connect to shared system account due to clustering") + remote.setConnectDelay(sharedSysAccDelay) + } + if connDelay := remote.getConnectDelay(); connDelay > 0 { select { case <-time.After(connDelay): @@ -636,17 +658,18 @@ var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[- func (c *client) sendLeafConnect(clusterName string, tlsRequired, headers bool) error { // We support basic user/pass and operator based user JWT with signatures. 
cinfo := leafConnectInfo{ - TLS: tlsRequired, - ID: c.srv.info.ID, - Name: c.srv.info.Name, - Hub: c.leaf.remote.Hub, - Cluster: clusterName, - Headers: headers, - DenyPub: c.leaf.remote.DenyImports, + TLS: tlsRequired, + ID: c.srv.info.ID, + Name: c.srv.info.Name, + Hub: c.leaf.remote.Hub, + Cluster: clusterName, + Headers: headers, + JetStream: c.acc.jetStreamConfigured(), + DenyPub: c.leaf.remote.DenyImports, } // Check for credentials first, that will take precedence.. - if creds := c.leaf.remote.Credentials; creds != "" { + if creds := c.leaf.remote.Credentials; creds != _EMPTY_ { c.Debugf("Authenticating with credentials file %q", c.leaf.remote.Credentials) contents, err := ioutil.ReadFile(creds) if err != nil { @@ -787,9 +810,8 @@ func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCf // Determines if we are soliciting the connection or not. var solicited bool var acc *Account - + var remoteSuffix string if remote != nil { - // TODO: Decide what should be the optimal behavior here. // For now, if lookup fails, we will constantly try // to recreate this LN connection. @@ -805,14 +827,20 @@ func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCf var err error acc, err = s.LookupAccount(lacc) if err != nil { - s.Errorf("No local account %q for leafnode: %v", lacc, err) + // An account not existing is something that can happen with nats/http account resolver and the account + // has not yet been pushed, or the request failed for other reasons. 
+ // remote needs to be set or retry won't happen + c.leaf.remote = remote + c.closeConnection(MissingAccount) + s.Errorf("Unable to lookup account %s for solicited leafnode connection: %v", lacc, err) return nil } + remoteSuffix = fmt.Sprintf(" for account: %s", acc.traceLabel()) } c.mu.Lock() c.initClient() - c.Noticef("Leafnode connection created") + c.Noticef("Leafnode connection created%s", remoteSuffix) if remote != nil { solicited = true @@ -994,30 +1022,45 @@ func (c *client) processLeafnodeInfo(info *Info) { // Check for JetStream semantics to deny the JetStream API as needed. // This is so that if JetStream is enabled on both sides we can separately address both. + hasJSDomain := opts.JetStreamDomain != _EMPTY_ + inJSEnabledDomain := s.JetStreamEnabledForDomain() + + // Check for mixed mode scenarios to resolve presence of domain names. + if !s.JetStreamEnabled() && inJSEnabledDomain && !hasJSDomain && s.jetStreamHasDomainConfigured() { + hasJSDomain = true + } + if remote, acc := c.leaf.remote, c.acc; remote != nil { + accHasJS := acc.jetStreamConfigured() remote.Lock() + // JetStream checks for mappings and permissions updates. if acc != sysAcc { - if hasSysShared { + // Check if JetStream is enabled for this domain. If it's not, and the account + // does not have JS, we can act as pass through, so do not deny. + if hasSysShared && (inJSEnabledDomain || accHasJS) { s.addInJSDeny(remote) } else { // Here we want to suppress if this local account has JS enabled. // This is regardless of whether or not this server is actually running JS. - // We do consider this if the other side is not running JetStream. - if acc != nil && acc.jetStreamConfigured() && info.JetStream { - s.addInJSDeny(remote) + // We only suppress export. But we do send an indication about our JetStream + // status in the connect and the hub side will suppress as well if the remote + // account also has JetStream enabled. 
+ if accHasJS { + s.addInJSDenyExport(remote) } } // If we have a specified JetStream domain we will want to add a mapping to // allow access cross domain for each non-system account. - if opts.JetStreamDomain != _EMPTY_ && acc != sysAcc && acc.jetStreamConfigured() { + if hasJSDomain && acc.jetStreamConfigured() { src := fmt.Sprintf(jsDomainAPI, opts.JetStreamDomain) if err := acc.AddMapping(src, jsAllAPI); err != nil { c.Debugf("Error adding JetStream domain mapping: %v", err) } } - } else if opts.JetStreamDomain != _EMPTY_ { + } else if hasJSDomain { s.addInJSDenyAll(remote) } + c.setPermissions(remote.perms) remote.Unlock() } @@ -1039,9 +1082,15 @@ func (c *client) processLeafnodeInfo(info *Info) { // Check if we have local deny clauses that we need to merge. if remote := c.leaf.remote; remote != nil { if len(remote.DenyExports) > 0 { + if perms.Publish == nil { + perms.Publish = &SubjectPermission{} + } perms.Publish.Deny = append(perms.Publish.Deny, remote.DenyExports...) } if len(remote.DenyImports) > 0 { + if perms.Subscribe == nil { + perms.Subscribe = &SubjectPermission{} + } perms.Subscribe.Deny = append(perms.Subscribe.Deny, remote.DenyImports...) } } @@ -1173,7 +1222,7 @@ func (s *Server) setLeafNodeInfoHostPortAndIP() error { // (this solves the stale connection situation). An error is returned to help the // remote detect the misconfiguration when the duplicate is the result of that // misconfiguration. -func (s *Server) addLeafNodeConnection(c *client, srvName string, checkForDup bool) { +func (s *Server) addLeafNodeConnection(c *client, srvName, clusterName string, checkForDup bool) { var accName string c.mu.Lock() cid := c.cid @@ -1192,7 +1241,8 @@ func (s *Server) addLeafNodeConnection(c *client, srvName string, checkForDup bo // is more about replacing stale connections than detecting loops. // We have code for the loop detection elsewhere, which also delays // attempt to reconnect. 
- if !ol.isSolicitedLeafNode() && ol.leaf.remoteServer == srvName && ol.acc.Name == accName { + if !ol.isSolicitedLeafNode() && ol.leaf.remoteServer == srvName && + ol.leaf.remoteCluster == clusterName && ol.acc.Name == accName { old = ol } ol.mu.Unlock() @@ -1228,21 +1278,24 @@ func (s *Server) removeLeafNodeConnection(c *client) { s.removeFromTempClients(cid) } +// Connect information for solicited leafnodes. type leafConnectInfo struct { - JWT string `json:"jwt,omitempty"` - Sig string `json:"sig,omitempty"` - User string `json:"user,omitempty"` - Pass string `json:"pass,omitempty"` - TLS bool `json:"tls_required"` - Comp bool `json:"compression,omitempty"` - ID string `json:"server_id,omitempty"` - Name string `json:"name,omitempty"` - Hub bool `json:"is_hub,omitempty"` - Cluster string `json:"cluster,omitempty"` - Headers bool `json:"headers,omitempty"` + JWT string `json:"jwt,omitempty"` + Sig string `json:"sig,omitempty"` + User string `json:"user,omitempty"` + Pass string `json:"pass,omitempty"` + TLS bool `json:"tls_required"` + Comp bool `json:"compression,omitempty"` + ID string `json:"server_id,omitempty"` + Name string `json:"name,omitempty"` + Hub bool `json:"is_hub,omitempty"` + Cluster string `json:"cluster,omitempty"` + Headers bool `json:"headers,omitempty"` + JetStream bool `json:"jetstream,omitempty"` + DenyPub []string `json:"deny_pub,omitempty"` + // Just used to detect wrong connection attempts. - Gateway string `json:"gateway,omitempty"` - DenyPub []string `json:"deny_pub,omitempty"` + Gateway string `json:"gateway,omitempty"` } // processLeafNodeConnect will process the inbound connect args. @@ -1265,7 +1318,7 @@ func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) erro // Reject if this has Gateway which means that it would be from a gateway // connection that incorrectly connects to the leafnode port. 
- if proto.Gateway != "" { + if proto.Gateway != _EMPTY_ { errTxt := fmt.Sprintf("Rejecting connection from gateway %q on the leafnode port", proto.Gateway) c.Errorf(errTxt) c.sendErr(errTxt) @@ -1303,7 +1356,14 @@ func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) erro } // Check for JetStream domain - doDomainMappings := opts.JetStreamDomain != _EMPTY_ && c.acc != sysAcc && c.acc.jetStreamConfigured() + jsConfigured := c.acc.jetStreamConfigured() + doDomainMappings := opts.JetStreamDomain != _EMPTY_ && c.acc != sysAcc && jsConfigured + + // If we have JS enabled and the other side does as well we need to add in an import deny clause. + if jsConfigured && proto.JetStream { + // We should never have existing perms here, if that changes this needs to be reworked. + c.setPermissions(&Permissions{Publish: &SubjectPermission{Deny: []string{jsAllAPI}}}) + } // Set the Ping timer s.setFirstPingTimer(c) @@ -1315,7 +1375,7 @@ func (c *client) processLeafNodeConnect(s *Server, arg []byte, lang string) erro c.mu.Unlock() // Add in the leafnode here since we passed through auth at this point. - s.addLeafNodeConnection(c, proto.Name, true) + s.addLeafNodeConnection(c, proto.Name, proto.Cluster, true) // If we have permissions bound to this leafnode we need to send then back to the // origin server for local enforcement. @@ -2022,25 +2082,7 @@ func (c *client) processInboundLeafMsg(msg []byte) { c.in.msgs++ c.in.bytes += int32(len(msg) - LEN_CR_LF) - // Check pub permissions - if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) { - subject := c.pa.subject - // If this subject was mapped we need to check the original subject, not the new one. 
- if len(c.pa.mapped) > 0 { - subject = c.pa.mapped - } - if !c.pubAllowed(string(subject)) { - if c.isHubLeafNode() { - c.leafPubPermViolation(subject) - } else { - c.Debugf("Not permitted to receive from %q", subject) - } - return - } - } - - srv := c.srv - acc := c.acc + srv, acc, subject := c.srv, c.acc, string(c.pa.subject) // Mostly under testing scenarios. if srv == nil || acc == nil { @@ -2054,7 +2096,7 @@ func (c *client) processInboundLeafMsg(msg []byte) { genid := atomic.LoadUint64(&c.acc.sl.genid) if genid == c.in.genid && c.in.results != nil { - r, ok = c.in.results[string(c.pa.subject)] + r, ok = c.in.results[subject] } else { // Reset our L1 completely. c.in.results = make(map[string]*SublistResult) @@ -2063,13 +2105,13 @@ func (c *client) processInboundLeafMsg(msg []byte) { // Go back to the sublist data structure. if !ok { - r = c.acc.sl.Match(string(c.pa.subject)) - c.in.results[string(c.pa.subject)] = r + r = c.acc.sl.Match(subject) + c.in.results[subject] = r // Prune the results cache. Keeps us from unbounded growth. Random delete. if len(c.in.results) > maxResultCacheSize { n := 0 - for subject := range c.in.results { - delete(c.in.results, subject) + for subj := range c.in.results { + delete(c.in.results, subj) if n++; n > pruneSize { break } @@ -2101,12 +2143,6 @@ func (c *client) processInboundLeafMsg(msg []byte) { } } -// Handles a publish permission violation. -// See leafPermViolation() for details. -func (c *client) leafPubPermViolation(subj []byte) { - c.leafPermViolation(true, subj) -} - // Handles a subscription permission violation. // See leafPermViolation() for details. func (c *client) leafSubPermViolation(subj []byte) { @@ -2429,7 +2465,7 @@ func (s *Server) leafNodeFinishConnectProcess(c *client) { // Make sure we register with the account here. 
c.registerWithAccount(acc) - s.addLeafNodeConnection(c, _EMPTY_, false) + s.addLeafNodeConnection(c, _EMPTY_, _EMPTY_, false) s.initLeafNodeSmapAndSendSubs(c) if sendSysConnectEvent { s.sendLeafNodeConnect(acc) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/memstore.go b/vendor/github.com/nats-io/nats-server/v2/server/memstore.go index bd63a3a8..d075d2ec 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/memstore.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/memstore.go @@ -28,6 +28,8 @@ type memStore struct { state StreamState msgs map[uint64]*storedMsg dmap map[uint64]struct{} + fss map[string]*SimpleState + maxp int64 scb StorageUpdateHandler ageChk *time.Timer consumers int @@ -48,7 +50,15 @@ func newMemStore(cfg *StreamConfig) (*memStore, error) { if cfg.Storage != MemoryStorage { return nil, fmt.Errorf("memStore requires memory storage type in config") } - return &memStore{msgs: make(map[uint64]*storedMsg), dmap: make(map[uint64]struct{}), cfg: *cfg}, nil + ms := &memStore{ + msgs: make(map[uint64]*storedMsg), + fss: make(map[string]*SimpleState), + dmap: make(map[uint64]struct{}), + maxp: cfg.MaxMsgsPer, + cfg: *cfg, + } + + return ms, nil } func (ms *memStore) UpdateConfig(cfg *StreamConfig) error { @@ -95,6 +105,11 @@ func (ms *memStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts int if ms.cfg.MaxBytes > 0 && ms.state.Bytes+uint64(len(msg)+len(hdr)) >= uint64(ms.cfg.MaxBytes) { return ErrMaxBytes } + if ms.maxp > 0 && len(subj) > 0 { + if ss := ms.fss[subj]; ss != nil && ss.Msgs >= uint64(ms.maxp) { + return ErrMaxMsgsPerSubject + } + } } if seq != ms.state.LastSeq+1 { @@ -126,6 +141,20 @@ func (ms *memStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts int ms.state.LastSeq = seq ms.state.LastTime = now + // Track per subject. + if len(subj) > 0 { + if ss := ms.fss[subj]; ss != nil { + ss.Msgs++ + ss.Last = seq + // Check per subject limits. 
+ if ms.maxp > 0 && ss.Msgs > uint64(ms.maxp) { + ms.enforcePerSubjectLimit(ss) + } + } else { + ms.fss[subj] = &SimpleState{Msgs: 1, First: seq, Last: seq} + } + } + // Limits checks and enforcement. ms.enforceMsgLimit() ms.enforceBytesLimit() @@ -222,35 +251,101 @@ func (ms *memStore) GetSeqFromTime(t time.Time) uint64 { return uint64(index) + ms.state.FirstSeq } -// Returns number of messages matching the subject starting at sequence sseq. -func (ms *memStore) NumFilteredPending(sseq uint64, subj string) (total uint64) { +// FilteredState will return the SimpleState associated with the filtered subject and a proposed starting sequence. +func (ms *memStore) FilteredState(sseq uint64, subj string) SimpleState { ms.mu.RLock() defer ms.mu.RUnlock() + return ms.filteredStateLocked(sseq, subj) +} + +func (ms *memStore) filteredStateLocked(sseq uint64, subj string) SimpleState { + var ss SimpleState if sseq < ms.state.FirstSeq { sseq = ms.state.FirstSeq } - if subj == _EMPTY_ { - if sseq <= ms.state.LastSeq { - return ms.state.LastSeq - sseq - } - return 0 + // If past the end no results. + if sseq > ms.state.LastSeq { + return ss } - var eq func(string, string) bool - if subjectHasWildcard(subj) { - eq = subjectIsSubsetMatch - } else { - eq = func(a, b string) bool { return a == b } + // If we want everything. 
+ if subj == _EMPTY_ || subj == fwcs { + ss.Msgs, ss.First, ss.Last = ms.state.Msgs, ms.state.FirstSeq, ms.state.LastSeq + return ss + } + + wc := subjectHasWildcard(subj) + subs := []string{subj} + if wc { + subs = subs[:0] + for fsubj := range ms.fss { + if subjectIsSubsetMatch(fsubj, subj) { + subs = append(subs, fsubj) + } + } + } + fseq, lseq := ms.state.LastSeq, uint64(0) + for _, subj := range subs { + ss := ms.fss[subj] + if ss == nil { + continue + } + if ss.First < fseq { + fseq = ss.First + } + if ss.Last > lseq { + lseq = ss.Last + } + } + if fseq < sseq { + fseq = sseq } - for seq := sseq; seq <= ms.state.LastSeq; seq++ { + // FIXME(dlc) - Optimize better like filestore. + eq := compareFn(subj) + for seq := fseq; seq <= lseq; seq++ { if sm, ok := ms.msgs[seq]; ok && eq(sm.subj, subj) { - total++ + ss.Msgs++ + if ss.First == 0 { + ss.First = seq + } + ss.Last = seq + } + } + return ss +} + +// SubjectsState returns a map of SimpleState for all matching subjects. +func (ms *memStore) SubjectsState(subject string) map[string]SimpleState { + ms.mu.RLock() + defer ms.mu.RUnlock() + + if len(ms.fss) == 0 { + return nil + } + + fss := make(map[string]SimpleState) + for subj, ss := range ms.fss { + if subject == _EMPTY_ || subject == fwcs || subjectIsSubsetMatch(subj, subject) { + fss[subj] = *ss + } + } + return fss +} + +// Will check the msg limit for this tracked subject. +// Lock should be held. +func (ms *memStore) enforcePerSubjectLimit(ss *SimpleState) { + if ms.maxp <= 0 { + return + } + for nmsgs := ss.Msgs; nmsgs > uint64(ms.maxp); nmsgs = ss.Msgs { + if !ms.removeMsg(ss.First, false) { + break } } - return total } // Will check the msg limit and drop firstSeq msg if needed. @@ -312,6 +407,55 @@ func (ms *memStore) expireMsgs() { } } +// PurgeEx will remove messages based on subject filters, sequence and number of messages to keep. +// Will return the number of purged messages. 
+func (ms *memStore) PurgeEx(subject string, sequence, keep uint64) (purged uint64, err error) { + if subject == _EMPTY_ || subject == fwcs { + if keep == 0 && (sequence == 0 || sequence == 1) { + return ms.Purge() + } + if sequence > 1 { + return ms.Compact(sequence) + } else if keep > 0 { + ms.mu.RLock() + msgs, lseq := ms.state.Msgs, ms.state.LastSeq + ms.mu.RUnlock() + if keep >= msgs { + return 0, nil + } + return ms.Compact(lseq - keep + 1) + } + return 0, nil + + } + eq := compareFn(subject) + if ss := ms.FilteredState(1, subject); ss.Msgs > 0 { + if keep > 0 { + if keep >= ss.Msgs { + return 0, nil + } + ss.Msgs -= keep + } + last := ss.Last + if sequence > 0 { + last = sequence - 1 + } + ms.mu.Lock() + for seq := ss.First; seq <= last; seq++ { + if sm, ok := ms.msgs[seq]; ok && eq(sm.subj, subject) { + if ok := ms.removeMsg(sm.seq, false); ok { + purged++ + if purged >= ss.Msgs { + break + } + } + } + } + ms.mu.Unlock() + } + return purged, nil +} + // Purge will remove all messages from this store. // Will return the number of purged messages. func (ms *memStore) Purge() (uint64, error) { @@ -324,6 +468,7 @@ func (ms *memStore) Purge() (uint64, error) { ms.state.Bytes = 0 ms.state.Msgs = 0 ms.msgs = make(map[uint64]*storedMsg) + ms.fss = make(map[string]*SimpleState) ms.dmap = make(map[uint64]struct{}) ms.mu.Unlock() @@ -445,11 +590,31 @@ func (ms *memStore) LoadMsg(seq uint64) (string, []byte, []byte, int64, error) { if seq <= last { err = ErrStoreMsgNotFound } - return "", nil, nil, 0, err + return _EMPTY_, nil, nil, 0, err } return sm.subj, sm.hdr, sm.msg, sm.ts, nil } +// LoadLastMsg will return the last message we have that matches a given subject. +// The subject can be a wildcard. 
+func (ms *memStore) LoadLastMsg(subject string) (subj string, seq uint64, hdr, msg []byte, ts int64, err error) { + var sm *storedMsg + var ok bool + + ms.mu.RLock() + defer ms.mu.RUnlock() + + if subject == _EMPTY_ || subject == fwcs { + sm, ok = ms.msgs[ms.state.LastSeq] + } else if ss := ms.filteredStateLocked(1, subject); ss.Msgs > 0 { + sm, ok = ms.msgs[ss.Last] + } + if !ok || sm == nil { + return _EMPTY_, 0, nil, nil, 0, ErrStoreMsgNotFound + } + return sm.subj, sm.seq, sm.hdr, sm.msg, sm.ts, nil +} + // RemoveMsg will remove the message from this store. // Will return the number of bytes removed. func (ms *memStore) RemoveMsg(seq uint64) (bool, error) { @@ -494,6 +659,30 @@ func (ms *memStore) updateFirstSeq(seq uint64) { } } +// Remove a seq from the fss and select new first. +// Lock should be held. +func (ms *memStore) removeSeqPerSubject(subj string, seq uint64) { + ss := ms.fss[subj] + if ss == nil { + return + } + if ss.Msgs == 1 { + delete(ms.fss, subj) + return + } + ss.Msgs-- + if seq != ss.First { + return + } + // TODO(dlc) - Might want to optimize this. + for tseq := seq + 1; tseq < ss.Last; tseq++ { + if sm := ms.msgs[tseq]; sm != nil && sm.subj == subj { + ss.First = tseq + return + } + } +} + // Removes the message referenced by seq. // Lock should he held. func (ms *memStore) removeMsg(seq uint64, secure bool) bool { @@ -522,6 +711,9 @@ func (ms *memStore) removeMsg(seq uint64, secure bool) bool { sm.seq, sm.ts = 0, 0 } + // Remove any per subject tracking. + ms.removeSeqPerSubject(sm.subj, seq) + if ms.scb != nil { // We do not want to hold any locks here. 
ms.mu.Unlock() @@ -630,6 +822,9 @@ func (os *consumerMemStore) Stop() error { func (os *consumerMemStore) Delete() error { return os.Stop() } +func (os *consumerMemStore) StreamDelete() error { + return os.Stop() +} func (os *consumerMemStore) State() (*ConsumerState, error) { return nil, nil } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/monitor.go b/vendor/github.com/nats-io/nats-server/v2/server/monitor.go index 66f5619a..9e27323b 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/monitor.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/monitor.go @@ -1054,6 +1054,7 @@ type Varz struct { type JetStreamVarz struct { Config *JetStreamConfig `json:"config,omitempty"` Stats *JetStreamStats `json:"stats,omitempty"` + Meta *ClusterInfo `json:"meta,omitempty"` } // ClusterOptsVarz contains monitoring cluster information @@ -1431,6 +1432,9 @@ func (s *Server) updateVarzRuntimeFields(v *Varz, forceUpdate bool, pcpu float64 // FIXME(dlc) - We have lock inversion that needs to be fixed up properly. s.mu.Unlock() v.JetStream.Stats = s.js.usageStats() + if mg := s.js.getMetaGroup(); mg != nil { + v.JetStream.Meta = s.raftNodeToClusterInfo(mg) + } s.mu.Lock() } } @@ -2371,6 +2375,24 @@ func (s *Server) JszAccount(opts *JSzOptions) (*AccountDetail, error) { return s.accountDetail(jsa, opts.Streams, opts.Consumer, opts.Config), nil } +// helper to get cluster info from node via dummy group +func (s *Server) raftNodeToClusterInfo(node RaftNode) *ClusterInfo { + if node == nil { + return nil + } + peers := node.Peers() + peerList := make([]string, len(peers)) + for i, p := range peers { + peerList[i] = p.ID + } + group := &raftGroup{ + Name: _EMPTY_, + Peers: peerList, + node: node, + } + return s.js.clusterInfo(group) +} + // Jsz returns a Jsz structure containing information about JetStream. 
func (s *Server) Jsz(opts *JSzOptions) (*JSInfo, error) { // set option defaults @@ -2405,23 +2427,6 @@ func (s *Server) Jsz(opts *JSzOptions) (*JSInfo, error) { } } - // helper to get cluster info from node via dummy group - toClusterInfo := func(node RaftNode) *ClusterInfo { - if node == nil { - return nil - } - peers := node.Peers() - peerList := make([]string, len(peers)) - for i, p := range node.Peers() { - peerList[i] = p.ID - } - group := &raftGroup{ - Name: "", - Peers: peerList, - node: node, - } - return s.js.clusterInfo(group) - } jsi := &JSInfo{ ID: s.ID(), Now: time.Now().UTC(), @@ -2437,10 +2442,12 @@ func (s *Server) Jsz(opts *JSzOptions) (*JSInfo, error) { for _, info := range s.js.accounts { accounts = append(accounts, info) } - jsi.APICalls = atomic.LoadInt64(&s.js.apiCalls) s.js.mu.RUnlock() + jsi.APICalls = atomic.LoadInt64(&s.js.apiCalls) + + jsi.Meta = s.raftNodeToClusterInfo(s.js.getMetaGroup()) + jsi.JetStreamStats = *s.js.usageStats() - jsi.Meta = toClusterInfo(s.js.getMetaGroup()) filterIdx := -1 for i, jsa := range accounts { if jsa.acc().GetName() == opts.Account { @@ -2448,10 +2455,6 @@ func (s *Server) Jsz(opts *JSzOptions) (*JSInfo, error) { } jsa.mu.RLock() jsi.Streams += len(jsa.streams) - jsi.Memory += uint64(jsa.usage.mem) - jsi.Store += uint64(jsa.usage.store) - jsi.API.Total += jsa.usage.api - jsi.API.Errors += jsa.usage.err for _, stream := range jsa.streams { streamState := stream.state() jsi.Messages += streamState.Msgs diff --git a/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go b/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go index bc109c7f..7c7937cf 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/mqtt.go @@ -1,4 +1,4 @@ -// Copyright 2020 The NATS Authors +// Copyright 2020-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -962,7 +962,7 @@ func (s *Server) mqttCreateAccountSessionManager(acc *Account, quitCh chan struc Retention: InterestPolicy, Replicas: as.replicas, } - if _, err := jsa.createStream(cfg); isErrorOtherThan(err, ErrJetStreamStreamAlreadyUsed) { + if _, err := jsa.createStream(cfg); isErrorOtherThan(err, JSStreamNameExistErr) { return nil, fmt.Errorf("create messages stream for account %q: %v", acc.GetName(), err) } @@ -975,7 +975,7 @@ func (s *Server) mqttCreateAccountSessionManager(acc *Account, quitCh chan struc Replicas: as.replicas, } si, err := jsa.createStream(cfg) - if isErrorOtherThan(err, ErrJetStreamStreamAlreadyUsed) { + if isErrorOtherThan(err, JSStreamNameExistErr) { return nil, fmt.Errorf("create retained messages stream for account %q: %v", acc.GetName(), err) } if err != nil { @@ -1110,18 +1110,6 @@ func (jsa *mqttJSA) newRequestEx(kind, subject string, hdr int, msg []byte, time return i, nil } -// If `e` is not nil, returns an error corresponding to e.Description, if not empty, -// or an error of the form: "code %d". 
-func convertApiErrorToError(e *ApiError) error { - if e == nil { - return nil - } - if e.Description == _EMPTY_ { - return fmt.Errorf("code %d", e.Code) - } - return errors.New(e.Description) -} - func (jsa *mqttJSA) createConsumer(cfg *CreateConsumerRequest) (*JSApiConsumerCreateResponse, error) { cfgb, err := json.Marshal(cfg) if err != nil { @@ -1138,7 +1126,7 @@ func (jsa *mqttJSA) createConsumer(cfg *CreateConsumerRequest) (*JSApiConsumerCr return nil, err } ccr := ccri.(*JSApiConsumerCreateResponse) - return ccr, convertApiErrorToError(ccr.Error) + return ccr, ccr.ToError() } func (jsa *mqttJSA) deleteConsumer(streamName, consName string) (*JSApiConsumerDeleteResponse, error) { @@ -1148,7 +1136,7 @@ func (jsa *mqttJSA) deleteConsumer(streamName, consName string) (*JSApiConsumerD return nil, err } cdr := cdri.(*JSApiConsumerDeleteResponse) - return cdr, convertApiErrorToError(cdr.Error) + return cdr, cdr.ToError() } func (jsa *mqttJSA) createStream(cfg *StreamConfig) (*StreamInfo, error) { @@ -1161,7 +1149,7 @@ func (jsa *mqttJSA) createStream(cfg *StreamConfig) (*StreamInfo, error) { return nil, err } scr := scri.(*JSApiStreamCreateResponse) - return scr.StreamInfo, convertApiErrorToError(scr.Error) + return scr.StreamInfo, scr.ToError() } func (jsa *mqttJSA) lookupStream(name string) (*StreamInfo, error) { @@ -1170,7 +1158,7 @@ func (jsa *mqttJSA) lookupStream(name string) (*StreamInfo, error) { return nil, err } slr := slri.(*JSApiStreamInfoResponse) - return slr.StreamInfo, convertApiErrorToError(slr.Error) + return slr.StreamInfo, slr.ToError() } func (jsa *mqttJSA) deleteStream(name string) (bool, error) { @@ -1179,7 +1167,7 @@ func (jsa *mqttJSA) deleteStream(name string) (bool, error) { return false, err } sdr := sdri.(*JSApiStreamDeleteResponse) - return sdr.Success, convertApiErrorToError(sdr.Error) + return sdr.Success, sdr.ToError() } func (jsa *mqttJSA) loadMsg(streamName string, seq uint64) (*StoredMsg, error) { @@ -1193,7 +1181,7 @@ func (jsa 
*mqttJSA) loadMsg(streamName string, seq uint64) (*StoredMsg, error) { return nil, err } lmr := lmri.(*JSApiMsgGetResponse) - return lmr.Message, convertApiErrorToError(lmr.Error) + return lmr.Message, lmr.ToError() } func (jsa *mqttJSA) storeMsg(subject string, headers int, msg []byte) (*JSPubAckResponse, error) { @@ -1206,7 +1194,7 @@ func (jsa *mqttJSA) storeMsgWithKind(kind, subject string, headers int, msg []by return nil, err } smr := smri.(*JSPubAckResponse) - return smr, convertApiErrorToError(smr.Error) + return smr, smr.ToError() } func (jsa *mqttJSA) deleteMsg(stream string, seq uint64) { @@ -1224,17 +1212,15 @@ func (jsa *mqttJSA) deleteMsg(stream string, seq uint64) { // ////////////////////////////////////////////////////////////////////////////// -// Returns true if `err1` is not nil and does not match `err2`, that is -// their error strings are different. -// Assumes that `err2` is never nil. -func isErrorOtherThan(err1, err2 error) bool { - return err1 != nil && err1.Error() != err2.Error() +// Returns true if `err` is not nil and does not match the api error with ErrorIdentifier id +func isErrorOtherThan(err error, id ErrorIdentifier) bool { + return err != nil && !IsNatsErr(err, id) } // Process JS API replies. // // Can run from various go routines (consumer's loop, system send loop, etc..). 
-func (as *mqttAccountSessionManager) processJSAPIReplies(_ *subscription, pc *client, subject, _ string, msg []byte) { +func (as *mqttAccountSessionManager) processJSAPIReplies(_ *subscription, pc *client, _ *Account, subject, _ string, msg []byte) { token := tokenAt(subject, mqttJSATokenPos) if token == _EMPTY_ { return @@ -1250,43 +1236,43 @@ func (as *mqttAccountSessionManager) processJSAPIReplies(_ *subscription, pc *cl case mqttJSAStreamCreate: var resp = &JSApiStreamCreateResponse{} if err := json.Unmarshal(msg, resp); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] } ch <- resp case mqttJSAStreamLookup: var resp = &JSApiStreamInfoResponse{} if err := json.Unmarshal(msg, &resp); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] } ch <- resp case mqttJSAStreamDel: var resp = &JSApiStreamDeleteResponse{} if err := json.Unmarshal(msg, &resp); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] } ch <- resp case mqttJSAConsumerCreate: var resp = &JSApiConsumerCreateResponse{} if err := json.Unmarshal(msg, resp); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] } ch <- resp case mqttJSAConsumerDel: var resp = &JSApiConsumerDeleteResponse{} if err := json.Unmarshal(msg, resp); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] } ch <- resp case mqttJSAMsgStore, mqttJSASessPersist: var resp = &JSPubAckResponse{} if err := json.Unmarshal(msg, resp); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] } ch <- resp case mqttJSAMsgLoad: var resp = &JSApiMsgGetResponse{} if err := json.Unmarshal(msg, resp); err != nil { - resp.Error = jsInvalidJSONErr + resp.Error = ApiErrors[JSInvalidJSONErr] } ch <- resp default: @@ -1298,7 +1284,7 @@ func (as *mqttAccountSessionManager) processJSAPIReplies(_ *subscription, pc *cl // // Run from various go 
routines (JS consumer, etc..). // No lock held on entry. -func (as *mqttAccountSessionManager) processRetainedMsg(_ *subscription, c *client, subject, reply string, rmsg []byte) { +func (as *mqttAccountSessionManager) processRetainedMsg(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { _, msg := c.msgParts(rmsg) rm := &mqttRetainedMsg{} if err := json.Unmarshal(msg, rm); err != nil { @@ -1324,7 +1310,7 @@ func (as *mqttAccountSessionManager) processRetainedMsg(_ *subscription, c *clie } } -func (as *mqttAccountSessionManager) processRetainedMsgDel(_ *subscription, c *client, subject, reply string, rmsg []byte) { +func (as *mqttAccountSessionManager) processRetainedMsgDel(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { idHash := tokenAt(subject, 3) if idHash == _EMPTY_ || idHash == as.jsa.id { return @@ -1348,7 +1334,7 @@ func (as *mqttAccountSessionManager) processRetainedMsgDel(_ *subscription, c *c // // Can run from various go routines (system send loop, etc..). // No lock held on entry. -func (as *mqttAccountSessionManager) processSessionPersist(_ *subscription, pc *client, subject, _ string, rmsg []byte) { +func (as *mqttAccountSessionManager) processSessionPersist(_ *subscription, pc *client, _ *Account, subject, _ string, rmsg []byte) { // Ignore our own responses here (they are handled elsewhere) if tokenAt(subject, mqttJSAIdTokenPos) == as.jsa.id { return @@ -1361,7 +1347,7 @@ func (as *mqttAccountSessionManager) processSessionPersist(_ *subscription, pc * if err := json.Unmarshal(msg, par); err != nil { return } - if err := convertApiErrorToError(par.Error); err != nil { + if err := par.Error; err != nil { return } cIDHash := strings.TrimPrefix(par.Stream, mqttSessionsStreamNamePrefix) @@ -1712,12 +1698,18 @@ func (as *mqttAccountSessionManager) processSubs(sess *mqttSession, c *client, // Helper that sets the sub's mqtt fields and possibly serialize retained messages. 
// Assumes account manager and session lock held. setupSub := func(sub *subscription, qos byte) { - if sub.mqtt == nil { - sub.mqtt = &mqttSub{} + subs := []*subscription{sub} + if len(sub.shadow) > 0 { + subs = append(subs, sub.shadow...) } - sub.mqtt.qos = qos - if fromSubProto { - as.serializeRetainedMsgsForSub(sess, c, sub, trace) + for _, sub := range subs { + if sub.mqtt == nil { + sub.mqtt = &mqttSub{} + } + sub.mqtt.qos = qos + if fromSubProto { + as.serializeRetainedMsgsForSub(sess, c, sub, trace) + } } } @@ -1889,13 +1881,13 @@ CREATE_STREAM: if err != nil { // Check for insufficient resources. If that is the case, and if possible, try // again with a lower replicas value. - if cfg.Replicas > 1 && err.Error() == jsInsufficientErr.Description { + if cfg.Replicas > 1 && IsNatsErr(err, JSInsufficientResourcesErr) { cfg.Replicas-- goto CREATE_STREAM } // If there is an error and not simply "already used" (which means that the // stream already exists) then we fail. - if isErrorOtherThan(err, ErrJetStreamStreamAlreadyUsed) { + if isErrorOtherThan(err, JSStreamNameExistErr) { return formatError("create session stream", err) } } @@ -2508,7 +2500,7 @@ CHECK: asm.mu.Lock() asm.addSessToFlappers(cp.clientID) asm.mu.Unlock() - c.Warnf("Replacing old client %q since both have the same client ID %q", ec.String(), cp.clientID) + c.Warnf("Replacing old client %q since both have the same client ID %q", ec, cp.clientID) // Close old client in separate go routine go ec.closeConnection(DuplicateClientID) } @@ -3055,7 +3047,7 @@ func mqttSubscribeTrace(pi uint16, filters []*mqttFilter) string { // message and this is the callback for a QoS1 subscription because in // that case, it will be handled by the other callback. This avoid getting // duplicate deliveries. 
-func mqttDeliverMsgCbQos0(sub *subscription, pc *client, subject, _ string, rmsg []byte) { +func mqttDeliverMsgCbQos0(sub *subscription, pc *client, _ *Account, subject, _ string, rmsg []byte) { if pc.kind == JETSTREAM { return } @@ -3118,7 +3110,7 @@ func mqttDeliverMsgCbQos0(sub *subscription, pc *client, subject, _ string, rmsg // associated with the JS durable consumer), but in cluster mode, this can be coming // from a route, gw, etc... We make sure that if this is the case, the message contains // a NATS/MQTT header that indicates that this is a published QoS1 message. -func mqttDeliverMsgCbQos1(sub *subscription, pc *client, subject, reply string, rmsg []byte) { +func mqttDeliverMsgCbQos1(sub *subscription, pc *client, _ *Account, subject, reply string, rmsg []byte) { var retained bool // Message on foo.bar is stored under $MQTT.msgs.foo.bar, so the subject has to be diff --git a/vendor/github.com/nats-io/nats-server/v2/server/ocsp.go b/vendor/github.com/nats-io/nats-server/v2/server/ocsp.go new file mode 100644 index 00000000..3f7dbcdf --- /dev/null +++ b/vendor/github.com/nats-io/nats-server/v2/server/ocsp.go @@ -0,0 +1,797 @@ +// Copyright 2021 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package server + +import ( + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/crypto/ocsp" +) + +const ( + defaultOCSPStoreDir = "ocsp" + defaultOCSPCheckInterval = 24 * time.Hour + minOCSPCheckInterval = 2 * time.Minute +) + +type OCSPMode uint8 + +const ( + // OCSPModeAuto staples a status, only if "status_request" is set in cert. + OCSPModeAuto OCSPMode = iota + + // OCSPModeAlways enforces OCSP stapling for certs and shuts down the server in + // case a server is revoked or cannot get OCSP staples. + OCSPModeAlways + + // OCSPModeNever disables OCSP stapling even if cert has Must-Staple flag. + OCSPModeNever + + // OCSPModeMust honors the Must-Staple flag from a certificate but also causing shutdown + // in case the certificate has been revoked. + OCSPModeMust +) + +// OCSPMonitor monitors the state of a staple per certificate. +type OCSPMonitor struct { + kind string + mu sync.Mutex + raw []byte + srv *Server + certFile string + resp *ocsp.Response + hc *http.Client + stopCh chan struct{} + Leaf *x509.Certificate + Issuer *x509.Certificate + + shutdownOnRevoke bool +} + +func (oc *OCSPMonitor) getNextRun() time.Duration { + oc.mu.Lock() + nextUpdate := oc.resp.NextUpdate + oc.mu.Unlock() + + now := time.Now() + if nextUpdate.IsZero() { + // If response is missing NextUpdate, we check the day after. + // Technically, if NextUpdate is missing, we can try whenever. + // https://tools.ietf.org/html/rfc6960#section-4.2.2.1 + return defaultOCSPCheckInterval + } + dur := nextUpdate.Sub(now) / 2 + + // If negative, then wait a couple of minutes before getting another staple. 
+ if dur < 0 { + return minOCSPCheckInterval + } + + return dur +} + +func (oc *OCSPMonitor) getStatus() ([]byte, *ocsp.Response, error) { + raw, resp := oc.getCacheStatus() + if len(raw) > 0 && resp != nil { + // Check if the OCSP is still valid. + if err := validOCSPResponse(resp); err == nil { + return raw, resp, nil + } + } + var err error + raw, resp, err = oc.getLocalStatus() + if err == nil { + return raw, resp, nil + } + + return oc.getRemoteStatus() +} + +func (oc *OCSPMonitor) getCacheStatus() ([]byte, *ocsp.Response) { + oc.mu.Lock() + defer oc.mu.Unlock() + return oc.raw, oc.resp +} + +func (oc *OCSPMonitor) getLocalStatus() ([]byte, *ocsp.Response, error) { + opts := oc.srv.getOpts() + storeDir := opts.StoreDir + if storeDir == _EMPTY_ { + return nil, nil, fmt.Errorf("store_dir not set") + } + + // This key must be based upon the current full certificate, not the public key, + // so MUST be on the full raw certificate and not an SPKI or other reduced form. + key := fmt.Sprintf("%x", sha256.Sum256(oc.Leaf.Raw)) + + oc.mu.Lock() + raw, err := ioutil.ReadFile(filepath.Join(storeDir, defaultOCSPStoreDir, key)) + oc.mu.Unlock() + if err != nil { + return nil, nil, err + } + + resp, err := ocsp.ParseResponse(raw, oc.Issuer) + if err != nil { + return nil, nil, err + } + if err := validOCSPResponse(resp); err != nil { + return nil, nil, err + } + + // Cache the response. 
+ oc.mu.Lock() + oc.raw = raw + oc.resp = resp + oc.mu.Unlock() + + return raw, resp, nil +} + +func (oc *OCSPMonitor) getRemoteStatus() ([]byte, *ocsp.Response, error) { + opts := oc.srv.getOpts() + var overrideURLs []string + if config := opts.OCSPConfig; config != nil { + overrideURLs = config.OverrideURLs + } + getRequestBytes := func(u string, hc *http.Client) ([]byte, error) { + resp, err := hc.Get(u) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("non-ok http status: %d", resp.StatusCode) + } + + return ioutil.ReadAll(resp.Body) + } + + // Request documentation: + // https://tools.ietf.org/html/rfc6960#appendix-A.1 + + reqDER, err := ocsp.CreateRequest(oc.Leaf, oc.Issuer, nil) + if err != nil { + return nil, nil, err + } + + reqEnc := base64.StdEncoding.EncodeToString(reqDER) + + responders := oc.Leaf.OCSPServer + if len(overrideURLs) > 0 { + responders = overrideURLs + } + if len(responders) == 0 { + return nil, nil, fmt.Errorf("no available ocsp servers") + } + + oc.mu.Lock() + hc := oc.hc + oc.mu.Unlock() + var raw []byte + for _, u := range responders { + u = strings.TrimSuffix(u, "/") + raw, err = getRequestBytes(fmt.Sprintf("%s/%s", u, reqEnc), hc) + if err == nil { + break + } + } + if err != nil { + return nil, nil, fmt.Errorf("exhausted ocsp servers: %w", err) + } + + resp, err := ocsp.ParseResponse(raw, oc.Issuer) + if err != nil { + return nil, nil, err + } + if err := validOCSPResponse(resp); err != nil { + return nil, nil, err + } + + if storeDir := opts.StoreDir; storeDir != _EMPTY_ { + key := fmt.Sprintf("%x", sha256.Sum256(oc.Leaf.Raw)) + if err := oc.writeOCSPStatus(storeDir, key, raw); err != nil { + return nil, nil, fmt.Errorf("failed to write ocsp status: %w", err) + } + } + + oc.mu.Lock() + oc.raw = raw + oc.resp = resp + oc.mu.Unlock() + + return raw, resp, nil +} + +func (oc *OCSPMonitor) run() { + s := oc.srv + s.mu.Lock() + quitCh := s.quitCh + 
s.mu.Unlock() + + defer s.grWG.Done() + + oc.mu.Lock() + shutdownOnRevoke := oc.shutdownOnRevoke + certFile := oc.certFile + stopCh := oc.stopCh + kind := oc.kind + oc.mu.Unlock() + + var nextRun time.Duration + _, resp, err := oc.getStatus() + if err == nil && resp.Status == ocsp.Good { + nextRun = oc.getNextRun() + t := resp.NextUpdate.Format(time.RFC3339Nano) + s.Noticef( + "Found OCSP status for %s certificate at '%s': good, next update %s, checking again in %s", + kind, certFile, t, nextRun, + ) + } else if err == nil && shutdownOnRevoke { + // If resp.Status is ocsp.Revoked, ocsp.Unknown, or any other value. + s.Errorf("Found OCSP status for %s certificate at '%s': %s", kind, certFile, ocspStatusString(resp.Status)) + s.Shutdown() + return + } + + for { + // On reload, if the certificate changes then need to stop this monitor. + select { + case <-time.After(nextRun): + case <-stopCh: + // In case of reload and have to restart the OCSP stapling monitoring. + return + case <-quitCh: + // Server quit channel. + return + } + _, resp, err := oc.getRemoteStatus() + if err != nil { + nextRun = oc.getNextRun() + s.Errorf("Bad OCSP status update for certificate '%s': %s, trying again in %v", certFile, err, nextRun) + continue + } + + switch n := resp.Status; n { + case ocsp.Good: + nextRun = oc.getNextRun() + t := resp.NextUpdate.Format(time.RFC3339Nano) + s.Noticef( + "Received OCSP status for %s certificate '%s': good, next update %s, checking again in %s", + kind, certFile, t, nextRun, + ) + continue + default: + s.Errorf("Received OCSP status for %s certificate '%s': %s", kind, certFile, ocspStatusString(n)) + if shutdownOnRevoke { + s.Shutdown() + } + return + } + } +} + +func (oc *OCSPMonitor) stop() { + oc.mu.Lock() + stopCh := oc.stopCh + oc.mu.Unlock() + stopCh <- struct{}{} +} + +// NewOCSPMonitor takes a TLS configuration then wraps it with the callbacks set for OCSP verification +// along with a monitor that will periodically fetch OCSP staples. 
+func (srv *Server) NewOCSPMonitor(config *tlsConfigKind) (*tls.Config, *OCSPMonitor, error) { + kind := config.kind + tc := config.tlsConfig + tcOpts := config.tlsOpts + opts := srv.getOpts() + oc := opts.OCSPConfig + + // We need to track the CA certificate in case the CA is not present + // in the chain to be able to verify the signature of the OCSP staple. + var ( + certFile string + caFile string + ) + if kind == kindStringMap[CLIENT] { + tcOpts = opts.tlsConfigOpts + if opts.TLSCert != _EMPTY_ { + certFile = opts.TLSCert + } + if opts.TLSCaCert != _EMPTY_ { + caFile = opts.TLSCaCert + } + } + if tcOpts != nil { + certFile = tcOpts.CertFile + caFile = tcOpts.CaFile + } + + // NOTE: Currently OCSP Stapling is enabled only for the first certificate found. + var mon *OCSPMonitor + for _, cert := range tc.Certificates { + // This is normally non-nil, but can still be nil here when in tests + // or in some embedded scenarios. + if cert.Leaf == nil { + if len(cert.Certificate) <= 0 { + return nil, nil, fmt.Errorf("no certificate found") + } + var err error + cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return nil, nil, fmt.Errorf("error parsing certificate: %v", err) + } + } + var shutdownOnRevoke bool + mustStaple := hasOCSPStatusRequest(cert.Leaf) + if oc != nil { + switch { + case oc.Mode == OCSPModeNever: + if mustStaple { + srv.Warnf("Certificate at '%s' has MustStaple but OCSP is disabled", certFile) + } + return tc, nil, nil + case oc.Mode == OCSPModeAlways: + // Start the monitor for this cert even if it does not have + // the MustStaple flag and shutdown the server in case the + // staple ever gets revoked. + mustStaple = true + shutdownOnRevoke = true + case oc.Mode == OCSPModeMust && mustStaple: + shutdownOnRevoke = true + case oc.Mode == OCSPModeAuto && !mustStaple: + // "status_request" MustStaple flag not set in certificate. No need to do anything. 
+ return tc, nil, nil + } + } + if !mustStaple { + // No explicit OCSP config and cert does not have MustStaple flag either. + return tc, nil, nil + } + + if err := srv.setupOCSPStapleStoreDir(); err != nil { + return nil, nil, err + } + + // TODO: Add OCSP 'responder_cert' option in case CA cert not available. + issuer, err := getOCSPIssuer(caFile, cert.Certificate) + if err != nil { + return nil, nil, err + } + + mon = &OCSPMonitor{ + kind: kind, + srv: srv, + hc: &http.Client{Timeout: 30 * time.Second}, + shutdownOnRevoke: shutdownOnRevoke, + certFile: certFile, + stopCh: make(chan struct{}, 1), + Leaf: cert.Leaf, + Issuer: issuer, + } + + // Get the certificate status from the memory, then remote OCSP responder. + if _, resp, err := mon.getStatus(); err != nil { + return nil, nil, fmt.Errorf("bad OCSP status update for certificate at '%s': %s", certFile, err) + } else if err == nil && resp != nil && resp.Status != ocsp.Good && shutdownOnRevoke { + return nil, nil, fmt.Errorf("found existing OCSP status for certificate at '%s': %s", certFile, ocspStatusString(resp.Status)) + } + + // Callbacks below will be in charge of returning the certificate instead, + // so this has to be nil. + tc.Certificates = nil + + // GetCertificate returns a certificate that's presented to a client. + tc.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { + raw, _, err := mon.getStatus() + if err != nil { + return nil, err + } + + return &tls.Certificate{ + OCSPStaple: raw, + Certificate: cert.Certificate, + PrivateKey: cert.PrivateKey, + SupportedSignatureAlgorithms: cert.SupportedSignatureAlgorithms, + SignedCertificateTimestamps: cert.SignedCertificateTimestamps, + Leaf: cert.Leaf, + }, nil + } + + // Check whether need to verify staples from a client connection depending on the type. 
+ switch kind { + case kindStringMap[ROUTER], kindStringMap[GATEWAY], kindStringMap[LEAF]: + tc.VerifyConnection = func(s tls.ConnectionState) error { + oresp := s.OCSPResponse + if oresp == nil { + return fmt.Errorf("%s client missing OCSP Staple", kind) + } + + // Client route connections will verify the response of the staple. + if len(s.VerifiedChains) == 0 { + return fmt.Errorf("%s client missing TLS verified chains", kind) + } + chain := s.VerifiedChains[0] + resp, err := ocsp.ParseResponseForCert(oresp, chain[0], issuer) + if err != nil { + return fmt.Errorf("failed to parse OCSP response from %s client: %w", kind, err) + } + if err := resp.CheckSignatureFrom(issuer); err != nil { + return err + } + if resp.Status != ocsp.Good { + return fmt.Errorf("bad status for OCSP Staple from %s client: %s", kind, ocspStatusString(resp.Status)) + } + + return nil + } + + // When server makes a client connection, need to also present an OCSP Staple. + tc.GetClientCertificate = func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + raw, _, err := mon.getStatus() + if err != nil { + return nil, err + } + cert.OCSPStaple = raw + + return &cert, nil + } + default: + // GetClientCertificate returns a certificate that's presented to a server. 
+ tc.GetClientCertificate = func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return &cert, nil + } + } + + } + return tc, mon, nil +} + +func (s *Server) setupOCSPStapleStoreDir() error { + opts := s.getOpts() + storeDir := opts.StoreDir + if storeDir == _EMPTY_ { + return nil + } + storeDir = filepath.Join(storeDir, defaultOCSPStoreDir) + if stat, err := os.Stat(storeDir); os.IsNotExist(err) { + if err := os.MkdirAll(storeDir, defaultDirPerms); err != nil { + return fmt.Errorf("could not create OCSP storage directory - %v", err) + } + } else if stat == nil || !stat.IsDir() { + return fmt.Errorf("OCSP storage directory is not a directory") + } + return nil +} + +type tlsConfigKind struct { + tlsConfig *tls.Config + tlsOpts *TLSConfigOpts + kind string + apply func(*tls.Config) +} + +func (s *Server) configureOCSP() []*tlsConfigKind { + sopts := s.getOpts() + + configs := make([]*tlsConfigKind, 0) + + if config := sopts.TLSConfig; config != nil { + opts := sopts.tlsConfigOpts + o := &tlsConfigKind{ + kind: kindStringMap[CLIENT], + tlsConfig: config, + tlsOpts: opts, + apply: func(tc *tls.Config) { sopts.TLSConfig = tc }, + } + configs = append(configs, o) + } + if config := sopts.Cluster.TLSConfig; config != nil { + opts := sopts.Cluster.tlsConfigOpts + o := &tlsConfigKind{ + kind: kindStringMap[ROUTER], + tlsConfig: config, + tlsOpts: opts, + apply: func(tc *tls.Config) { sopts.Cluster.TLSConfig = tc }, + } + configs = append(configs, o) + } + if config := sopts.LeafNode.TLSConfig; config != nil { + opts := sopts.LeafNode.tlsConfigOpts + o := &tlsConfigKind{ + kind: kindStringMap[LEAF], + tlsConfig: config, + tlsOpts: opts, + apply: func(tc *tls.Config) { + + // RequireAndVerifyClientCert is used to tell a client that it + // should send the client cert to the server. + tc.ClientAuth = tls.RequireAndVerifyClientCert + // GetClientCertificate is used by a client to send the client cert + // to a server. We're a server, so we must not set this. 
+ tc.GetClientCertificate = nil + sopts.LeafNode.TLSConfig = tc + }, + } + configs = append(configs, o) + } + for i, remote := range sopts.LeafNode.Remotes { + opts := remote.tlsConfigOpts + if config := remote.TLSConfig; config != nil { + o := &tlsConfigKind{ + kind: kindStringMap[LEAF], + tlsConfig: config, + tlsOpts: opts, + apply: func(tc *tls.Config) { + // GetCertificate is used by a server to send the server cert to a + // client. We're a client, so we must not set this. + tc.GetCertificate = nil + + sopts.LeafNode.Remotes[i].TLSConfig = tc + }, + } + configs = append(configs, o) + } + } + if config := sopts.Gateway.TLSConfig; config != nil { + opts := sopts.Gateway.tlsConfigOpts + o := &tlsConfigKind{ + kind: kindStringMap[GATEWAY], + tlsConfig: config, + tlsOpts: opts, + apply: func(tc *tls.Config) { sopts.Gateway.TLSConfig = tc }, + } + configs = append(configs, o) + } + for i, remote := range sopts.Gateway.Gateways { + opts := remote.tlsConfigOpts + if config := remote.TLSConfig; config != nil { + o := &tlsConfigKind{ + kind: kindStringMap[GATEWAY], + tlsConfig: config, + tlsOpts: opts, + apply: func(tc *tls.Config) { + sopts.Gateway.Gateways[i].TLSConfig = tc + }, + } + configs = append(configs, o) + } + } + return configs +} + +func (s *Server) enableOCSP() error { + configs := s.configureOCSP() + + for _, config := range configs { + tc, mon, err := s.NewOCSPMonitor(config) + if err != nil { + return err + } + // Check if an OCSP stapling monitor is required for this certificate. + if mon != nil { + s.ocsps = append(s.ocsps, mon) + + // Override the TLS config with one that follows OCSP. 
+ config.apply(tc) + } + } + + return nil +} + +func (s *Server) startOCSPMonitoring() { + s.mu.Lock() + ocsps := s.ocsps + s.mu.Unlock() + if ocsps == nil { + return + } + for _, mon := range ocsps { + m := mon + m.mu.Lock() + kind := m.kind + m.mu.Unlock() + s.Noticef("OCSP Stapling enabled for %s connections", kind) + s.startGoRoutine(func() { m.run() }) + } +} + +func (s *Server) reloadOCSP() error { + if err := s.setupOCSPStapleStoreDir(); err != nil { + return err + } + + s.mu.Lock() + ocsps := s.ocsps + s.mu.Unlock() + + // Stop all OCSP Stapling monitors in case there were any running. + for _, oc := range ocsps { + oc.stop() + } + + configs := s.configureOCSP() + + // Restart the monitors under the new configuration. + ocspm := make([]*OCSPMonitor, 0) + for _, config := range configs { + tc, mon, err := s.NewOCSPMonitor(config) + if err != nil { + return err + } + // Check if an OCSP stapling monitor is required for this certificate. + if mon != nil { + ocspm = append(ocspm, mon) + + // Apply latest TLS configuration. + config.apply(tc) + } + } + + // Replace stopped monitors with the new ones. + s.mu.Lock() + s.ocsps = ocspm + s.mu.Unlock() + + // Dispatch all goroutines once again. 
+ s.startOCSPMonitoring() + + return nil +} + +func hasOCSPStatusRequest(cert *x509.Certificate) bool { + // OID for id-pe-tlsfeature defined in RFC here: + // https://datatracker.ietf.org/doc/html/rfc7633 + tlsFeatures := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24} + const statusRequestExt = 5 + + // Example values: + // * [48 3 2 1 5] - seen when creating own certs locally + // * [30 3 2 1 5] - seen in the wild + // Documentation: + // https://tools.ietf.org/html/rfc6066 + + for _, ext := range cert.Extensions { + if !ext.Id.Equal(tlsFeatures) { + continue + } + + var val []int + rest, err := asn1.Unmarshal(ext.Value, &val) + if err != nil || len(rest) > 0 { + return false + } + + for _, n := range val { + if n == statusRequestExt { + return true + } + } + break + } + + return false +} + +// writeOCSPStatus writes an OCSP status to a temporary file then moves it to a +// new path, in an attempt to avoid corrupting existing data. +func (oc *OCSPMonitor) writeOCSPStatus(storeDir, file string, data []byte) error { + storeDir = filepath.Join(storeDir, defaultOCSPStoreDir) + tmp, err := ioutil.TempFile(storeDir, "tmp-cert-status") + if err != nil { + return err + } + + if _, err := tmp.Write(data); err != nil { + tmp.Close() + os.Remove(tmp.Name()) + return err + } + if err := tmp.Close(); err != nil { + return err + } + + oc.mu.Lock() + err = os.Rename(tmp.Name(), filepath.Join(storeDir, file)) + oc.mu.Unlock() + if err != nil { + os.Remove(tmp.Name()) + return err + } + + return nil +} + +func parseCertPEM(name string) (*x509.Certificate, error) { + data, err := ioutil.ReadFile(name) + if err != nil { + return nil, err + } + + // Ignoring left over byte slice. 
+ block, _ := pem.Decode(data) + if block == nil { + return nil, fmt.Errorf("failed to parse PEM cert %s", name) + } + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("unexpected PEM certificate type: %s", block.Type) + } + + return x509.ParseCertificate(block.Bytes) +} + +// getOCSPIssuer returns a CA cert from the given path. If the path is empty, +// then this checks a given cert chain. If both are empty, then it returns an +// error. +func getOCSPIssuer(issuerCert string, chain [][]byte) (*x509.Certificate, error) { + var issuer *x509.Certificate + var err error + switch { + case len(chain) == 1 && issuerCert == _EMPTY_: + err = fmt.Errorf("ocsp ca required in chain or configuration") + case issuerCert != _EMPTY_: + issuer, err = parseCertPEM(issuerCert) + case len(chain) > 1 && issuerCert == _EMPTY_: + issuer, err = x509.ParseCertificate(chain[1]) + default: + err = fmt.Errorf("invalid ocsp ca configuration") + } + if err != nil { + return nil, err + } else if !issuer.IsCA { + return nil, fmt.Errorf("%s invalid ca basic constraints: is not ca", issuerCert) + } + + return issuer, nil +} + +func ocspStatusString(n int) string { + switch n { + case ocsp.Good: + return "good" + case ocsp.Revoked: + return "revoked" + default: + return "unknown" + } +} + +func validOCSPResponse(r *ocsp.Response) error { + // Time validation not handled by ParseResponse. 
+ // https://tools.ietf.org/html/rfc6960#section-4.2.2.1 + if !r.NextUpdate.IsZero() && r.NextUpdate.Before(time.Now()) { + t := r.NextUpdate.Format(time.RFC3339Nano) + return fmt.Errorf("invalid ocsp NextUpdate, is past time: %s", t) + } + if r.ThisUpdate.After(time.Now()) { + t := r.ThisUpdate.Format(time.RFC3339Nano) + return fmt.Errorf("invalid ocsp ThisUpdate, is future time: %s", t) + } + + return nil +} diff --git a/vendor/github.com/nats-io/nats-server/v2/server/opts.go b/vendor/github.com/nats-io/nats-server/v2/server/opts.go index dedb8be4..471ba675 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/opts.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/opts.go @@ -79,6 +79,8 @@ type ClusterOpts struct { // Not exported (used in tests) resolver netResolver + // Snapshot of configured TLS options. + tlsConfigOpts *TLSConfigOpts } // GatewayOpts are options for gateways. @@ -104,16 +106,20 @@ type GatewayOpts struct { // Not exported, for tests. resolver netResolver sendQSubsBufSize int + + // Snapshot of configured TLS options. + tlsConfigOpts *TLSConfigOpts } // RemoteGatewayOpts are options for connecting to a remote gateway // NOTE: This structure is no longer used for monitoring endpoints // and json tags are deprecated and may be removed in the future. type RemoteGatewayOpts struct { - Name string `json:"name"` - TLSConfig *tls.Config `json:"-"` - TLSTimeout float64 `json:"tls_timeout,omitempty"` - URLs []*url.URL `json:"urls,omitempty"` + Name string `json:"name"` + TLSConfig *tls.Config `json:"-"` + TLSTimeout float64 `json:"tls_timeout,omitempty"` + URLs []*url.URL `json:"urls,omitempty"` + tlsConfigOpts *TLSConfigOpts } // LeafNodeOpts are options for a given server to accept leaf node connections and/or connect to a remote cluster. @@ -140,6 +146,9 @@ type LeafNodeOpts struct { resolver netResolver dialTimeout time.Duration connDelay time.Duration + + // Snapshot of configured TLS options. 
+ tlsConfigOpts *TLSConfigOpts } // RemoteLeafOpts are options for connecting to a remote server as a leaf node. @@ -163,6 +172,8 @@ type RemoteLeafOpts struct { Compression bool `json:"-"` NoMasking bool `json:"-"` } + + tlsConfigOpts *TLSConfigOpts } // Options block for nats-server. @@ -212,6 +223,7 @@ type Options struct { JetStreamMaxMemory int64 `json:"-"` JetStreamMaxStore int64 `json:"-"` JetStreamDomain string `json:"-"` + JetStreamKey string `json:"-"` StoreDir string `json:"-"` Websocket WebsocketOpts `json:"-"` MQTT MQTTOpts `json:"-"` @@ -268,6 +280,10 @@ type Options struct { // and used as a filter criteria for some system requests Tags jwt.TagList `json:"-"` + // OCSPConfig enables OCSP Stapling in the server. + OCSPConfig *OCSPConfig + tlsConfigOpts *TLSConfigOpts + // private fields, used to know if bool options are explicitly // defined in config and/or command line params. inConfig map[string]bool @@ -485,6 +501,15 @@ type TLSConfigOpts struct { PinnedCerts PinnedCertSet } +// OCSPConfig represents the options of OCSP stapling options. +type OCSPConfig struct { + // Mode defines the policy for OCSP stapling. + Mode OCSPMode + + // OverrideURLs is the http URL endpoint used to get OCSP staples. + OverrideURLs []string +} + var tlsUsage = ` TLS configuration is specified in the tls section of a configuration file: @@ -780,6 +805,13 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error *errors = append(*errors, err) return } + case "store_dir", "storedir": + // Check if JetStream configuration is also setting the storage directory. 
+ if o.StoreDir != "" { + *errors = append(*errors, &configErr{tk, "Duplicate 'store_dir' configuration"}) + return + } + o.StoreDir = v.(string) case "jetstream": err := parseJetStream(tk, o, errors, warnings) if err != nil { @@ -841,6 +873,57 @@ func (o *Options) processConfigFileLine(k string, v interface{}, errors *[]error o.TLSTimeout = tc.Timeout o.TLSMap = tc.Map o.TLSPinnedCerts = tc.PinnedCerts + + // Need to keep track of path of the original TLS config + // and certs path for OCSP Stapling monitoring. + o.tlsConfigOpts = tc + case "ocsp": + switch vv := v.(type) { + case bool: + if vv { + // Default is Auto which honors Must Staple status request + // but does not shutdown the server in case it is revoked, + // letting the client choose whether to trust or not the server. + o.OCSPConfig = &OCSPConfig{Mode: OCSPModeAuto} + } else { + o.OCSPConfig = &OCSPConfig{Mode: OCSPModeNever} + } + case map[string]interface{}: + ocsp := &OCSPConfig{Mode: OCSPModeAuto} + + for kk, kv := range vv { + _, v = unwrapValue(kv, &tk) + switch kk { + case "mode": + mode := v.(string) + switch { + case strings.EqualFold(mode, "always"): + ocsp.Mode = OCSPModeAlways + case strings.EqualFold(mode, "must"): + ocsp.Mode = OCSPModeMust + case strings.EqualFold(mode, "never"): + ocsp.Mode = OCSPModeNever + case strings.EqualFold(mode, "auto"): + ocsp.Mode = OCSPModeAuto + default: + *errors = append(*errors, &configErr{tk, fmt.Sprintf("error parsing ocsp config: unsupported ocsp mode %T", mode)}) + } + case "urls": + urls := v.([]string) + ocsp.OverrideURLs = urls + case "url": + url := v.(string) + ocsp.OverrideURLs = []string{url} + default: + *errors = append(*errors, &configErr{tk, fmt.Sprintf("error parsing ocsp config: unsupported field %T", kk)}) + return + } + } + o.OCSPConfig = ocsp + default: + *errors = append(*errors, &configErr{tk, fmt.Sprintf("error parsing ocsp config: unsupported type %T", v)}) + return + } case "allow_non_tls": o.AllowNonTLS = v.(bool) case 
"write_deadline": @@ -1287,6 +1370,7 @@ func parseCluster(v interface{}, opts *Options, errors *[]error, warnings *[]err opts.Cluster.TLSMap = tlsopts.Map opts.Cluster.TLSPinnedCerts = tlsopts.PinnedCerts opts.Cluster.TLSCheckKnownURLs = tlsopts.TLSCheckKnownURLs + opts.Cluster.tlsConfigOpts = tlsopts case "cluster_advertise", "advertise": opts.Cluster.Advertise = mv.(string) case "no_advertise": @@ -1404,6 +1488,7 @@ func parseGateway(v interface{}, o *Options, errors *[]error, warnings *[]error) o.Gateway.TLSMap = tlsopts.Map o.Gateway.TLSCheckKnownURLs = tlsopts.TLSCheckKnownURLs o.Gateway.TLSPinnedCerts = tlsopts.PinnedCerts + o.Gateway.tlsConfigOpts = tlsopts case "advertise": o.Gateway.Advertise = mv.(string) case "connect_retries": @@ -1529,7 +1614,11 @@ func parseJetStream(v interface{}, opts *Options, errors *[]error, warnings *[]e for mk, mv := range vv { tk, mv = unwrapValue(mv, <) switch strings.ToLower(mk) { - case "store_dir", "storedir": + case "store", "store_dir", "storedir": + // StoreDir can be set at the top level as well so have to prevent ambiguous declarations. 
+ if opts.StoreDir != "" { + return &configErr{tk, "Duplicate 'store_dir' configuration"} + } opts.StoreDir = mv.(string) case "max_memory_store", "max_mem_store", "max_mem": opts.JetStreamMaxMemory = mv.(int64) @@ -1539,6 +1628,8 @@ func parseJetStream(v interface{}, opts *Options, errors *[]error, warnings *[]e opts.JetStreamDomain = mv.(string) case "enable", "enabled": doEnable = mv.(bool) + case "key", "ek", "encryption_key": + opts.JetStreamKey = mv.(string) default: if !tk.IsUsedVariable() { err := &unknownConfigFieldErr{ @@ -1628,6 +1719,7 @@ func parseLeafNodes(v interface{}, opts *Options, errors *[]error, warnings *[]e opts.LeafNode.TLSTimeout = tc.Timeout opts.LeafNode.TLSMap = tc.Map opts.LeafNode.TLSPinnedCerts = tc.PinnedCerts + opts.LeafNode.tlsConfigOpts = tc case "leafnode_advertise", "advertise": opts.LeafNode.Advertise = mv.(string) case "no_advertise": @@ -1834,6 +1926,7 @@ func parseRemoteLeafNodes(v interface{}, errors *[]error, warnings *[]error) ([] } else { remote.TLSTimeout = float64(DEFAULT_LEAF_TLS_TIMEOUT) } + remote.tlsConfigOpts = tc case "hub": remote.Hub = v.(bool) case "deny_imports", "deny_import": @@ -1925,6 +2018,7 @@ func parseGateways(v interface{}, errors *[]error, warnings *[]error) ([]*Remote } gateway.TLSConfig = tls gateway.TLSTimeout = tlsopts.Timeout + gateway.tlsConfigOpts = tlsopts case "url": url, err := parseURL(v.(string), "gateway") if err != nil { @@ -3458,6 +3552,14 @@ func parseTLS(v interface{}, isClientCtx bool) (t *TLSConfigOpts, retErr error) at = float64(mv) case float64: at = mv + case string: + d, err := time.ParseDuration(mv) + if err != nil { + return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, 'timeout' %s", err)} + } + at = d.Seconds() + default: + return nil, &configErr{tk, "error parsing tls config, 'timeout' wrong type"} } tc.Timeout = at case "pinned_certs": diff --git a/vendor/github.com/nats-io/nats-server/v2/server/parser.go 
b/vendor/github.com/nats-io/nats-server/v2/server/parser.go index b7dd7634..a9c3533a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/parser.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/parser.go @@ -1190,7 +1190,7 @@ authErr: parseErr: c.sendErr("Unknown Protocol Operation") snip := protoSnippet(i, PROTO_SNIPPET_SIZE, buf) - err := fmt.Errorf("%s parser ERROR, state=%d, i=%d: proto='%s...'", c.typeString(), c.state, i, snip) + err := fmt.Errorf("%s parser ERROR, state=%d, i=%d: proto='%s...'", c.kindString(), c.state, i, snip) return err } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_darwin.go b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_darwin.go index b00f1e00..7fd26b15 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_darwin.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/pse/pse_darwin.go @@ -1,4 +1,4 @@ -// Copyright 2015-2018 The NATS Authors +// Copyright 2015-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -13,22 +13,74 @@ package pse +// On macs after some studying it seems that typical tools like ps and activity monitor report MaxRss and not +// current RSS. I wrote some C code to pull the real RSS and although it does not go down very often, when it does +// that is not reflected in the typical tooling one might compare us to, so we can skip cgo and just use rusage imo. +// We also do not use virtual memory in the upper layers at all, so ok to skip since rusage does not report vss. 
+ import ( - "fmt" - "os" - "os/exec" + "math" + "sync" + "syscall" + "time" ) -// ProcUsage returns CPU usage -func ProcUsage(pcpu *float64, rss, vss *int64) error { - pidStr := fmt.Sprintf("%d", os.Getpid()) - out, err := exec.Command("ps", "o", "pcpu=,rss=,vsz=", "-p", pidStr).Output() - if err != nil { - *rss, *vss = -1, -1 - return fmt.Errorf("ps call failed:%v", err) +type lastUsage struct { + sync.Mutex + last time.Time + cpu time.Duration + rss int64 + pcpu float64 +} + +// To hold the last usage and call time. +var lu lastUsage + +func init() { + updateUsage() + periodic() +} + +// Get our usage. +func getUsage() (now time.Time, cpu time.Duration, rss int64) { + var ru syscall.Rusage + syscall.Getrusage(syscall.RUSAGE_SELF, &ru) + now = time.Now() + cpu = time.Duration(ru.Utime.Sec)*time.Second + time.Duration(ru.Utime.Usec)*time.Microsecond + cpu += time.Duration(ru.Stime.Sec)*time.Second + time.Duration(ru.Stime.Usec)*time.Microsecond + return now, cpu, ru.Maxrss +} + +// Update last usage. +// We need to have a prior sample to compute pcpu. +func updateUsage() (pcpu float64, rss int64) { + lu.Lock() + defer lu.Unlock() + + now, cpu, rss := getUsage() + // Don't skew pcpu by sampling too close to last sample. + if elapsed := now.Sub(lu.last); elapsed < 500*time.Millisecond { + // Always update rss. + lu.rss = rss + } else { + tcpu := float64(cpu - lu.cpu) + lu.last, lu.cpu, lu.rss = now, cpu, rss + // Want to make this one decimal place and not count on upper layers. + // Cores already taken into account via cpu time measurements. + lu.pcpu = math.Round(tcpu/float64(elapsed)*1000) / 10 } - fmt.Sscanf(string(out), "%f %d %d", pcpu, rss, vss) - *rss *= 1024 // 1k blocks, want bytes. - *vss *= 1024 // 1k blocks, want bytes. + return lu.pcpu, lu.rss +} + +// Sampling function to keep pcpu relevant. +func periodic() { + updateUsage() + time.AfterFunc(time.Second, periodic) +} + +// ProcUsage returns CPU and memory usage. 
+// Note upper layers do not use virtual memory size, so ok that it is not filled in here. +func ProcUsage(pcpu *float64, rss, vss *int64) error { + *pcpu, *rss = updateUsage() return nil } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/raft.go b/vendor/github.com/nats-io/nats-server/v2/server/raft.go index 5c1d4755..70382c47 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/raft.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/raft.go @@ -1245,11 +1245,15 @@ func (n *raft) Peers() []*Peer { var peers []*Peer for id, ps := range n.peers { + var lag uint64 + if n.commit > ps.li { + lag = n.commit - ps.li + } p := &Peer{ ID: id, Current: id == n.leader || ps.li >= n.applied, Last: time.Unix(0, ps.ts), - Lag: n.commit - ps.li, + Lag: lag, } peers = append(peers, p) } @@ -1647,7 +1651,7 @@ func (n *raft) decodeAppendEntryResponse(msg []byte) *appendEntryResponse { } // Called when a remove peer proposal has been forwarded -func (n *raft) handleForwardedRemovePeerProposal(sub *subscription, c *client, _, reply string, msg []byte) { +func (n *raft) handleForwardedRemovePeerProposal(sub *subscription, c *client, _ *Account, _, reply string, msg []byte) { n.debug("Received forwarded remove peer proposal: %q", msg) if !n.Leader() { @@ -1678,7 +1682,7 @@ func (n *raft) handleForwardedRemovePeerProposal(sub *subscription, c *client, _ } // Called when a peer has forwarded a proposal. 
-func (n *raft) handleForwardedProposal(sub *subscription, c *client, _, reply string, msg []byte) { +func (n *raft) handleForwardedProposal(sub *subscription, c *client, _ *Account, _, reply string, msg []byte) { if !n.Leader() { n.debug("Ignoring forwarded proposal, not leader") return @@ -1960,9 +1964,7 @@ func (n *raft) catchupFollower(ar *appendEntryResponse) { n.Lock() if n.progress == nil { n.progress = make(map[string]chan uint64) - } - - if ch, ok := n.progress[ar.peer]; ok { + } else if ch, ok := n.progress[ar.peer]; ok { n.debug("Will cancel existing entry for catching up %q", ar.peer) delete(n.progress, ar.peer) // Try to pop them out but make sure to not block. @@ -2332,7 +2334,7 @@ func (n *raft) runAsCandidate() { } // handleAppendEntry handles an append entry from the wire. -func (n *raft) handleAppendEntry(sub *subscription, c *client, subject, reply string, msg []byte) { +func (n *raft) handleAppendEntry(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { if n.outOfResources() { n.debug("AppendEntry not processing inbound, no resources") return @@ -2565,6 +2567,9 @@ func (n *raft) processAppendEntry(ae *appendEntry, sub *subscription) { if ae.pindex == n.pindex { n.truncateWal(ae) n.cancelCatchup() + // Make sure pterms match and we take on the leader's. + // This prevents constant spinning. + n.pterm = ae.pterm n.Unlock() return } @@ -2728,7 +2733,7 @@ func (n *raft) processAppendEntryResponse(ar *appendEntryResponse) { } // handleAppendEntryResponse processes responses to append entries. -func (n *raft) handleAppendEntryResponse(sub *subscription, c *client, subject, reply string, msg []byte) { +func (n *raft) handleAppendEntryResponse(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { // Ignore if not the leader. 
if !n.Leader() { return @@ -3067,7 +3072,7 @@ func (n *raft) decodeVoteResponse(msg []byte) *voteResponse { return vr } -func (n *raft) handleVoteResponse(sub *subscription, c *client, _, reply string, msg []byte) { +func (n *raft) handleVoteResponse(sub *subscription, c *client, _ *Account, _, reply string, msg []byte) { vr := n.decodeVoteResponse(msg) n.debug("Received a voteResponse %+v", vr) if vr == nil { @@ -3132,7 +3137,7 @@ func (n *raft) processVoteRequest(vr *voteRequest) error { return nil } -func (n *raft) handleVoteRequest(sub *subscription, c *client, subject, reply string, msg []byte) { +func (n *raft) handleVoteRequest(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { vr := n.decodeVoteRequest(msg, reply) if vr == nil { n.error("Received malformed vote request for %q", n.group) diff --git a/vendor/github.com/nats-io/nats-server/v2/server/reload.go b/vendor/github.com/nats-io/nats-server/v2/server/reload.go index 29ceb264..a7b84338 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/reload.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/reload.go @@ -50,6 +50,9 @@ type option interface { // IsAuthChange indicates if this option requires reloading authorization. IsAuthChange() bool + // IsTLSChange indicates if this option requires reloading TLS. + IsTLSChange() bool + // IsClusterPermsChange indicates if this option requires reloading // cluster permissions. IsClusterPermsChange() bool @@ -74,6 +77,10 @@ func (n noopOption) IsAuthChange() bool { return false } +func (n noopOption) IsTLSChange() bool { + return false +} + func (n noopOption) IsClusterPermsChange() bool { return false } @@ -202,6 +209,10 @@ func (t *tlsOption) Apply(server *Server) { server.Noticef("Reloaded: tls = %s", message) } +func (t *tlsOption) IsTLSChange() bool { + return true +} + // tlsTimeoutOption implements the option interface for the tls `timeout` // setting. 
type tlsTimeoutOption struct { @@ -571,6 +582,15 @@ func (jso jetStreamOption) IsJetStreamChange() bool { return true } +type ocspOption struct { + noopOption + newValue *OCSPConfig +} + +func (a *ocspOption) Apply(s *Server) { + s.Noticef("Reloaded: OCSP") +} + // connectErrorReports implements the option interface for the `connect_error_reports` // setting. type connectErrorReports struct { @@ -681,30 +701,37 @@ func (s *Server) recheckPinnedCerts(curOpts *Options, newOpts *Options) { } } -// Reload reads the current configuration file and applies any supported -// changes. This returns an error if the server was not started with a config -// file or an option which doesn't support hot-swapping was changed. +// Reload reads the current configuration file and calls out to ReloadOptions +// to apply the changes. This returns an error if the server was not started +// with a config file or an option which doesn't support hot-swapping was changed. func (s *Server) Reload() error { s.mu.Lock() - - s.reloading = true - defer func() { - s.mu.Lock() - s.reloading = false - s.mu.Unlock() - }() - - if s.configFile == "" { - s.mu.Unlock() + configFile := s.configFile + s.mu.Unlock() + if configFile == "" { return errors.New("can only reload config when a file is provided using -c or --config") } - newOpts, err := ProcessConfigFile(s.configFile) + newOpts, err := ProcessConfigFile(configFile) if err != nil { - s.mu.Unlock() // TODO: Dump previous good config to a .bak file? return err } + return s.ReloadOptions(newOpts) +} + +// ReloadOptions applies any supported options from the provided Option +// type. This returns an error if an option which doesn't support +// hot-swapping was changed. 
+func (s *Server) ReloadOptions(newOpts *Options) error { + s.mu.Lock() + + s.reloading = true + defer func() { + s.mu.Lock() + s.reloading = false + s.mu.Unlock() + }() curOpts := s.getOpts() @@ -767,7 +794,6 @@ func (s *Server) Reload() error { s.mu.Unlock() return nil } - func applyBoolFlags(newOpts, flagOpts *Options) { // Reset fields that may have been set to `true` in // MergeOptions() when some of the flags default to `true` @@ -858,7 +884,8 @@ func imposeOrder(value interface{}) error { case WebsocketOpts: sort.Strings(value.AllowedOrigins) case string, bool, int, int32, int64, time.Duration, float64, nil, LeafNodeOpts, ClusterOpts, *tls.Config, PinnedCertSet, - *URLAccResolver, *MemAccResolver, *DirAccResolver, *CacheDirAccResolver, Authentication, MQTTOpts, jwt.TagList: + *URLAccResolver, *MemAccResolver, *DirAccResolver, *CacheDirAccResolver, Authentication, MQTTOpts, jwt.TagList, + *OCSPConfig: // explicitly skipped types default: // this will fail during unit tests @@ -1002,6 +1029,8 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { tmpNew := newValue.(GatewayOpts) tmpOld.TLSConfig = nil tmpNew.TLSConfig = nil + tmpOld.tlsConfigOpts = nil + tmpNew.tlsConfigOpts = nil // Need to do the same for remote gateways' TLS configs. // But we can't just set remotes' TLSConfig to nil otherwise this @@ -1021,12 +1050,14 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { tmpNew := newValue.(LeafNodeOpts) tmpOld.TLSConfig = nil tmpNew.TLSConfig = nil + tmpOld.tlsConfigOpts = nil + tmpNew.tlsConfigOpts = nil // Need to do the same for remote leafnodes' TLS configs. // But we can't just set remotes' TLSConfig to nil otherwise this // would lose the real TLS configuration. 
- tmpOld.Remotes = copyRemoteLNConfigWithoutTLSConfig(tmpOld.Remotes) - tmpNew.Remotes = copyRemoteLNConfigWithoutTLSConfig(tmpNew.Remotes) + tmpOld.Remotes = copyRemoteLNConfigForReloadCompare(tmpOld.Remotes) + tmpNew.Remotes = copyRemoteLNConfigForReloadCompare(tmpNew.Remotes) // Special check for leafnode remotes changes which are not supported right now. leafRemotesChanged := func(a, b LeafNodeOpts) bool { @@ -1038,6 +1069,10 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { for _, oldRemote := range tmpOld.Remotes { var found bool + if oldRemote.LocalAccount == _EMPTY_ { + oldRemote.LocalAccount = globalAccountName + } + for _, newRemote := range tmpNew.Remotes { // Bind to global account in case not defined. if newRemote.LocalAccount == _EMPTY_ { @@ -1193,6 +1228,8 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) { return nil, fmt.Errorf("config reload not supported for %s: old=%v, new=%v", field.Name, oldValue, newValue) } + case "ocspconfig": + diffOpts = append(diffOpts, &ocspOption{newValue: newValue.(*OCSPConfig)}) default: // TODO(ik): Implement String() on those options to have a nice print. // %v is difficult to figure what's what, %+v print private fields and @@ -1227,12 +1264,13 @@ func copyRemoteGWConfigsWithoutTLSConfig(current []*RemoteGatewayOpts) []*Remote for _, rcfg := range current { cp := *rcfg cp.TLSConfig = nil + cp.tlsConfigOpts = nil rgws = append(rgws, &cp) } return rgws } -func copyRemoteLNConfigWithoutTLSConfig(current []*RemoteLeafOpts) []*RemoteLeafOpts { +func copyRemoteLNConfigForReloadCompare(current []*RemoteLeafOpts) []*RemoteLeafOpts { l := len(current) if l == 0 { return nil @@ -1241,9 +1279,13 @@ func copyRemoteLNConfigWithoutTLSConfig(current []*RemoteLeafOpts) []*RemoteLeaf for _, rcfg := range current { cp := *rcfg cp.TLSConfig = nil + cp.tlsConfigOpts = nil // This is set only when processing a CONNECT, so reset here so that we // don't fail the DeepEqual comparison. 
cp.TLS = false + // For now, remove DenyImports/Exports since those get modified at runtime + // to add JS APIs. + cp.DenyImports, cp.DenyExports = nil, nil rlns = append(rlns, &cp) } return rlns @@ -1257,6 +1299,7 @@ func (s *Server) applyOptions(ctx *reloadContext, opts []option) { reloadClientTrcLvl = false reloadJetstream = false jsEnabled = false + reloadTLS = false ) for _, opt := range opts { opt.Apply(s) @@ -1269,6 +1312,9 @@ func (s *Server) applyOptions(ctx *reloadContext, opts []option) { if opt.IsAuthChange() { reloadAuth = true } + if opt.IsTLSChange() { + reloadTLS = true + } if opt.IsClusterPermsChange() { reloadClusterPerms = true } @@ -1299,6 +1345,9 @@ func (s *Server) applyOptions(ctx *reloadContext, opts []option) { s.Warnf("Can't start JetStream: %v", err) } } + // Make sure to reset the internal loop's version of JS. + s.resetInternalLoopInfo() + s.sendStatszUpdate() } // For remote gateways and leafnodes, make sure that their TLS configuration @@ -1312,9 +1361,31 @@ func (s *Server) applyOptions(ctx *reloadContext, opts []option) { s.updateRemoteLeafNodesTLSConfig(newOpts) } + if reloadTLS { + // Restart OCSP monitoring. + if err := s.reloadOCSP(); err != nil { + s.Warnf("Can't restart OCSP Stapling: %v", err) + } + } + s.Noticef("Reloaded server configuration") } +// This will send a reset to the internal send loop. 
+func (s *Server) resetInternalLoopInfo() { + var resetCh chan struct{} + s.mu.Lock() + if s.sys != nil { + // can't hold the lock as go routine reading it may be waiting for lock as well + resetCh = s.sys.resetCh + } + s.mu.Unlock() + + if resetCh != nil { + resetCh <- struct{}{} + } +} + // Update all cached debug and trace settings for every client func (s *Server) reloadClientTraceLevel() { opts := s.getOpts() diff --git a/vendor/github.com/nats-io/nats-server/v2/server/route.go b/vendor/github.com/nats-io/nats-server/v2/server/route.go index ad55dc6d..c6f99d78 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/route.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/route.go @@ -1333,6 +1333,7 @@ func (s *Server) createRoute(conn net.Conn, rURL *url.URL) *client { } // Perform (server or client side) TLS handshake. if _, err := c.doTLSHandshake("route", didSolicit, rURL, tlsConfig, _EMPTY_, opts.Cluster.TLSTimeout, opts.Cluster.TLSPinnedCerts); err != nil { + c.mu.Unlock() return nil } } @@ -1419,7 +1420,10 @@ func (s *Server) addRoute(c *client, info *Info) (bool, bool) { if !exists { s.routes[c.cid] = c s.remotes[id] = c - s.nodeToInfo.Store(c.route.hash, nodeInfo{c.route.remoteName, s.info.Cluster, id, false, info.JetStream}) + // check to be consistent and future proof. 
but will be same domain + if s.sameDomain(info.Domain) { + s.nodeToInfo.Store(c.route.hash, nodeInfo{c.route.remoteName, s.info.Cluster, info.Domain, id, false, info.JetStream}) + } c.mu.Lock() c.route.connectURLs = info.ClientConnectURLs c.route.wsConnURLs = info.WSConnectURLs @@ -1690,6 +1694,7 @@ func (s *Server) startRouteAcceptLoop() { GatewayURL: s.getGatewayURL(), Headers: s.supportsHeaders(), Cluster: s.info.Cluster, + Domain: s.info.Domain, Dynamic: s.isClusterNameDynamic(), LNOC: true, } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sendq.go b/vendor/github.com/nats-io/nats-server/v2/server/sendq.go index 66109062..39831c11 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sendq.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sendq.go @@ -16,7 +16,6 @@ package server import ( "strconv" "sync" - "time" ) type outMsg struct { @@ -84,7 +83,7 @@ func (sq *sendq) internalLoop() { pm = sq.pending() } } - c.flushClients(10 * time.Millisecond) + c.flushClients(0) } } } diff --git a/vendor/github.com/nats-io/nats-server/v2/server/server.go b/vendor/github.com/nats-io/nats-server/v2/server/server.go index 7e1c4f17..e661bb56 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/server.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/server.go @@ -79,6 +79,7 @@ type Info struct { Nonce string `json:"nonce,omitempty"` Cluster string `json:"cluster,omitempty"` Dynamic bool `json:"cluster_dynamic,omitempty"` + Domain string `json:"domain,omitempty"` ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to. WSConnectURLs []string `json:"ws_connect_urls,omitempty"` // Contains URLs a ws client can connect to. 
LameDuckMode bool `json:"ldm,omitempty"` @@ -236,6 +237,9 @@ type Server struct { // MQTT structure mqtt srvMQTT + // OCSP monitoring + ocsps []*OCSPMonitor + // exporting account name the importer experienced issues with incompleteAccExporterMap sync.Map @@ -247,17 +251,24 @@ type Server struct { rnMu sync.RWMutex raftNodes map[string]RaftNode - // For mapping from a raft node name back to a server name and cluster. + // For mapping from a raft node name back to a server name and cluster. Node has to be in the same domain. nodeToInfo sync.Map // For out of resources to not log errors too fast. rerrMu sync.Mutex rerrLast time.Time + + // If there is a system account configured, to still support the $G account, + // the server will create a fake user and add it to the list of users. + // Keep track of what that user name is for config reload purposes. + sysAccOnlyNoAuthUser string } +// For tracking JS nodes. type nodeInfo struct { name string cluster string + domain string id string offline bool js bool @@ -323,6 +334,7 @@ func NewServer(opts *Options) (*Server, error) { JetStream: opts.JetStream, Headers: !opts.NoHeaderSupport, Cluster: opts.Cluster.Name, + Domain: opts.JetStreamDomain, } if tlsReq && !info.TLSRequired { @@ -370,7 +382,7 @@ func NewServer(opts *Options) (*Server, error) { // Place ourselves in some lookup maps. ourNode := string(getHash(serverName)) - s.nodeToInfo.Store(ourNode, nodeInfo{serverName, opts.Cluster.Name, info.ID, false, opts.JetStream}) + s.nodeToInfo.Store(ourNode, nodeInfo{serverName, opts.Cluster.Name, opts.JetStreamDomain, info.ID, false, opts.JetStream}) s.routeResolver = opts.Cluster.resolver if s.routeResolver == nil { @@ -385,6 +397,12 @@ func NewServer(opts *Options) (*Server, error) { // Ensure that non-exported options (used in tests) are properly set. s.setLeafNodeNonExportedOptions() + // Setup OCSP Stapling. 
This will abort server from starting if there + // are no valid staples and OCSP policy is to Always or MustStaple. + if err := s.enableOCSP(); err != nil { + return nil, err + } + // Call this even if there is no gateway defined. It will // initialize the structure so we don't have to check for // it to be nil or not in various places in the code. @@ -747,15 +765,21 @@ func (s *Server) configureAccounts() error { // We would do this to add user/pass to the system account. If this is the case add in // no-auth-user for $G. if numAccounts == 2 && s.opts.NoAuthUser == _EMPTY_ { - // Create a unique name so we do not collide. - var b [8]byte - rn := rand.Int63() - for i, l := 0, rn; i < len(b); i++ { - b[i] = digits[l%base] - l /= base + // If we come here from config reload, let's not recreate the fake user name otherwise + // it will cause currently clients to be disconnected. + uname := s.sysAccOnlyNoAuthUser + if uname == _EMPTY_ { + // Create a unique name so we do not collide. + var b [8]byte + rn := rand.Int63() + for i, l := 0, rn; i < len(b); i++ { + b[i] = digits[l%base] + l /= base + } + uname = fmt.Sprintf("nats-%s", b[:]) + s.sysAccOnlyNoAuthUser = uname } - uname := fmt.Sprintf("nats-%s", b[:]) - s.opts.Users = append(s.opts.Users, &User{Username: uname, Password: string(b[:]), Account: s.gacc}) + s.opts.Users = append(s.opts.Users, &User{Username: uname, Password: uname[6:], Account: s.gacc}) s.opts.NoAuthUser = uname } } @@ -1619,7 +1643,10 @@ func (s *Server) Start() { }) } - // Start monitoring if needed + // Start OCSP Stapling monitoring for TLS certificates if enabled. + s.startOCSPMonitoring() + + // Start monitoring if needed. 
if err := s.StartMonitoring(); err != nil { s.Fatalf("Can't start monitoring: %v", err) return diff --git a/vendor/github.com/nats-io/nats-server/v2/server/store.go b/vendor/github.com/nats-io/nats-server/v2/server/store.go index 623d33a9..5da8274a 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/store.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/store.go @@ -45,10 +45,12 @@ var ( ErrMaxMsgs = errors.New("maximum messages exceeded") // ErrMaxBytes is returned when we have discard new as a policy and we reached the bytes limit. ErrMaxBytes = errors.New("maximum bytes exceeded") + // ErrMaxMsgsPerSubject is returned when we have discard new as a policy and we reached the message limit per subject. + ErrMaxMsgsPerSubject = errors.New("maximum messages per subject exceeded") // ErrStoreSnapshotInProgress is returned when RemoveMsg or EraseMsg is called // while a snapshot is in progress. ErrStoreSnapshotInProgress = errors.New("snapshot in progress") - // ErrMsgTooBig is returned when a message is considered too large. + // ErrMsgTooLarge is returned when a message is considered too large. ErrMsgTooLarge = errors.New("message to large") // ErrStoreWrongType is for when you access the wrong storage type. 
ErrStoreWrongType = errors.New("wrong storage type") @@ -69,13 +71,16 @@ type StreamStore interface { StoreRawMsg(subject string, hdr, msg []byte, seq uint64, ts int64) error SkipMsg() uint64 LoadMsg(seq uint64) (subject string, hdr, msg []byte, ts int64, err error) + LoadLastMsg(subject string) (subj string, seq uint64, hdr, msg []byte, ts int64, err error) RemoveMsg(seq uint64) (bool, error) EraseMsg(seq uint64) (bool, error) Purge() (uint64, error) + PurgeEx(subject string, seq, keep uint64) (uint64, error) Compact(seq uint64) (uint64, error) Truncate(seq uint64) error GetSeqFromTime(t time.Time) uint64 - NumFilteredPending(sseq uint64, subject string) uint64 + FilteredState(seq uint64, subject string) SimpleState + SubjectsState(filterSubject string) map[string]SimpleState State() StreamState FastState(*StreamState) Type() StorageType @@ -107,7 +112,7 @@ type DiscardPolicy int const ( // DiscardOld will remove older messages to return to the limits. DiscardOld = iota - //DiscardNew will error on a StoreMsg call + // DiscardNew will error on a StoreMsg call DiscardNew ) @@ -125,6 +130,13 @@ type StreamState struct { Consumers int `json:"consumer_count"` } +// SimpleState for filtered subject specific state. +type SimpleState struct { + Msgs uint64 `json:"messages"` + First uint64 `json:"first_seq"` + Last uint64 `json:"last_seq"` +} + // LostStreamData indicates msgs that have been lost. type LostStreamData struct { Msgs []uint64 `json:"msgs"` @@ -145,6 +157,7 @@ type ConsumerStore interface { State() (*ConsumerState, error) Stop() error Delete() error + StreamDelete() error } // SequencePair has both the consumer and the stream sequence. They point to same message. 
@@ -377,6 +390,7 @@ const ( deliverNewPolicyString = "new" deliverByStartSequenceString = "by_start_sequence" deliverByStartTimeString = "by_start_time" + deliverLastPerPolicyString = "last_per_subject" deliverUndefinedString = "undefined" ) @@ -386,6 +400,8 @@ func (p *DeliverPolicy) UnmarshalJSON(data []byte) error { *p = DeliverAll case jsonString(deliverLastPolicyString): *p = DeliverLast + case jsonString(deliverLastPerPolicyString): + *p = DeliverLastPerSubject case jsonString(deliverNewPolicyString): *p = DeliverNew case jsonString(deliverByStartSequenceString): @@ -405,6 +421,8 @@ func (p DeliverPolicy) MarshalJSON() ([]byte, error) { return json.Marshal(deliverAllPolicyString) case DeliverLast: return json.Marshal(deliverLastPolicyString) + case DeliverLastPerSubject: + return json.Marshal(deliverLastPerPolicyString) case DeliverNew: return json.Marshal(deliverNewPolicyString) case DeliverByStartSequence: diff --git a/vendor/github.com/nats-io/nats-server/v2/server/stream.go b/vendor/github.com/nats-io/nats-server/v2/server/stream.go index 208c6463..42ad34e6 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/stream.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/stream.go @@ -39,14 +39,16 @@ import ( // for a given stream. If subjects is empty the name will be used. 
type StreamConfig struct { Name string `json:"name"` + Description string `json:"description,omitempty"` Subjects []string `json:"subjects,omitempty"` Retention RetentionPolicy `json:"retention"` MaxConsumers int `json:"max_consumers"` MaxMsgs int64 `json:"max_msgs"` MaxBytes int64 `json:"max_bytes"` - Discard DiscardPolicy `json:"discard"` MaxAge time.Duration `json:"max_age"` + MaxMsgsPer int64 `json:"max_msgs_per_subject"` MaxMsgSize int32 `json:"max_msg_size,omitempty"` + Discard DiscardPolicy `json:"discard"` Storage StorageType `json:"storage"` Replicas int `json:"num_replicas"` NoAck bool `json:"no_ack,omitempty"` @@ -57,14 +59,21 @@ type StreamConfig struct { Sources []*StreamSource `json:"sources,omitempty"` } -const JSApiPubAckResponseType = "io.nats.jetstream.api.v1.pub_ack_response" - // JSPubAckResponse is a formal response to a publish operation. type JSPubAckResponse struct { Error *ApiError `json:"error,omitempty"` *PubAck } +// ToError checks if the response has a error and if it does converts it to an error avoiding the pitfalls described by https://yourbasic.org/golang/gotcha-why-nil-error-not-equal-nil/ +func (r *JSPubAckResponse) ToError() error { + if r.Error == nil { + return nil + } + + return r.Error +} + // PubAck is the detail you get back from a publish to a stream that was successful. // e.g. +OK {"stream": "Orders", "seq": 22} type PubAck struct { @@ -197,18 +206,20 @@ type sourceInfo struct { err *ApiError last time.Time lreq time.Time + qch chan struct{} grr bool } // Headers for published messages. 
const ( - JSMsgId = "Nats-Msg-Id" - JSExpectedStream = "Nats-Expected-Stream" - JSExpectedLastSeq = "Nats-Expected-Last-Sequence" - JSExpectedLastMsgId = "Nats-Expected-Last-Msg-Id" - JSStreamSource = "Nats-Stream-Source" - JSLastConsumerSeq = "Nats-Last-Consumer" - JSLastStreamSeq = "Nats-Last-Stream" + JSMsgId = "Nats-Msg-Id" + JSExpectedStream = "Nats-Expected-Stream" + JSExpectedLastSeq = "Nats-Expected-Last-Sequence" + JSExpectedLastSubjSeq = "Nats-Expected-Last-Subject-Sequence" + JSExpectedLastMsgId = "Nats-Expected-Last-Msg-Id" + JSStreamSource = "Nats-Stream-Source" + JSLastConsumerSeq = "Nats-Last-Consumer" + JSLastStreamSeq = "Nats-Last-Stream" ) // Dedupe entry @@ -220,8 +231,7 @@ type ddentry struct { // Replicas Range const ( - StreamDefaultReplicas = 1 - StreamMaxReplicas = 5 + StreamMaxReplicas = 5 ) // AddStream adds a stream for the given account. @@ -250,12 +260,12 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt // Sensible defaults. cfg, err := checkStreamCfg(config) if err != nil { - return nil, err + return nil, ApiErrors[JSStreamInvalidConfigF].ErrOrNewT(err, "{err}", err) } singleServerMode := !s.JetStreamIsClustered() && s.standAloneMode() if singleServerMode && cfg.Replicas > 1 { - return nil, ErrReplicasNotSupported + return nil, ApiErrors[JSStreamReplicasNotSupportedErr] } jsa.mu.Lock() @@ -270,7 +280,7 @@ func (a *Account) addStreamWithAssignment(config *StreamConfig, fsConfig *FileSt } return mset, nil } else { - return nil, ErrJetStreamStreamAlreadyUsed + return nil, ApiErrors[JSStreamNameExistErr] } } // Check for limits. @@ -651,7 +661,7 @@ func (mset *stream) sendCreateAdvisory() { } subj := JSAdvisoryStreamCreatedPre + "." 
+ name - outq.send(&jsPubMsg{subj, subj, _EMPTY_, nil, j, nil, 0, nil}) + outq.sendMsg(subj, j) } func (mset *stream) sendDeleteAdvisoryLocked() { @@ -674,7 +684,7 @@ func (mset *stream) sendDeleteAdvisoryLocked() { j, err := json.Marshal(m) if err == nil { subj := JSAdvisoryStreamDeletedPre + "." + mset.cfg.Name - mset.outq.send(&jsPubMsg{subj, subj, _EMPTY_, nil, j, nil, 0, nil}) + mset.outq.sendMsg(subj, j) } } @@ -697,7 +707,7 @@ func (mset *stream) sendUpdateAdvisoryLocked() { j, err := json.Marshal(m) if err == nil { subj := JSAdvisoryStreamUpdatedPre + "." + mset.cfg.Name - mset.outq.send(&jsPubMsg{subj, subj, _EMPTY_, nil, j, nil, 0, nil}) + mset.outq.sendMsg(subj, j) } } @@ -731,7 +741,7 @@ func (jsa *jsAccount) subjectsOverlap(subjects []string) bool { return false } -// Default duplicates window. +// StreamDefaultDuplicatesWindow default duplicates window. const StreamDefaultDuplicatesWindow = 2 * time.Minute func checkStreamCfg(config *StreamConfig) (StreamConfig, error) { @@ -744,6 +754,10 @@ func checkStreamCfg(config *StreamConfig) (StreamConfig, error) { if len(config.Name) > JSMaxNameLen { return StreamConfig{}, fmt.Errorf("stream name is too long, maximum allowed is %d", JSMaxNameLen) } + if len(config.Description) > JSMaxDescriptionLen { + return StreamConfig{}, fmt.Errorf("stream description is too long, maximum allowed is %d", JSMaxDescriptionLen) + } + cfg := *config // Make file the default. @@ -824,31 +838,31 @@ func (mset *stream) fileStoreConfig() (FileStoreConfig, error) { func (jsa *jsAccount) configUpdateCheck(old, new *StreamConfig) (*StreamConfig, error) { cfg, err := checkStreamCfg(new) if err != nil { - return nil, err + return nil, ApiErrors[JSStreamInvalidConfigF].ErrOrNewT(err, "{err}", err) } // Name must match. 
if cfg.Name != old.Name { - return nil, fmt.Errorf("stream configuration name must match original") + return nil, ApiErrors[JSStreamInvalidConfigF].NewT("{err}", "stream configuration name must match original") } // Can't change MaxConsumers for now. if cfg.MaxConsumers != old.MaxConsumers { - return nil, fmt.Errorf("stream configuration update can not change MaxConsumers") + return nil, ApiErrors[JSStreamInvalidConfigF].NewT("{err}", "stream configuration update can not change MaxConsumers") } // Can't change storage types. if cfg.Storage != old.Storage { - return nil, fmt.Errorf("stream configuration update can not change storage type") + return nil, ApiErrors[JSStreamInvalidConfigF].NewT("{err}", "stream configuration update can not change storage type") } // Can't change retention. if cfg.Retention != old.Retention { - return nil, fmt.Errorf("stream configuration update can not change retention policy") + return nil, ApiErrors[JSStreamInvalidConfigF].NewT("{err}", "stream configuration update can not change retention policy") } // Can not have a template owner for now. if old.Template != _EMPTY_ { - return nil, fmt.Errorf("stream configuration update not allowed on template owned stream") + return nil, ApiErrors[JSStreamInvalidConfigF].NewT("{err}", "stream configuration update not allowed on template owned stream") } if cfg.Template != _EMPTY_ { - return nil, fmt.Errorf("stream configuration update can not be owned by a template") + return nil, ApiErrors[JSStreamInvalidConfigF].NewT("{err}", "stream configuration update can not be owned by a template") } // Check limits. 
@@ -863,7 +877,7 @@ func (mset *stream) update(config *StreamConfig) error { ocfg := mset.config() cfg, err := mset.jsa.configUpdateCheck(&ocfg, config) if err != nil { - return err + return ApiErrors[JSStreamInvalidConfigF].ErrOrNewT(err, "{err}", err) } mset.mu.Lock() @@ -946,7 +960,7 @@ func (mset *stream) update(config *StreamConfig) error { } // Purge will remove all messages from the stream and underlying store. -func (mset *stream) purge() (uint64, error) { +func (mset *stream) purge(preq *JSApiStreamPurgeRequest) (purged uint64, err error) { mset.mu.Lock() if mset.client == nil { mset.mu.Unlock() @@ -961,7 +975,11 @@ func (mset *stream) purge() (uint64, error) { } mset.mu.Unlock() - purged, err := mset.store.Purge() + if preq != nil { + purged, err = mset.store.PurgeEx(preq.Subject, preq.Sequence, preq.Keep) + } else { + purged, err = mset.store.Purge() + } if err != nil { return purged, err } @@ -1185,6 +1203,10 @@ func (mset *stream) processMirrorMsgs() { mset.mu.Lock() if mset.mirror != nil { mset.mirror.grr = false + if mset.mirror.qch != nil { + close(mset.mirror.qch) + mset.mirror.qch = nil + } } mset.mu.Unlock() }() @@ -1195,7 +1217,7 @@ func (mset *stream) processMirrorMsgs() { mset.mu.Unlock() return } - msgs, mch, qch := mset.mirror.msgs, mset.mirror.msgs.mch, mset.qch + msgs, mch, qch, siqch := mset.mirror.msgs, mset.mirror.msgs.mch, mset.qch, mset.mirror.qch // Set the last seen as now so that we don't fail at the first check. mset.mirror.last = time.Now() mset.mu.Unlock() @@ -1209,6 +1231,8 @@ func (mset *stream) processMirrorMsgs() { return case <-qch: return + case <-siqch: + return case <-mch: for im := mset.pending(msgs); im != nil; im = im.next { if !mset.processInboundMirrorMsg(im) { @@ -1217,8 +1241,15 @@ func (mset *stream) processMirrorMsgs() { } case <-t.C: mset.mu.RLock() + isLeader := mset.isLeader() stalled := mset.mirror != nil && time.Since(mset.mirror.last) > 3*sourceHealthCheckInterval mset.mu.RUnlock() + // No longer leader. 
+ if !isLeader { + mset.cancelMirrorConsumer() + return + } + // We are stalled. if stalled { mset.retryMirrorConsumer() } @@ -1323,7 +1354,7 @@ func (mset *stream) processInboundMirrorMsg(m *inMsg) bool { if node != nil { if js.limitsExceeded(stype) { s.resourcesExeededError() - err = ErrJetStreamResourcesExceeded + err = ApiErrors[JSInsufficientResourcesErr] } else { err = node.Propose(encodeStreamMsg(m.subj, _EMPTY_, m.hdr, m.msg, sseq-1, ts)) } @@ -1378,6 +1409,11 @@ func (mset *stream) cancelMirrorConsumer() { mset.mirror.sub = nil } mset.removeInternalConsumer(mset.mirror) + // If the go routine is still running close the quit chan. + if mset.mirror.qch != nil { + close(mset.mirror.qch) + mset.mirror.qch = nil + } } func (mset *stream) retryMirrorConsumer() error { @@ -1451,6 +1487,7 @@ func (mset *stream) setupMirrorConsumer() error { if !mset.mirror.grr { mset.mirror.grr = true + mset.mirror.qch = make(chan struct{}) mset.srv.startGoRoutine(func() { mset.processMirrorMsgs() }) } @@ -1473,7 +1510,7 @@ func (mset *stream) setupMirrorConsumer() error { DeliverPolicy: DeliverByStartSequence, OptStartSeq: state.LastSeq + 1, AckPolicy: AckNone, - AckWait: 48 * time.Hour, + AckWait: 22 * time.Hour, MaxDeliver: 1, Heartbeat: sourceHealthCheckInterval, FlowControl: true, @@ -1498,7 +1535,7 @@ func (mset *stream) setupMirrorConsumer() error { respCh := make(chan *JSApiConsumerCreateResponse, 1) reply := infoReplySubject() - crSub, _ := mset.subscribeInternal(reply, func(sub *subscription, c *client, subject, reply string, rmsg []byte) { + crSub, _ := mset.subscribeInternal(reply, func(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { mset.unsubscribeUnlocked(sub) _, msg := c.msgParts(rmsg) @@ -1506,7 +1543,7 @@ func (mset *stream) setupMirrorConsumer() error { if err := json.Unmarshal(msg, &ccr); err != nil { c.Warnf("JetStream bad mirror consumer create response: %q", msg) mset.cancelMirrorConsumer() - 
mset.setMirrorErr(jsInvalidJSONErr) + mset.setMirrorErr(ApiErrors[JSInvalidJSONErr]) return } respCh <- &ccr @@ -1550,12 +1587,12 @@ func (mset *stream) setupMirrorConsumer() error { msgs := mset.mirror.msgs // Process inbound mirror messages from the wire. - sub, err := mset.subscribeInternal(deliverSubject, func(sub *subscription, c *client, subject, reply string, rmsg []byte) { + sub, err := mset.subscribeInternal(deliverSubject, func(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { hdr, msg := c.msgParts(append(rmsg[:0:0], rmsg...)) // Need to copy. mset.queueInbound(msgs, subject, reply, hdr, msg) }) if err != nil { - mset.mirror.err = jsError(err) + mset.mirror.err = ApiErrors[JSMirrorConsumerSetupFailedErrF].ErrOrNewT(err, "{err}", err) mset.mirror.sub = nil mset.mirror.cname = _EMPTY_ } else { @@ -1595,8 +1632,7 @@ func (mset *stream) retrySourceConsumer(sname string) { return } mset.setStartingSequenceForSource(sname) - seq := si.sseq + 1 - mset.retrySourceConsumerAtSeq(sname, seq) + mset.retrySourceConsumerAtSeq(sname, si.sseq+1) } // Lock should be held. @@ -1615,13 +1651,18 @@ func (mset *stream) retrySourceConsumerAtSeq(sname string, seq uint64) { mset.setSourceConsumer(sname, seq) } -// Locl should be held. +// Lock should be held. func (mset *stream) cancelSourceConsumer(sname string) { if si := mset.sources[sname]; si != nil && si.sub != nil { mset.unsubscribe(si.sub) si.sub = nil si.sseq, si.dseq = 0, 0 mset.removeInternalConsumer(si) + // If the go routine is still running close the quit chan. 
+ if si.qch != nil { + close(si.qch) + si.qch = nil + } } } @@ -1654,6 +1695,7 @@ func (mset *stream) setSourceConsumer(iname string, seq uint64) { if !si.grr { si.grr = true + si.qch = make(chan struct{}) mset.srv.startGoRoutine(func() { mset.processSourceMsgs(si) }) } @@ -1668,7 +1710,7 @@ func (mset *stream) setSourceConsumer(iname string, seq uint64) { Config: ConsumerConfig{ DeliverSubject: deliverSubject, AckPolicy: AckNone, - AckWait: 48 * time.Hour, + AckWait: 22 * time.Hour, MaxDeliver: 1, Heartbeat: sourceHealthCheckInterval, FlowControl: true, @@ -1695,7 +1737,7 @@ func (mset *stream) setSourceConsumer(iname string, seq uint64) { respCh := make(chan *JSApiConsumerCreateResponse, 1) reply := infoReplySubject() - crSub, _ := mset.subscribeInternal(reply, func(sub *subscription, c *client, subject, reply string, rmsg []byte) { + crSub, _ := mset.subscribeInternal(reply, func(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { mset.unsubscribe(sub) _, msg := c.msgParts(rmsg) var ccr JSApiConsumerCreateResponse @@ -1734,12 +1776,12 @@ func (mset *stream) setSourceConsumer(iname string, seq uint64) { // Capture consumer name. si.cname = ccr.ConsumerInfo.Name // Now create sub to receive messages. - sub, err := mset.subscribeInternal(deliverSubject, func(sub *subscription, c *client, subject, reply string, rmsg []byte) { + sub, err := mset.subscribeInternal(deliverSubject, func(sub *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { hdr, msg := c.msgParts(append(rmsg[:0:0], rmsg...)) // Need to copy. 
mset.queueInbound(si.msgs, subject, reply, hdr, msg) }) if err != nil { - si.err = jsError(err) + si.err = ApiErrors[JSSourceConsumerSetupFailedErrF].ErrOrNewT(err, "{err}", err) si.sub = nil } else { si.err = nil @@ -1759,19 +1801,24 @@ func (mset *stream) setSourceConsumer(iname string, seq uint64) { func (mset *stream) processSourceMsgs(si *sourceInfo) { s := mset.srv defer s.grWG.Done() + + if si == nil { + return + } + defer func() { mset.mu.Lock() si.grr = false + if si.qch != nil { + close(si.qch) + si.qch = nil + } mset.mu.Unlock() }() - if si == nil { - return - } - // Grab stream quit channel. mset.mu.Lock() - msgs, mch, qch := si.msgs, si.msgs.mch, mset.qch + msgs, mch, qch, siqch := si.msgs, si.msgs.mch, mset.qch, si.qch // Set the last seen as now so that we don't fail at the first check. si.last = time.Now() mset.mu.Unlock() @@ -1785,6 +1832,8 @@ func (mset *stream) processSourceMsgs(si *sourceInfo) { return case <-qch: return + case <-siqch: + return case <-mch: for im := mset.pending(msgs); im != nil; im = im.next { if !mset.processInboundSourceMsg(si, im) { @@ -1793,9 +1842,17 @@ func (mset *stream) processSourceMsgs(si *sourceInfo) { } case <-t.C: mset.mu.RLock() + iname, isLeader := si.iname, mset.isLeader() stalled := time.Since(si.last) > 3*sourceHealthCheckInterval - iname := si.iname mset.mu.RUnlock() + // No longer leader. + if !isLeader { + mset.mu.Lock() + mset.cancelSourceConsumer(iname) + mset.mu.Unlock() + return + } + // We are stalled. 
if stalled { mset.retrySourceConsumer(iname) } @@ -1818,7 +1875,7 @@ func (mset *stream) handleFlowControl(si *sourceInfo, m *inMsg) { } mset.fcr[si.clseq] = m.rply } else { - mset.outq.send(&jsPubMsg{m.rply, _EMPTY_, _EMPTY_, nil, nil, nil, 0, nil}) + mset.outq.sendMsg(m.rply, nil) } } @@ -1826,6 +1883,7 @@ func (mset *stream) handleFlowControl(si *sourceInfo, m *inMsg) { func (mset *stream) processInboundSourceMsg(si *sourceInfo, m *inMsg) bool { mset.mu.Lock() + // If we are no longer the leader cancel this subscriber. if !mset.isLeader() { mset.mu.Unlock() mset.cancelSourceConsumer(si.name) @@ -2091,8 +2149,7 @@ func (mset *stream) setupSourceConsumers() error { // Reset if needed. for _, si := range mset.sources { if si.sub != nil { - mset.unsubscribe(si.sub) - mset.removeInternalConsumer(si) + mset.cancelSourceConsumer(si.name) } } @@ -2143,6 +2200,11 @@ func (mset *stream) stopSourceConsumers() { } // Need to delete the old one. mset.removeInternalConsumer(si) + // If the go routine is still running close the quit chan. + if si.qch != nil { + close(si.qch) + si.qch = nil + } } } @@ -2165,6 +2227,10 @@ func (mset *stream) unsubscribeToStream() error { mset.unsubscribe(mset.mirror.sub) } mset.removeInternalConsumer(mset.mirror) + // If the go routine is still running close the quit chan. 
+ if mset.mirror.qch != nil { + close(mset.mirror.qch) + } mset.mirror = nil } @@ -2182,9 +2248,6 @@ func (mset *stream) subscribeInternal(subject string, cb msgHandler) (*subscript if c == nil { return nil, fmt.Errorf("invalid stream") } - if !c.srv.eventsEnabled() { - return nil, ErrNoSysAccount - } if cb == nil { return nil, fmt.Errorf("undefined message handler") } @@ -2255,7 +2318,8 @@ func (mset *stream) setupStore(fsCfg *FileStoreConfig) error { } mset.store = ms case FileStorage: - fs, err := newFileStoreWithCreated(*fsCfg, mset.cfg, mset.created) + s := mset.srv + fs, err := newFileStoreWithCreated(*fsCfg, mset.cfg, mset.created, s.jsKeyGen(mset.acc.Name)) if err != nil { mset.mu.Unlock() return err @@ -2269,16 +2333,6 @@ func (mset *stream) setupStore(fsCfg *FileStoreConfig) error { return nil } -// Clears out any filtered index from filestores. -func (mset *stream) clearFilterIndex() { - mset.mu.Lock() - defer mset.mu.Unlock() - - if fs, ok := mset.store.(*fileStore); ok { - fs.clearFilterIndex() - } -} - // Called for any updates to the underlying stream. We pass through the bytes to the // jetstream account. We do local processing for stream pending for consumers, but only // for removals. @@ -2390,6 +2444,15 @@ func getExpectedLastSeq(hdr []byte) uint64 { return uint64(parseInt64(bseq)) } +// Fast lookup of expected stream sequence per subject. +func getExpectedLastSeqPerSubject(hdr []byte) uint64 { + bseq := getHeader(JSExpectedLastSubjSeq, hdr) + if len(bseq) == 0 { + return 0 + } + return uint64(parseInt64(bseq)) +} + // Lock should be held. func (mset *stream) isClustered() bool { return mset.node != nil @@ -2454,7 +2517,7 @@ func (mset *stream) queueInboundMsg(subj, rply string, hdr, msg []byte) { } // processInboundJetStreamMsg handles processing messages bound for a stream. 
-func (mset *stream) processInboundJetStreamMsg(_ *subscription, c *client, subject, reply string, rmsg []byte) { +func (mset *stream) processInboundJetStreamMsg(_ *subscription, c *client, _ *Account, subject, reply string, rmsg []byte) { mset.mu.RLock() isLeader, isClustered := mset.isLeader(), mset.node != nil mset.mu.RUnlock() @@ -2531,9 +2594,9 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, mset.mu.Unlock() if canRespond && outq != nil { resp.PubAck = &PubAck{Stream: name} - resp.Error = &ApiError{Code: 503, Description: "expected stream sequence does not match"} + resp.Error = ApiErrors[JSStreamSequenceNotMatchErr] b, _ := json.Marshal(resp) - outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil}) + outq.sendMsg(reply, b) } return errLastSeqMismatch } @@ -2556,7 +2619,7 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, if canRespond { response := append(pubAck, strconv.FormatUint(dde.seq, 10)...) response = append(response, ",\"duplicate\": true}"...) 
- outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil}) + outq.sendMsg(reply, response) } return errMsgIdDuplicate } @@ -2567,9 +2630,9 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, mset.mu.Unlock() if canRespond { resp.PubAck = &PubAck{Stream: name} - resp.Error = &ApiError{Code: 400, Description: "expected stream does not match"} + resp.Error = ApiErrors[JSStreamNotMatchErr] b, _ := json.Marshal(resp) - outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil}) + outq.sendMsg(reply, b) } return errors.New("expected stream does not match") } @@ -2580,9 +2643,9 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, mset.mu.Unlock() if canRespond { resp.PubAck = &PubAck{Stream: name} - resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("wrong last sequence: %d", mlseq)} + resp.Error = ApiErrors[JSStreamWrongLastSequenceErrF].NewT("{seq}", mlseq) b, _ := json.Marshal(resp) - outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil}) + outq.sendMsg(reply, b) } return fmt.Errorf("last sequence mismatch: %d vs %d", seq, mlseq) } @@ -2593,12 +2656,28 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, mset.mu.Unlock() if canRespond { resp.PubAck = &PubAck{Stream: name} - resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("wrong last msg ID: %s", last)} + resp.Error = ApiErrors[JSStreamWrongLastMsgIDErrF].NewT("{id}", last) b, _ := json.Marshal(resp) - outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil}) + outq.sendMsg(reply, b) } return fmt.Errorf("last msgid mismatch: %q vs %q", lmsgId, last) } + // Expected last sequence per subject. + if seq := getExpectedLastSeqPerSubject(hdr); seq > 0 { + // TODO(dlc) - We could make a new store func that does this all in one. 
+ _, lseq, _, _, _, err := mset.store.LoadLastMsg(subject) + if err != nil || lseq != seq { + mset.clfs++ + mset.mu.Unlock() + if canRespond { + resp.PubAck = &PubAck{Stream: name} + resp.Error = ApiErrors[JSStreamWrongLastSequenceErrF].NewT("{seq}", lseq) + b, _ := json.Marshal(resp) + outq.sendMsg(reply, b) + } + return fmt.Errorf("last sequence by subject mismatch: %d vs %d", seq, lseq) + } + } } // Response Ack. @@ -2614,9 +2693,9 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, mset.mu.Unlock() if canRespond { resp.PubAck = &PubAck{Stream: name} - resp.Error = &ApiError{Code: 400, Description: "message size exceeds maximum allowed"} + resp.Error = ApiErrors[JSStreamMessageExceedsMaximumErr] b, _ := json.Marshal(resp) - mset.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil}) + mset.outq.sendMsg(reply, b) } return ErrMaxPayload } @@ -2628,15 +2707,15 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, mset.mu.Unlock() if canRespond { resp.PubAck = &PubAck{Stream: name} - resp.Error = jsInsufficientErr + resp.Error = ApiErrors[JSInsufficientResourcesErr] b, _ := json.Marshal(resp) - mset.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil}) + mset.outq.sendMsg(reply, b) } // Stepdown regardless. if node := mset.raftNode(); node != nil { node.StepDown() } - return ErrJetStreamResourcesExceeded + return ApiErrors[JSInsufficientResourcesErr] } var noInterest bool @@ -2671,7 +2750,7 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, if canRespond { response = append(pubAck, strconv.FormatUint(mset.lseq, 10)...) response = append(response, '}') - mset.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil}) + mset.outq.sendMsg(reply, response) } // If we have a msgId make sure to save. 
if msgId != _EMPTY_ { @@ -2702,27 +2781,33 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, } if err != nil { - // If we did not succeed put those values back. + // If we did not succeed put those values back and increment clfs in case we are clustered. mset.mu.Lock() var state StreamState mset.store.FastState(&state) mset.lseq = state.LastSeq mset.lmsgId = olmsgId + mset.clfs++ mset.mu.Unlock() - if err != ErrStoreClosed { - s.Errorf("JetStream failed to store a msg on stream '%s > %s' - %v", accName, name, err) + switch err { + case ErrMaxMsgs, ErrMaxBytes, ErrMaxMsgsPerSubject, ErrMsgTooLarge: + s.Debugf("JetStream failed to store a msg on stream '%s > %s': %v", accName, name, err) + case ErrStoreClosed: + default: + s.Errorf("JetStream failed to store a msg on stream '%s > %s': %v", accName, name, err) } + if canRespond { resp.PubAck = &PubAck{Stream: name} - resp.Error = &ApiError{Code: 503, Description: err.Error()} + resp.Error = ApiErrors[JSStreamStoreFailedF].ErrOrNewT(err, "{err}", err) response, _ = json.Marshal(resp) } } else if jsa.limitsExceeded(stype) { s.Warnf("JetStream resource limits exceeded for account: %q", accName) if canRespond { resp.PubAck = &PubAck{Stream: name} - resp.Error = &ApiError{Code: 400, Description: "resource limits exceeded for account"} + resp.Error = ApiErrors[JSAccountResourcesExceededErr] response, _ = json.Marshal(resp) } // If we did not succeed put those values back. @@ -2735,6 +2820,7 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, store.RemoveMsg(seq) seq = 0 } else { + // No errors, this is the normal path. // If we have a msgId make sure to save. if msgId != _EMPTY_ { mset.storeMsgId(&ddentry{msgId, seq, ts}) @@ -2747,7 +2833,7 @@ func (mset *stream) processJetStreamMsg(subject, reply string, hdr, msg []byte, // Send response here. 
if canRespond { - mset.outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil}) + mset.outq.sendMsg(reply, response) } if err == nil && seq > 0 && numConsumers > 0 { @@ -2806,6 +2892,12 @@ func (q *jsOutQ) pending() *jsPubMsg { return head } +func (q *jsOutQ) sendMsg(subj string, msg []byte) { + if q != nil { + q.send(&jsPubMsg{subj, _EMPTY_, _EMPTY_, nil, msg, nil, 0, nil}) + } +} + func (q *jsOutQ) send(msg *jsPubMsg) { if q == nil || msg == nil { return @@ -2880,6 +2972,9 @@ func (mset *stream) internalLoop() { isClustered := mset.cfg.Replicas > 1 mset.mu.RUnlock() + // Raw scratch buffer. + var _r [64 * 1024]byte + for { select { case <-outq.mch: @@ -2890,17 +2985,20 @@ func (mset *stream) internalLoop() { c.pa.szb = []byte(strconv.Itoa(c.pa.size)) c.pa.reply = []byte(pm.reply) - var msg []byte + msg := _r[:0] if len(pm.hdr) > 0 { c.pa.hdr = len(pm.hdr) c.pa.hdb = []byte(strconv.Itoa(c.pa.hdr)) - msg = append(pm.hdr, pm.msg...) - msg = append(msg, _CRLF_...) + msg = append(msg, pm.hdr...) + msg = append(msg, pm.msg...) } else { c.pa.hdr = -1 c.pa.hdb = nil - msg = append(pm.msg, _CRLF_...) + if len(pm.msg) > 0 { + msg = append(msg, pm.msg...) + } } + msg = append(msg, _CRLF_...) didDeliver, _ := c.processInboundClientMsg(msg) c.pa.szb = nil @@ -2911,7 +3009,7 @@ func (mset *stream) internalLoop() { pm.o.didNotDeliver(pm.seq) } } - c.flushClients(10 * time.Millisecond) + c.flushClients(0) case <-mch: for im := mset.pending(mset.msgs); im != nil; im = im.next { // If we are clustered we need to propose this message to the underlying raft group. @@ -2943,7 +3041,7 @@ func (mset *stream) stop(deleteFlag, advisory bool) error { mset.mu.RUnlock() if jsa == nil { - return ErrJetStreamNotEnabledForAccount + return ApiErrors[JSNotEnabledForAccountErr] } // Remove from our account map. 
@@ -2970,15 +3068,14 @@ func (mset *stream) stop(deleteFlag, advisory bool) error { mset.cancelSourceConsumer(si.iname) } } - mset.mu.Unlock() + mset.mu.Unlock() for _, o := range obs { - // Second flag says do not broadcast to signal. + // Third flag says do not broadcast a signal. // TODO(dlc) - If we have an err here we don't want to stop // but should we log? - o.stopWithFlags(deleteFlag, false, advisory) + o.stopWithFlags(deleteFlag, deleteFlag, false, advisory) } - mset.mu.Lock() // Stop responding to sync requests. @@ -3113,10 +3210,10 @@ func (mset *stream) setConsumer(o *consumer) { } func (mset *stream) removeConsumer(o *consumer) { - if o.cfg.FilterSubject != _EMPTY_ { + if o.cfg.FilterSubject != _EMPTY_ && mset.numFilter > 0 { mset.numFilter-- } - if o.cfg.Direct { + if o.cfg.Direct && mset.directs > 0 { mset.directs-- } delete(mset.consumers, o.name) @@ -3235,7 +3332,7 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error cfg, err := checkStreamCfg(ncfg) if err != nil { - return nil, err + return nil, ApiErrors[JSStreamNotFoundErr].ErrOr(err) } _, jsa, err := a.checkForJetStream() @@ -3270,8 +3367,7 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error return nil, err } fpath := path.Join(sdir, filepath.Clean(hdr.Name)) - pdir := filepath.Dir(fpath) - os.MkdirAll(pdir, defaultDirPerms) + os.MkdirAll(filepath.Dir(fpath), defaultDirPerms) fd, err := os.OpenFile(fpath, os.O_CREATE|os.O_RDWR, 0600) if err != nil { return nil, err @@ -3285,7 +3381,6 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error // Check metadata. // The cfg passed in will be the new identity for the stream. - var fcfg FileStreamInfo b, err := ioutil.ReadFile(path.Join(sdir, JetStreamMetaFile)) if err != nil { @@ -3297,14 +3392,18 @@ func (a *Account) RestoreStream(ncfg *StreamConfig, r io.Reader) (*stream, error // See if this stream already exists. 
if _, err := a.lookupStream(cfg.Name); err == nil { - return nil, ErrJetStreamStreamAlreadyUsed + return nil, ApiErrors[JSStreamNameExistErr] } // Move into the correct place here. ndir := path.Join(jsa.storeDir, streamsDir, cfg.Name) - // Remove old one if for some reason is here. - if _, err := os.Stat(ndir); !os.IsNotExist(err) { + // Remove old one if for some reason it is still here. + if _, err := os.Stat(ndir); err == nil { os.RemoveAll(ndir) } + // Make sure our destination streams directory exists. + if err := os.MkdirAll(path.Join(jsa.storeDir, streamsDir), defaultDirPerms); err != nil { + return nil, err + } // Move into new location. if err := os.Rename(sdir, ndir); err != nil { return nil, err diff --git a/vendor/github.com/nats-io/nats-server/v2/server/sublist.go b/vendor/github.com/nats-io/nats-server/v2/server/sublist.go index 83925ed3..9520e26d 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/sublist.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/sublist.go @@ -29,7 +29,9 @@ import ( // Common byte variables for wildcards and token separator. const ( pwc = '*' + pwcs = "*" fwc = '>' + fwcs = ">" tsep = "." btsep = '.' ) @@ -1185,10 +1187,8 @@ func tokenAt(subject string, index uint8) string { return _EMPTY_ } -// Calls into the function isSubsetMatch() -func subjectIsSubsetMatch(subject, test string) bool { - tsa := [32]string{} - tts := tsa[:0] +// use similar to append. 
meaning, the updated slice will be returned +func tokenizeSubjectIntoSlice(tts []string, subject string) []string { start := 0 for i := 0; i < len(subject); i++ { if subject[i] == btsep { @@ -1197,27 +1197,31 @@ func subjectIsSubsetMatch(subject, test string) bool { } } tts = append(tts, subject[start:]) + return tts +} + +// Calls into the function isSubsetMatch() +func subjectIsSubsetMatch(subject, test string) bool { + tsa := [32]string{} + tts := tokenizeSubjectIntoSlice(tsa[:0], subject) return isSubsetMatch(tts, test) } // This will test a subject as an array of tokens against a test subject -// and determine if the tokens are matched. Both test subject and tokens -// may contain wildcards. So foo.* is a subset match of [">", "*.*", "foo.*"], -// but not of foo.bar, etc. +// Calls into the function isSubsetMatchTokenized func isSubsetMatch(tokens []string, test string) bool { tsa := [32]string{} - tts := tsa[:0] - start := 0 - for i := 0; i < len(test); i++ { - if test[i] == btsep { - tts = append(tts, test[start:i]) - start = i + 1 - } - } - tts = append(tts, test[start:]) + tts := tokenizeSubjectIntoSlice(tsa[:0], test) + return isSubsetMatchTokenized(tokens, tts) +} +// This will test a subject as an array of tokens against a test subject (also encoded as array of tokens) +// and determine if the tokens are matched. Both test subject and tokens +// may contain wildcards. So foo.* is a subset match of [">", "*.*", "foo.*"], +// but not of foo.bar, etc. 
+func isSubsetMatchTokenized(tokens, test []string) bool { // Walk the target tokens - for i, t2 := range tts { + for i, t2 := range test { if i >= len(tokens) { return false } @@ -1240,7 +1244,7 @@ func isSubsetMatch(tokens []string, test string) bool { if !m { return false } - if i >= len(tts) { + if i >= len(test) { return true } continue @@ -1249,7 +1253,7 @@ func isSubsetMatch(tokens []string, test string) bool { return false } } - return len(tokens) == len(tts) + return len(tokens) == len(test) } // matchLiteral is used to test literal subjects, those that do not have any diff --git a/vendor/github.com/nats-io/nats-server/v2/server/websocket.go b/vendor/github.com/nats-io/nats-server/v2/server/websocket.go index b699631d..ab4ce822 100644 --- a/vendor/github.com/nats-io/nats-server/v2/server/websocket.go +++ b/vendor/github.com/nats-io/nats-server/v2/server/websocket.go @@ -75,7 +75,6 @@ const ( wsFirstFrame = true wsContFrame = false wsFinalFrame = true - wsCompressedFrame = true wsUncompressedFrame = false wsSchemePrefix = "ws" @@ -92,6 +91,7 @@ const ( ) var decompressorPool sync.Pool +var compressLastBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff} // From https://tools.ietf.org/html/rfc6455#section-1.3 var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") @@ -144,7 +144,8 @@ type wsReadInfo struct { mask bool // Incoming leafnode connections may not have masking. mkpos byte mkey [4]byte - buf []byte + cbufs [][]byte + coff int } func (r *wsReadInfo) init() { @@ -292,42 +293,118 @@ func (c *client) wsRead(r *wsReadInfo, ior io.Reader, buf []byte) ([][]byte, err b = buf[pos : pos+n] pos += n r.rem -= n - if r.fc { - r.buf = append(r.buf, b...) 
- b = r.buf + // If needed, unmask the buffer + if r.mask { + r.unmask(b) } - if !r.fc || r.rem == 0 { - if r.mask { - r.unmask(b) - } - if r.fc { - // As per https://tools.ietf.org/html/rfc7692#section-7.2.2 - // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader - // does not report unexpected EOF. - b = append(b, 0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff) - br := bytes.NewBuffer(b) - d, _ := decompressorPool.Get().(io.ReadCloser) - if d == nil { - d = flate.NewReader(br) - } else { - d.(flate.Resetter).Reset(br, nil) - } - b, err = ioutil.ReadAll(d) - decompressorPool.Put(d) + addToBufs := true + // Handle compressed message + if r.fc { + // Assume that we may have continuation frames or not the full payload. + addToBufs = false + // Make a copy of the buffer before adding it to the list + // of compressed fragments. + r.cbufs = append(r.cbufs, append([]byte(nil), b...)) + // When we have the final frame and we have read the full payload, + // we can decompress it. + if r.ff && r.rem == 0 { + b, err = r.decompress() if err != nil { return bufs, err } + r.fc = false + // Now we can add to `bufs` + addToBufs = true } + } + // For non compressed frames, or when we have decompressed the + // whole message. + if addToBufs { bufs = append(bufs, b) - if r.rem == 0 { - r.fs, r.fc, r.buf = true, false, nil - } + } + // If payload has been fully read, then indicate that next + // is the start of a frame. 
+ if r.rem == 0 { + r.fs = true } } } return bufs, nil } +func (r *wsReadInfo) Read(dst []byte) (int, error) { + if len(dst) == 0 { + return 0, nil + } + if len(r.cbufs) == 0 { + return 0, io.EOF + } + copied := 0 + rem := len(dst) + for buf := r.cbufs[0]; buf != nil && rem > 0; { + n := len(buf[r.coff:]) + if n > rem { + n = rem + } + copy(dst[copied:], buf[r.coff:r.coff+n]) + copied += n + rem -= n + r.coff += n + buf = r.nextCBuf() + } + return copied, nil +} + +func (r *wsReadInfo) nextCBuf() []byte { + // We still have remaining data in the first buffer + if r.coff != len(r.cbufs[0]) { + return r.cbufs[0] + } + // We read the full first buffer. Reset offset. + r.coff = 0 + // We were at the last buffer, so we are done. + if len(r.cbufs) == 1 { + r.cbufs = nil + return nil + } + // Here we move to the next buffer. + r.cbufs = r.cbufs[1:] + return r.cbufs[0] +} + +func (r *wsReadInfo) ReadByte() (byte, error) { + if len(r.cbufs) == 0 { + return 0, io.EOF + } + b := r.cbufs[0][r.coff] + r.coff++ + r.nextCBuf() + return b, nil +} + +func (r *wsReadInfo) decompress() ([]byte, error) { + r.coff = 0 + // As per https://tools.ietf.org/html/rfc7692#section-7.2.2 + // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader + // does not report unexpected EOF. + r.cbufs = append(r.cbufs, compressLastBlock) + // Get a decompressor from the pool and bind it to this object (wsReadInfo) + // that provides Read() and ReadByte() APIs that will consume the compressed + // buffers (r.cbufs). + d, _ := decompressorPool.Get().(io.ReadCloser) + if d == nil { + d = flate.NewReader(r) + } else { + d.(flate.Resetter).Reset(r, nil) + } + // This will do the decompression. + b, err := ioutil.ReadAll(d) + decompressorPool.Put(d) + // Now reset the compressed buffers list. + r.cbufs = nil + return b, err +} + // Handles the PING, PONG and CLOSE websocket control frames. // // Client lock MUST NOT be held on entry. 
@@ -1211,7 +1288,9 @@ func (c *client) wsCollapsePtoNB() (net.Buffers, int64) { final = true } fh := make([]byte, wsMaxFrameHeaderSize) - n, key := wsFillFrameHeader(fh, mask, first, final, wsCompressedFrame, wsBinaryMessage, lp) + // Only the first frame should be marked as compressed, so pass + // `first` for the compressed boolean. + n, key := wsFillFrameHeader(fh, mask, first, final, first, wsBinaryMessage, lp) if mask { wsMaskBuf(key, p[:lp]) } diff --git a/vendor/github.com/nats-io/nats.go/go_test.mod b/vendor/github.com/nats-io/nats.go/go_test.mod index 72e30d62..3c47e812 100644 --- a/vendor/github.com/nats-io/nats.go/go_test.mod +++ b/vendor/github.com/nats-io/nats.go/go_test.mod @@ -4,7 +4,7 @@ go 1.15 require ( github.com/golang/protobuf v1.4.2 - github.com/nats-io/nats-server/v2 v2.2.3-0.20210501163444-670f44f1e82e + github.com/nats-io/nats-server/v2 v2.2.7-0.20210618192106-93a3720475a4 github.com/nats-io/nkeys v0.3.0 github.com/nats-io/nuid v1.0.1 google.golang.org/protobuf v1.23.0 diff --git a/vendor/github.com/nats-io/nats.go/go_test.sum b/vendor/github.com/nats-io/nats.go/go_test.sum index 7567402c..09550633 100644 --- a/vendor/github.com/nats-io/nats.go/go_test.sum +++ b/vendor/github.com/nats-io/nats.go/go_test.sum @@ -9,77 +9,37 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/minio/highwayhash v1.0.0/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5dxBpaMbdc= 
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/jwt v0.3.3-0.20200519195258-f2bf5ce574c7/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M= -github.com/nats-io/jwt v1.1.0/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M= github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= -github.com/nats-io/jwt/v2 v2.0.0-20200916203241-1f8ce17dff02/go.mod h1:vs+ZEjP+XKy8szkBmQwCB7RjYdIlMaPsFPs4VdS4bTQ= -github.com/nats-io/jwt/v2 v2.0.0-20201015190852-e11ce317263c/go.mod h1:vs+ZEjP+XKy8szkBmQwCB7RjYdIlMaPsFPs4VdS4bTQ= -github.com/nats-io/jwt/v2 v2.0.0-20210125223648-1c24d462becc/go.mod h1:PuO5FToRL31ecdFqVjc794vK0Bj0CwzveQEDvkb7MoQ= -github.com/nats-io/jwt/v2 v2.0.0-20210208203759-ff814ca5f813/go.mod h1:PuO5FToRL31ecdFqVjc794vK0Bj0CwzveQEDvkb7MoQ= -github.com/nats-io/jwt/v2 v2.0.1 h1:SycklijeduR742i/1Y3nRhURYM7imDzZZ3+tuAQqhQA= -github.com/nats-io/jwt/v2 v2.0.1/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= -github.com/nats-io/nats-server/v2 v2.1.8-0.20200524125952-51ebd92a9093/go.mod h1:rQnBf2Rv4P9adtAs/Ti6LfFmVtFG6HLhl/H7cVshcJU= -github.com/nats-io/nats-server/v2 v2.1.8-0.20200601203034-f8d6dd992b71/go.mod h1:Nan/1L5Sa1JRW+Thm4HNYcIDcVRFc5zK9OpSZeI2kk4= -github.com/nats-io/nats-server/v2 v2.1.8-0.20200929001935-7f44d075f7ad/go.mod h1:TkHpUIDETmTI7mrHN40D1pzxfzHZuGmtMbtb83TGVQw= -github.com/nats-io/nats-server/v2 v2.1.8-0.20201129161730-ebe63db3e3ed/go.mod h1:XD0zHR/jTXdZvWaQfS5mQgsXj6x12kMjKLyAk/cOGgY= -github.com/nats-io/nats-server/v2 v2.1.8-0.20210205154825-f7ab27f7dad4/go.mod h1:kauGd7hB5517KeSqspW2U1Mz/jhPbTrE8eOXzUPk1m0= -github.com/nats-io/nats-server/v2 v2.1.8-0.20210227190344-51550e242af8/go.mod 
h1:/QQ/dpqFavkNhVnjvMILSQ3cj5hlmhB66adlgNbjuoA= -github.com/nats-io/nats-server/v2 v2.2.1-0.20210330155036-61cbd74e213d/go.mod h1:eKlAaGmSQHZMFQA6x56AaP5/Bl9N3mWF4awyT2TTpzc= -github.com/nats-io/nats-server/v2 v2.2.1 h1:QaWKih9qAa1kod7xXy0G1ry0AEUGmDEaptaiqzuO1e8= -github.com/nats-io/nats-server/v2 v2.2.1/go.mod h1:A+5EOqdnhH7FvLxtAK6SEDx6hyHriVOwf+FT/eEV99c= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421001316-7ac0ff667439 h1:wbm+DoCrBx3XUkfgfnzSGKGKXSSnR8z0EzaH8iEsYT4= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421001316-7ac0ff667439/go.mod h1:A+5EOqdnhH7FvLxtAK6SEDx6hyHriVOwf+FT/eEV99c= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421031524-a3f66508dd3a h1:Ihh+7S9hHb3zn4nibE9EV8P3Ed7OrH4TlGXHqIUYDfk= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421031524-a3f66508dd3a/go.mod h1:aF2IwMZdYktJswITm41c/k66uCHjTvpTxGQ7+d4cPeg= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421135834-a9607573b30c h1:URcPI+y2OIGWM1pKzHhHTvRItB0Czlv3dzuJA0rklvk= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421135834-a9607573b30c/go.mod h1:aF2IwMZdYktJswITm41c/k66uCHjTvpTxGQ7+d4cPeg= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421164150-3d928c847a0c h1:cbbxAcABuk2WdXKRm9VezFcGsceRhls4VCmQ/2aRJjQ= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421164150-3d928c847a0c/go.mod h1:aF2IwMZdYktJswITm41c/k66uCHjTvpTxGQ7+d4cPeg= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421195432-ea21e86996f7 h1:wcd++VZMdwDpQ7P1VXJ7NpAwtgdlxcjFLZ12Y/pL8Nw= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421195432-ea21e86996f7/go.mod h1:aF2IwMZdYktJswITm41c/k66uCHjTvpTxGQ7+d4cPeg= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421215445-a48a39251636 h1:iy6c/tV66xi5DT9WLUu9rJ8uQj8Kf7kmwHAqlYfczP4= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421215445-a48a39251636/go.mod h1:aF2IwMZdYktJswITm41c/k66uCHjTvpTxGQ7+d4cPeg= -github.com/nats-io/nats-server/v2 v2.2.2-0.20210421232642-f2d3f5fb81d0 h1:e2MoeAShQE/oOSjkkV6J6R+l5ugbfkXI5spxgQykgoM= -github.com/nats-io/nats-server/v2 
v2.2.2-0.20210421232642-f2d3f5fb81d0/go.mod h1:aF2IwMZdYktJswITm41c/k66uCHjTvpTxGQ7+d4cPeg= -github.com/nats-io/nats-server/v2 v2.2.3-0.20210501163444-670f44f1e82e h1:Hvpz1/Epth4q7LnaU0U9SqMFd8grUMFTL8LMO5HFVok= -github.com/nats-io/nats-server/v2 v2.2.3-0.20210501163444-670f44f1e82e/go.mod h1:aF2IwMZdYktJswITm41c/k66uCHjTvpTxGQ7+d4cPeg= -github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= -github.com/nats-io/nats.go v1.10.1-0.20200531124210-96f2130e4d55/go.mod h1:ARiFsjW9DVxk48WJbO3OSZ2DG8fjkMi7ecLmXoY/n9I= -github.com/nats-io/nats.go v1.10.1-0.20200606002146-fc6fed82929a/go.mod h1:8eAIv96Mo9QW6Or40jUHejS7e4VwZ3VRYD6Sf0BTDp4= -github.com/nats-io/nats.go v1.10.1-0.20201021145452-94be476ad6e0/go.mod h1:VU2zERjp8xmF+Lw2NH4u2t5qWZxwc7jB3+7HVMWQXPI= -github.com/nats-io/nats.go v1.10.1-0.20210127212649-5b4924938a9a/go.mod h1:Sa3kLIonafChP5IF0b55i9uvGR10I3hPETFbi4+9kOI= -github.com/nats-io/nats.go v1.10.1-0.20210211000709-75ded9c77585/go.mod h1:uBWnCKg9luW1g7hgzPxUjHFRI40EuTSX7RCzgnc74Jk= -github.com/nats-io/nats.go v1.10.1-0.20210228004050-ed743748acac/go.mod h1:hxFvLNbNmT6UppX5B5Tr/r3g+XSwGjJzFn6mxPNJEHc= -github.com/nats-io/nats.go v1.10.1-0.20210330225420-a0b1f60162f8/go.mod h1:Zq9IEHy7zurF0kFbU5aLIknnFI7guh8ijHk+2v+Vf5g= -github.com/nats-io/nats.go v1.10.1-0.20210419223411-20527524c393/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI= +github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.2.6 h1:FPK9wWx9pagxcw14s8W9rlfzfyHm61uNLnJyybZbn48= +github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= +github.com/nats-io/nats-server/v2 
v2.2.7-0.20210615172038-0069f752b61b h1:hy5rgG4Hur55cWBKxD/VbkjaRYYAxo5Ayk9AxGJcHTs= +github.com/nats-io/nats-server/v2 v2.2.7-0.20210615172038-0069f752b61b/go.mod h1:hBgcnXvNESvh65J1nMtxaHHsaUxSmteZXCH1JLTuvfg= +github.com/nats-io/nats-server/v2 v2.2.7-0.20210618192106-93a3720475a4 h1:8QM5O7j1a9SdEPzzpQj7daRu4fi/sxfXRxfcKGa5Dr0= +github.com/nats-io/nats-server/v2 v2.2.7-0.20210618192106-93a3720475a4/go.mod h1:hBgcnXvNESvh65J1nMtxaHHsaUxSmteZXCH1JLTuvfg= +github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b h1:wSOdpTq0/eI46Ez/LkDwIsAKA71YP2SRKBODiRWM0as= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210505212654-3497b51f5e64 h1:QuAh/1Gwc0d+u9walMU1NqzhRemNegsv5esp2ALQIY4= +golang.org/x/crypto v0.0.0-20210505212654-3497b51f5e64/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/vendor/github.com/nats-io/nats.go/js.go b/vendor/github.com/nats-io/nats.go/js.go index 7b70bb1e..7a0b8e47 100644 --- a/vendor/github.com/nats-io/nats.go/js.go +++ b/vendor/github.com/nats-io/nats.go/js.go @@ -35,6 +35,9 @@ const ( // defaultAPIPrefix is the default prefix for the JetStream API. defaultAPIPrefix = "$JS.API." + // jsDomainT is used to create JetStream API prefix by specifying only Domain + jsDomainT = "$JS.%s.API." + // apiAccountInfo is for obtaining general information about JetStream. apiAccountInfo = "INFO" @@ -118,6 +121,9 @@ type JetStream interface { // ChanSubscribe creates channel based Subscription. ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) + // ChanQueueSubscribe creates channel based Subscription with a queue group. + ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) + // QueueSubscribe creates a Subscription with a queue group. 
QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) @@ -167,6 +173,7 @@ const ( ) // JetStream returns a JetStreamContext for messaging and stream management. +// Errors are only returned if inconsistent options are provided. func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) { js := &js{ nc: nc, @@ -181,26 +188,6 @@ func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) { return nil, err } } - - // If we have check recently we can avoid another account lookup here. - // We want these to be lighweight and created at will. - nc.mu.Lock() - now := time.Now() - checkAccount := now.Sub(nc.jsLastCheck) > defaultAccountCheck - if checkAccount { - nc.jsLastCheck = now - } - nc.mu.Unlock() - - if checkAccount { - if _, err := js.AccountInfo(); err != nil { - if err == ErrNoResponders { - err = ErrJetStreamNotEnabled - } - return nil, err - } - } - return js, nil } @@ -216,6 +203,11 @@ func (opt jsOptFn) configureJSContext(opts *jsOpts) error { return opt(opts) } +// Domain changes the domain part of JetSteam API prefix. +func Domain(domain string) JSOpt { + return APIPrefix(fmt.Sprintf(jsDomainT, domain)) +} + // APIPrefix changes the default prefix used for the JetStream API. func APIPrefix(pre string) JSOpt { return jsOptFn(func(js *jsOpts) error { @@ -703,7 +695,7 @@ func ExpectLastSequence(seq uint64) PubOpt { }) } -// ExpectLastSequence sets the expected sequence in the response from the publish. +// ExpectLastMsgId sets the expected sequence in the response from the publish. func ExpectLastMsgId(id string) PubOpt { return pubOptFn(func(opts *pubOpts) error { opts.lid = id @@ -987,11 +979,16 @@ func (js *js) QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscript return js.subscribe(subj, queue, nil, mch, true, opts) } -// Subscribe will create a subscription to the appropriate stream and consumer. 
+// ChanSubscribe will create a subscription to the appropriate stream and consumer using a channel. func (js *js) ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { return js.subscribe(subj, _EMPTY_, nil, ch, false, opts) } +// ChanQueueSubscribe will create a subscription to the appropriate stream and consumer using a channel. +func (js *js) ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { + return js.subscribe(subj, queue, nil, ch, false, opts) +} + // PullSubscribe creates a pull subscriber. func (js *js) PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) { return js.subscribe(subj, _EMPTY_, nil, nil, false, append(opts, Durable(durable))) @@ -1008,7 +1005,7 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync } } - isPullMode := ch == nil && cb == nil + isPullMode := ch == nil && cb == nil && !isSync badPullAck := o.cfg.AckPolicy == AckNonePolicy || o.cfg.AckPolicy == AckAllPolicy hasHeartbeats := o.cfg.Heartbeat > 0 hasFC := o.cfg.FlowControl @@ -1017,17 +1014,26 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync } var ( - err error - shouldCreate bool - ccfg *ConsumerConfig - info *ConsumerInfo - deliver string - attached bool - stream = o.stream - consumer = o.consumer - isDurable = o.cfg.Durable != _EMPTY_ + err error + shouldCreate bool + ccfg *ConsumerConfig + info *ConsumerInfo + deliver string + attached bool + stream = o.stream + consumer = o.consumer + isDurable = o.cfg.Durable != _EMPTY_ + consumerBound = o.bound + notFoundErr bool + lookupErr bool ) + // In case a consumer has not been set explicitly, then the + // durable name will be used as the consumer name. + if consumer == _EMPTY_ { + consumer = o.cfg.Durable + } + // Find the stream mapped to the subject if not bound to a stream already. 
if o.stream == _EMPTY_ { stream, err = js.lookupStreamBySubject(subj) @@ -1038,18 +1044,16 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync stream = o.stream } - // With an explicit durable name, then can lookup - // the consumer to which it should be attaching to. - consumer = o.cfg.Durable + // With an explicit durable name, then can lookup the consumer first + // to which it should be attaching to. if consumer != _EMPTY_ { - // Only create in case there is no consumer already. info, err = js.ConsumerInfo(stream, consumer) - if err != nil && err.Error() != "nats: consumer not found" { - return nil, err - } + notFoundErr = err != nil && strings.Contains(err.Error(), "consumer not found") + lookupErr = err == ErrJetStreamNotEnabled || err == ErrTimeout || err == context.DeadlineExceeded } - if info != nil { + switch { + case info != nil: // Attach using the found consumer config. ccfg = &info.Config attached = true @@ -1059,12 +1063,25 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync return nil, ErrSubjectMismatch } + // Prevent binding a subscription against incompatible consumer types. + if isPullMode && ccfg.DeliverSubject != _EMPTY_ { + return nil, ErrPullSubscribeToPushConsumer + } else if !isPullMode && ccfg.DeliverSubject == _EMPTY_ { + return nil, ErrPullSubscribeRequired + } + if ccfg.DeliverSubject != _EMPTY_ { deliver = ccfg.DeliverSubject - } else { + } else if !isPullMode { deliver = NewInbox() } - } else { + case (err != nil && !notFoundErr) || (notFoundErr && consumerBound): + // If the consumer is being bound got an error on pull subscribe then allow the error. + if !(isPullMode && lookupErr && consumerBound) { + return nil, err + } + default: + // Attempt to create consumer if not found nor using Bind. shouldCreate = true deliver = NewInbox() if !isPullMode { @@ -1351,6 +1368,8 @@ type subOpts struct { mack bool // For creating or updating. 
cfg *ConsumerConfig + // For binding a subscription to a consumer without creating it. + bound bool } // ManualAck disables auto ack functionality for async subscriptions. @@ -1362,16 +1381,19 @@ func ManualAck() SubOpt { } // Durable defines the consumer name for JetStream durable subscribers. -func Durable(name string) SubOpt { +func Durable(consumer string) SubOpt { return subOptFn(func(opts *subOpts) error { - if opts.cfg.Durable != "" { + if opts.cfg.Durable != _EMPTY_ { return fmt.Errorf("nats: option Durable set more than once") } - if strings.Contains(name, ".") { + if opts.consumer != _EMPTY_ && opts.consumer != consumer { + return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.consumer, consumer) + } + if strings.Contains(consumer, ".") { return ErrInvalidDurableName } - opts.cfg.Durable = name + opts.cfg.Durable = consumer return nil }) } @@ -1482,9 +1504,39 @@ func RateLimit(n uint64) SubOpt { } // BindStream binds a consumer to a stream explicitly based on a name. -func BindStream(name string) SubOpt { +func BindStream(stream string) SubOpt { + return subOptFn(func(opts *subOpts) error { + if opts.stream != _EMPTY_ && opts.stream != stream { + return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) + } + + opts.stream = stream + return nil + }) +} + +// Bind binds a subscription to an existing consumer from a stream without attempting to create. +// The first argument is the stream name and the second argument will be the consumer name. +func Bind(stream, consumer string) SubOpt { return subOptFn(func(opts *subOpts) error { - opts.stream = name + if stream == _EMPTY_ { + return ErrStreamNameRequired + } + if consumer == _EMPTY_ { + return ErrConsumerNameRequired + } + + // In case of pull subscribers, the durable name is a required parameter + // so check that they are not different. 
+ if opts.cfg.Durable != _EMPTY_ && opts.cfg.Durable != consumer { + return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.cfg.Durable, consumer) + } + if opts.stream != _EMPTY_ && opts.stream != stream { + return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) + } + opts.stream = stream + opts.consumer = consumer + opts.bound = true return nil }) } @@ -1925,7 +1977,7 @@ func (m *Msg) Ack(opts ...AckOpt) error { return m.ackReply(ackAck, false, opts...) } -// Ack is the synchronous version of Ack. This indicates successful message +// AckSync is the synchronous version of Ack. This indicates successful message // processing. func (m *Msg) AckSync(opts ...AckOpt) error { return m.ackReply(ackAck, true, opts...) @@ -2147,7 +2199,7 @@ const ( // consumer is created. DeliverNewPolicy - // DeliverByStartTimePolicy will deliver messages starting from a given + // DeliverByStartSequencePolicy will deliver messages starting from a given // sequence. DeliverByStartSequencePolicy diff --git a/vendor/github.com/nats-io/nats.go/jsm.go b/vendor/github.com/nats-io/nats.go/jsm.go index b6a3f8b7..e485ae14 100644 --- a/vendor/github.com/nats-io/nats.go/jsm.go +++ b/vendor/github.com/nats-io/nats.go/jsm.go @@ -146,6 +146,7 @@ type AccountInfo struct { Store uint64 `json:"storage"` Streams int `json:"streams"` Consumers int `json:"consumers"` + Domain string `json:"domain"` API APIStats `json:"api"` Limits AccountLimits `json:"limits"` } @@ -170,6 +171,8 @@ type accountInfoResponse struct { } // AccountInfo retrieves info about the JetStream usage from the current account. +// If JetStream is not enabled, this will return ErrJetStreamNotEnabled +// Other errors can happen but are generally considered retryable func (js *js) AccountInfo(opts ...JSOpt) (*AccountInfo, error) { o, cancel, err := getJSContextOpts(js.opts, opts...) 
if err != nil { @@ -181,6 +184,10 @@ func (js *js) AccountInfo(opts ...JSOpt) (*AccountInfo, error) { resp, err := js.nc.RequestWithContext(o.ctx, js.apiSubj(apiAccountInfo), nil) if err != nil { + // todo maybe nats server should never have no responder on this subject and always respond if they know there is no js to be had + if err == ErrNoResponders { + err = ErrJetStreamNotEnabled + } return nil, err } var info accountInfoResponse diff --git a/vendor/github.com/nats-io/nats.go/nats.go b/vendor/github.com/nats-io/nats.go/nats.go index 88f52810..50216834 100644 --- a/vendor/github.com/nats-io/nats.go/nats.go +++ b/vendor/github.com/nats-io/nats.go/nats.go @@ -146,9 +146,12 @@ var ( ErrInvalidJSAck = errors.New("nats: invalid jetstream publish response") ErrMultiStreamUnsupported = errors.New("nats: multiple streams are not supported") ErrStreamNameRequired = errors.New("nats: stream name is required") + ErrConsumerNameRequired = errors.New("nats: consumer name is required") ErrConsumerConfigRequired = errors.New("nats: consumer configuration is required") ErrStreamSnapshotConfigRequired = errors.New("nats: stream snapshot configuration is required") ErrDeliverSubjectRequired = errors.New("nats: deliver subject is required") + ErrPullSubscribeToPushConsumer = errors.New("nats: cannot pull subscribe to push based consumer") + ErrPullSubscribeRequired = errors.New("nats: must use pull subscribe to bind to pull based consumer") ) func init() { @@ -496,9 +499,6 @@ type Conn struct { respMux *Subscription // A single response subscription respMap map[string]chan *Msg // Request map for the response msg channels respRand *rand.Rand // Used for generating suffix - - // JetStream Contexts last account check. 
- jsLastCheck time.Time } type natsReader struct { @@ -2482,6 +2482,13 @@ func (nc *Conn) readLoop() { for { buf, err := br.Read() if err == nil { + // With websocket, it is possible that there is no error but + // also no buffer returned (either WS control message or read of a + // partial compressed message). We could call parse(buf) which + // would ignore an empty buffer, but simply go back to top of the loop. + if len(buf) == 0 { + continue + } err = nc.parse(buf) } if err != nil { diff --git a/vendor/github.com/nats-io/nats.go/ws.go b/vendor/github.com/nats-io/nats.go/ws.go index eb0c7d88..4231f102 100644 --- a/vendor/github.com/nats-io/nats.go/ws.go +++ b/vendor/github.com/nats-io/nats.go/ws.go @@ -72,9 +72,6 @@ const ( // From https://tools.ietf.org/html/rfc6455#section-1.3 var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") -// As per https://tools.ietf.org/html/rfc7692#section-7.2.2 -// add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader -// does not report unexpected EOF. 
var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff} type websocketReader struct { @@ -83,10 +80,16 @@ type websocketReader struct { ib []byte ff bool fc bool - dc io.ReadCloser + dc *wsDecompressor nc *Conn } +type wsDecompressor struct { + flate io.ReadCloser + bufs [][]byte + off int +} + type websocketWriter struct { w io.Writer compress bool @@ -97,57 +100,81 @@ type websocketWriter struct { noMoreSend bool // if true, even if there is a Write() call, we should not send anything } -type decompressorBuffer struct { - buf []byte - rem int - off int - final bool -} - -func newDecompressorBuffer(buf []byte) *decompressorBuffer { - return &decompressorBuffer{buf: buf, rem: len(buf)} -} - -func (d *decompressorBuffer) Read(p []byte) (int, error) { - if d.buf == nil { +func (d *wsDecompressor) Read(dst []byte) (int, error) { + if len(dst) == 0 { + return 0, nil + } + if len(d.bufs) == 0 { return 0, io.EOF } - lim := d.rem - if len(p) < lim { - lim = len(p) + copied := 0 + rem := len(dst) + for buf := d.bufs[0]; buf != nil && rem > 0; { + n := len(buf[d.off:]) + if n > rem { + n = rem + } + copy(dst[copied:], buf[d.off:d.off+n]) + copied += n + rem -= n + d.off += n + buf = d.nextBuf() } - n := copy(p, d.buf[d.off:d.off+lim]) - d.off += n - d.rem -= n - d.checkRem() - return n, nil + return copied, nil } -func (d *decompressorBuffer) checkRem() { - if d.rem != 0 { - return - } - if !d.final { - d.buf = compressFinalBlock - d.off = 0 - d.rem = len(d.buf) - d.final = true - } else { - d.buf = nil - } +func (d *wsDecompressor) nextBuf() []byte { + // We still have remaining data in the first buffer + if d.off != len(d.bufs[0]) { + return d.bufs[0] + } + // We read the full first buffer. Reset offset. + d.off = 0 + // We were at the last buffer, so we are done. + if len(d.bufs) == 1 { + d.bufs = nil + return nil + } + // Here we move to the next buffer. 
+ d.bufs = d.bufs[1:] + return d.bufs[0] } -func (d *decompressorBuffer) ReadByte() (byte, error) { - if d.buf == nil { +func (d *wsDecompressor) ReadByte() (byte, error) { + if len(d.bufs) == 0 { return 0, io.EOF } - b := d.buf[d.off] + b := d.bufs[0][d.off] d.off++ - d.rem-- - d.checkRem() + d.nextBuf() return b, nil } +func (d *wsDecompressor) addBuf(b []byte) { + d.bufs = append(d.bufs, b) +} + +func (d *wsDecompressor) decompress() ([]byte, error) { + d.off = 0 + // As per https://tools.ietf.org/html/rfc7692#section-7.2.2 + // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader + // does not report unexpected EOF. + d.bufs = append(d.bufs, compressFinalBlock) + // Create or reset the decompressor with his object (wsDecompressor) + // that provides Read() and ReadByte() APIs that will consume from + // the compressed buffers (d.bufs). + if d.flate == nil { + d.flate = flate.NewReader(d) + } else { + d.flate.(flate.Resetter).Reset(d, nil) + } + // TODO: When Go 1.15 support is dropped, replace with io.ReadAll() + b, err := ioutil.ReadAll(d.flate) + // Now reset the compressed buffers list + d.bufs = nil + return b, err +} + func wsNewReader(r io.Reader) *websocketReader { return &websocketReader{r: r, ff: true} } @@ -254,29 +281,47 @@ func (r *websocketReader) Read(p []byte) (int, error) { } var b []byte + // This ensures that we get the full payload for this frame. b, pos, err = wsGet(r.r, buf, pos, rem) if err != nil { return 0, err } + // We read the full frame. rem = 0 + addToPending := true if r.fc { - br := newDecompressorBuffer(b) - if r.dc == nil { - r.dc = flate.NewReader(br) - } else { - r.dc.(flate.Resetter).Reset(br, nil) - } - // TODO: When Go 1.15 support is dropped, replace with io.ReadAll() - b, err = ioutil.ReadAll(r.dc) - if err != nil { - return 0, err + // Don't add to pending if we are not dealing with the final frame. + addToPending = r.ff + // Add the compressed payload buffer to the list. 
+ r.addCBuf(b) + // Decompress only when this is the final frame. + if r.ff { + b, err = r.dc.decompress() + if err != nil { + return 0, err + } + r.fc = false } - r.fc = false } - r.pending = append(r.pending, b) + // Add to the pending list if dealing with uncompressed frames or + // after we have received the full compressed message and decompressed it. + if addToPending { + r.pending = append(r.pending, b) + } + } + // In case of compression, there may be nothing to drain + if len(r.pending) > 0 { + return r.drainPending(p), nil + } + return 0, nil +} + +func (r *websocketReader) addCBuf(b []byte) { + if r.dc == nil { + r.dc = &wsDecompressor{} } - // At this point we should have pending slices. - return r.drainPending(p), nil + // Add a copy of the incoming buffer to the list of compressed buffers. + r.dc.addBuf(append([]byte(nil), b...)) } func (r *websocketReader) drainPending(p []byte) int { diff --git a/vendor/github.com/nats-io/stan.go/README.md b/vendor/github.com/nats-io/stan.go/README.md index f87d258e..0af2ce7b 100644 --- a/vendor/github.com/nats-io/stan.go/README.md +++ b/vendor/github.com/nats-io/stan.go/README.md @@ -3,8 +3,8 @@ NATS Streaming is an extremely performant, lightweight reliable streaming platform powered by [NATS](https://nats.io). 
[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) -[![Build Status](https://travis-ci.com/nats-io/stan.go.svg?branch=master)](https://travis-ci.com/github/nats-io/stan.go) -[![Coverage Status](https://coveralls.io/repos/nats-io/stan.go/badge.svg?branch=master)](https://coveralls.io/r/nats-io/stan.go?branch=master) +[![Build Status](https://travis-ci.com/nats-io/stan.go.svg?branch=main)](https://travis-ci.com/github/nats-io/stan.go) +[![Coverage Status](https://coveralls.io/repos/nats-io/stan.go/badge.svg?branch=main)](https://coveralls.io/r/nats-io/stan.go?branch=main) [![GoDoc](https://godoc.org/github.com/nats-io/stan.go?status.svg)](http://godoc.org/github.com/nats-io/stan.go) NATS Streaming provides the following high-level feature set: @@ -30,7 +30,7 @@ When using or transitioning to Go modules support: ```bash # Go client latest or explicit version go get github.com/nats-io/stan.go/@latest -go get github.com/nats-io/stan.go/@v0.9.0 +go get github.com/nats-io/stan.go/@v0.10.0 ``` ## Important things to know about reconnections. 
diff --git a/vendor/github.com/nats-io/stan.go/go_tests.mod b/vendor/github.com/nats-io/stan.go/go_tests.mod index d26a50d8..6f2dc3c9 100644 --- a/vendor/github.com/nats-io/stan.go/go_tests.mod +++ b/vendor/github.com/nats-io/stan.go/go_tests.mod @@ -4,8 +4,8 @@ go 1.14 require ( github.com/gogo/protobuf v1.3.2 - github.com/nats-io/nats-server/v2 v2.2.5 - github.com/nats-io/nats-streaming-server v0.21.3-0.20210521153059-e071c9354f65 - github.com/nats-io/nats.go v1.11.0 + github.com/nats-io/nats-server/v2 v2.3.3 + github.com/nats-io/nats-streaming-server v0.22.0 + github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 github.com/nats-io/nuid v1.0.1 ) diff --git a/vendor/github.com/nats-io/stan.go/go_tests.sum b/vendor/github.com/nats-io/stan.go/go_tests.sum index 7fb01f53..dc15cc9e 100644 --- a/vendor/github.com/nats-io/stan.go/go_tests.sum +++ b/vendor/github.com/nats-io/stan.go/go_tests.sum @@ -5,9 +5,11 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -17,10 +19,12 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -32,6 +36,7 @@ github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iP github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -41,6 +46,7 @@ 
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.12 h1:famVnQVu7QwryBN4jNseQdUKES71ZAOnB6UQQJPZvqk= github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -50,28 +56,29 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= -github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI= github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= -github.com/nats-io/nats-server/v2 v2.2.5 h1:OOPaldr3oi0JBpYHiEzsaUce0289YpuVIYjFIfBo030= -github.com/nats-io/nats-server/v2 v2.2.5/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= -github.com/nats-io/nats-streaming-server v0.21.3-0.20210521153059-e071c9354f65 h1:CBuz8Wd0V4j1/ZG7g/3di7YPnWBcySHhEOOW0Iq71UM= -github.com/nats-io/nats-streaming-server v0.21.3-0.20210521153059-e071c9354f65/go.mod h1:WLeptf8OwgKJ+Z9dQCIG8hOJA+9Gjd8Oj6AcWhXRGZE= -github.com/nats-io/nats.go v1.10.0/go.mod 
h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= -github.com/nats-io/nats.go v1.11.0 h1:L263PZkrmkRJRJT2YHU8GwWWvEvmr9/LUKuJTXsF32k= +github.com/nats-io/jwt/v2 v2.0.3 h1:i/O6cmIsjpcQyWDYNcq2JyZ3/VTF8SJ4JWluI5OhpvI= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= +github.com/nats-io/nats-server/v2 v2.3.3 h1:4++7wFzqYwATPWN1FD9l492TGxdtzDoT0moz2yh7BWg= +github.com/nats-io/nats-server/v2 v2.3.3/go.mod h1:3mtbaN5GkCo/Z5T3nNj0I0/W1fPkKzLiDC6jjWJKp98= +github.com/nats-io/nats-streaming-server v0.22.0 h1:2egnq86o9roTqUfELlqykf7ZZkNvRsXjVf4EbaLysHo= +github.com/nats-io/nats-streaming-server v0.22.0/go.mod h1:Jyu3eUQaUAjwd5TiBuLagKdQRofPrHoIXt1kL0U/e5o= github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 h1:9GqilBhZaR3xYis0JgMlJjNw933WIobdjKhilXm+Vls= +github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/nats-io/stan.go v0.8.3/go.mod h1:Ejm8bbHnMTSptU6uNMAVuxeapMJYBB/Ml3ej6z4GoSY= +github.com/nats-io/stan.go v0.9.0/go.mod h1:0jEuBXKauB1HHJswHM/lx05K48TJ1Yxj6VIfM4k+aB4= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= 
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -81,6 +88,7 @@ github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3x github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -88,13 +96,13 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a h1:kr2P4QFmQr29mSLA43kwrOcgcReGTfbE9N577tCTuBc= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -117,8 +125,10 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -132,10 +142,12 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= diff --git a/vendor/github.com/nats-io/stan.go/stan.go b/vendor/github.com/nats-io/stan.go/stan.go index a8b5631a..f9252554 100644 --- a/vendor/github.com/nats-io/stan.go/stan.go +++ b/vendor/github.com/nats-io/stan.go/stan.go @@ -26,7 +26,7 @@ import ( ) // Version is the NATS Streaming Go Client version -const Version = "0.9.0" +const Version = "0.10.0" const ( // DefaultNatsURL is the default URL 
the client connects to @@ -137,8 +137,16 @@ type Options struct { // the NATS streaming connection does NOT close this NATS connection. // It is the responsibility of the application to manage the lifetime of // the supplied NATS connection. + // + // DEPRECATED: Users should provide NATS options through NatsOptions() + // instead to configure the underlying NATS connection. NatsConn *nats.Conn + // NatsOptions is an array of NATS options to configure the NATS connection + // that will be created and owned by the library. Note that some options + // may be overridden by the library. + NatsOptions []nats.Option + // ConnectTimeout is the timeout for the initial Connect(). This value is also // used for some of the internal request/replies with the cluster. ConnectTimeout time.Duration @@ -246,6 +254,9 @@ func MaxPubAcksInflight(max int) Option { // NatsConn is an Option to set the underlying NATS connection to be used // by a streaming connection object. When such option is set, closing the // streaming connection does not close the provided NATS connection. +// +// DEPRECATED: Users should use NatsOptions instead to configure the +// underlying NATS Connection created by the Streaming connection. func NatsConn(nc *nats.Conn) Option { return func(o *Options) error { o.NatsConn = nc @@ -253,6 +264,16 @@ func NatsConn(nc *nats.Conn) Option { } } +// NatsOptions is an Option to provide the NATS options that will be used +// to create the underlying NATS connection to be used by a streaming +// connection object. +func NatsOptions(opts ...nats.Option) Option { + return func(o *Options) error { + o.NatsOptions = append([]nats.Option(nil), opts...) + return nil + } +} + // Pings is an Option to set the ping interval and max out values. // The interval needs to be at least 1 and represents the number // of seconds. 
@@ -357,15 +378,22 @@ func Connect(stanClusterID, clientID string, options ...Option) (Conn, error) { c.nc = c.opts.NatsConn // Create a NATS connection if it doesn't exist. if c.nc == nil { + nopts := c.opts.NatsOptions + nopts = append(nopts, nats.MaxReconnects(-1), nats.ReconnectBufSize(-1)) + // Set name only if not provided by the user... + var do nats.Options + for _, o := range nopts { + o(&do) + } + if do.Name == "" { + nopts = append(nopts, nats.Name(clientID)) + } // We will set the max reconnect attempts to -1 (infinite) // and the reconnect buffer to -1 to prevent any buffering // (which may cause a published message to be flushed on // reconnect while the API may have returned an error due // to PubAck timeout. - nc, err := nats.Connect(c.opts.NatsURL, - nats.Name(clientID), - nats.MaxReconnects(-1), - nats.ReconnectBufSize(-1)) + nc, err := nats.Connect(c.opts.NatsURL, nopts...) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 616a0d25..fa2bd5b5 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -18,6 +18,8 @@ include Makefile.common ./ttar -C $(dir $*) -x -f $*.ttar touch $@ +fixtures: fixtures/.unpacked + update_fixtures: rm -vf fixtures/.unpacked ./ttar -c -f fixtures.ttar fixtures/ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 3ac29c63..a1b1ca40 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -78,12 +78,12 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.7.0 +PROMU_VERSION ?= 0.12.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.18.0 +GOLANGCI_LINT_VERSION ?= 
v1.39.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -118,7 +118,7 @@ endif %: common-% ; .PHONY: common-all -common-all: precheck style check_license lint unused build test +common-all: precheck style check_license lint yamllint unused build test .PHONY: common-style common-style: @@ -198,6 +198,15 @@ else endif endif +.PHONY: common-yamllint +common-yamllint: + @echo ">> running yamllint on all YAML files in the repository" +ifeq (, $(shell which yamllint)) + @echo "yamllint not installed so skipping" +else + yamllint . +endif + # For backward-compatibility. .PHONY: common-staticcheck common-staticcheck: lint diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md index 55d1e326..43c37735 100644 --- a/vendor/github.com/prometheus/procfs/README.md +++ b/vendor/github.com/prometheus/procfs/README.md @@ -6,8 +6,8 @@ metrics from the pseudo-filesystems /proc and /sys. *WARNING*: This package is a work in progress. Its API may still break in backwards-incompatible ways without warnings. Use it at your own risk. 
-[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs) +[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) ## Usage diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go new file mode 100644 index 00000000..bf4f3b48 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/cmdline.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CmdLine returns the command line of the kernel. 
+func (fs FS) CmdLine() ([]string, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cmdline")) + if err != nil { + return nil, err + } + + return strings.Fields(string(data)), nil +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index e2acd6d4..d31a8260 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -31,7 +31,7 @@ // log.Fatalf("could not get process: %s", err) // } // -// stat, err := p.NewStat() +// stat, err := p.Stat() // if err != nil { // log.Fatalf("could not get process stat: %s", err) // } diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 1e76173d..e7d35069 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -644,6 +644,11 @@ Node 0, zone DMA32 759 572 791 475 194 45 12 0 Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/proc/cmdline +Lines: 1 +BOOT_IMAGE=/vmlinuz-5.11.0-22-generic root=UUID=456a0345-450d-4f7b-b7c9-43e3241d99ad ro quiet splash vt.handoff=7 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/cpuinfo Lines: 216 processor : 0 @@ -3455,6 +3460,460 @@ Mode: 664 Directory: fixtures/sys/class Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/drm +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/drm/card0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/drm/card0/device +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/drm/card0/device/aer_dev_correctable +Lines: 9 +RxErr 0 +BadTLP 0 +BadDLLP 0 +Rollover 0 +Timeout 0 +NonFatalErr 0 +CorrIntErr 0 +HeaderOF 0 +TOTAL_ERR_COR 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/aer_dev_fatal +Lines: 19 +Undefined 0 +DLP 0 +SDES 0 +TLP 0 +FCP 0 +CmpltTO 0 +CmpltAbrt 0 +UnxCmplt 0 +RxOF 0 +MalfTLP 0 +ECRC 0 +UnsupReq 0 +ACSViol 0 +UncorrIntErr 0 +BlockedTLP 0 +AtomicOpBlocked 0 +TLPBlockedErr 0 +PoisonTLPBlocked 0 +TOTAL_ERR_FATAL 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/aer_dev_nonfatal +Lines: 19 +Undefined 0 +DLP 0 +SDES 0 +TLP 0 +FCP 0 +CmpltTO 0 +CmpltAbrt 0 +UnxCmplt 0 +RxOF 0 +MalfTLP 0 +ECRC 0 +UnsupReq 0 +ACSViol 0 +UncorrIntErr 0 +BlockedTLP 0 +AtomicOpBlocked 0 +TLPBlockedErr 0 +PoisonTLPBlocked 0 +TOTAL_ERR_NONFATAL 0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/ari_enabled +Lines: 1 +0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/boot_vga +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/broken_parity_status +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/class +Lines: 1 +0x030000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/consistent_dma_mask_bits +Lines: 1 +44 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/current_link_speed +Lines: 1 +8.0 GT/s PCIe +Mode: 444 +# ttar - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/current_link_width +Lines: 1 +16 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/d3cold_allowed +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/device +Lines: 1 +0x687f +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/dma_mask_bits +Lines: 1 +44 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/driver_override +Lines: 1 +(null) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/enable +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/gpu_busy_percent +Lines: 1 +4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/irq +Lines: 1 +95 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/local_cpulist +Lines: 1 +0-15 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/local_cpus +Lines: 1 +0000ffff +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/max_link_speed +Lines: 1 +8.0 GT/s PCIe +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/max_link_width +Lines: 1 +16 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - +Path: fixtures/sys/class/drm/card0/device/mem_info_gtt_total +Lines: 1 +8573157376 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/mem_info_gtt_used +Lines: 1 +144560128 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/mem_info_vis_vram_total +Lines: 1 +8573157376 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/mem_info_vis_vram_used +Lines: 1 +1490378752 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/mem_info_vram_total +Lines: 1 +8573157376 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/mem_info_vram_used +Lines: 1 +1490378752 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/mem_info_vram_vendor +Lines: 1 +samsung +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/modalias +Lines: 1 +pci:v00001002d0000687Fsv00001043sd000004C4bc03sc00i00 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/msi_bus +Lines: 1 +1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/numa_node +Lines: 1 +-1 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pcie_bw +Lines: 1 +6641 815 256 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pcie_replay_count +Lines: 1 +0 
+Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/power_dpm_force_performance_level +Lines: 1 +manual +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/power_dpm_state +Lines: 1 +performance +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/power_state +Lines: 1 +D0 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_cur_state +Lines: 1 +1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_dpm_dcefclk +Lines: 5 +0: 600Mhz * +1: 720Mhz +2: 800Mhz +3: 847Mhz +4: 900Mhz +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_dpm_mclk +Lines: 4 +0: 167Mhz * +1: 500Mhz +2: 800Mhz +3: 945Mhz +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_dpm_pcie +Lines: 2 +0: 8.0GT/s, x16 +1: 8.0GT/s, x16 * +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_dpm_sclk +Lines: 8 +0: 852Mhz * +1: 991Mhz +2: 1084Mhz +3: 1138Mhz +4: 1200Mhz +5: 1401Mhz +6: 1536Mhz +7: 1630Mhz +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_dpm_socclk +Lines: 8 +0: 600Mhz +1: 720Mhz * +2: 800Mhz +3: 847Mhz +4: 900Mhz +5: 960Mhz +6: 1028Mhz +7: 1107Mhz +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_features +Lines: 32 +Current ppfeatures: 0x0000000019a1ff4f +FEATURES 
BITMASK ENABLEMENT +DPM_PREFETCHER 0x0000000000000001 Y +GFXCLK_DPM 0x0000000000000002 Y +UCLK_DPM 0x0000000000000004 Y +SOCCLK_DPM 0x0000000000000008 Y +UVD_DPM 0x0000000000000010 N +VCE_DPM 0x0000000000000020 N +ULV 0x0000000000000040 Y +MP0CLK_DPM 0x0000000000000080 N +LINK_DPM 0x0000000000000100 Y +DCEFCLK_DPM 0x0000000000000200 Y +AVFS 0x0000000000000400 Y +GFXCLK_DS 0x0000000000000800 Y +SOCCLK_DS 0x0000000000001000 Y +LCLK_DS 0x0000000000002000 Y +PPT 0x0000000000004000 Y +TDC 0x0000000000008000 Y +THERMAL 0x0000000000010000 Y +GFX_PER_CU_CG 0x0000000000020000 N +RM 0x0000000000040000 N +DCEFCLK_DS 0x0000000000080000 N +ACDC 0x0000000000100000 N +VR0HOT 0x0000000000200000 Y +VR1HOT 0x0000000000400000 N +FW_CTF 0x0000000000800000 Y +LED_DISPLAY 0x0000000001000000 Y +FAN_CONTROL 0x0000000002000000 N +FAST_PPT 0x0000000004000000 N +DIDT 0x0000000008000000 Y +ACG 0x0000000010000000 Y +PCC_LIMIT 0x0000000020000000 N +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_force_state +Lines: 1 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_mclk_od +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_num_states +Lines: 3 +states: 2 +0 boot +1 performance +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_od_clk_voltage +Lines: 18 +OD_SCLK: +0: 852Mhz 800mV +1: 991Mhz 900mV +2: 1084Mhz 950mV +3: 1138Mhz 1000mV +4: 1200Mhz 1050mV +5: 1401Mhz 1100mV +6: 1536Mhz 1150mV +7: 1630Mhz 1200mV +OD_MCLK: +0: 167Mhz 800mV +1: 500Mhz 800mV +2: 800Mhz 950mV +3: 945Mhz 1100mV +OD_RANGE: +SCLK: 852MHz 2400MHz +MCLK: 167MHz 1500MHz +VDDC: 800mV 1200mV +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - +Path: fixtures/sys/class/drm/card0/device/pp_power_profile_mode +Lines: 8 +NUM MODE_NAME BUSY_SET_POINT FPS USE_RLC_BUSY MIN_ACTIVE_LEVEL + 0 BOOTUP_DEFAULT : 70 60 0 0 + 1 3D_FULL_SCREEN*: 70 60 1 3 + 2 POWER_SAVING : 90 60 0 0 + 3 VIDEO : 70 60 0 0 + 4 VR : 70 90 0 0 + 5 COMPUTE : 30 60 0 6 + 6 CUSTOM : 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/pp_sclk_od +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/product_name +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/product_number +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/resource +Lines: 13 +0x0000007c00000000 0x0000007dffffffff 0x000000000014220c +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000007e00000000 0x0000007e0fffffff 0x000000000014220c +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x000000000000d000 0x000000000000d0ff 0x0000000000040101 +0x00000000fcd00000 0x00000000fcd7ffff 0x0000000000040200 +0x00000000fcd80000 0x00000000fcd9ffff 0x0000000000046200 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +0x0000000000000000 0x0000000000000000 0x0000000000000000 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/revision +Lines: 1 +0xc1 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/class/drm/card0/device/serial_number +Lines: 1 + +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/subsystem_device +Lines: 1 +0x04c4 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/subsystem_vendor +Lines: 1 +0x1043 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/thermal_throttling_logging +Lines: 1 +0000:09:00.0: thermal throttling logging enabled, with interval 60 seconds +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/uevent +Lines: 6 +DRIVER=amdgpu +PCI_CLASS=30000 +PCI_ID=1002:687F +PCI_SUBSYS_ID=1043:04C4 +PCI_SLOT_NAME=0000:09:00.0 +MODALIAS=pci:v00001002d0000687Fsv00001043sd000004C4bc03sc00i00 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/unique_id +Lines: 1 +0123456789abcdef +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/vbios_version +Lines: 1 +115-D050PIL-100 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/drm/card0/device/vendor +Lines: 1 +0x1002 +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/fc_host Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -3592,152 +4051,274 @@ Mode: 644 Directory: fixtures/sys/class/infiniband Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0 +Directory: fixtures/sys/class/infiniband/hfi1_0 Mode: 755 # ttar - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/board_id -Lines: 1 -SM_1141000001000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver +Path: fixtures/sys/class/infiniband/hfi1_0/board_id Lines: 1 -2.31.5050 +HPE 100Gb 1-port OP101 QSFP28 x16 PCIe Gen3 with Intel Omni-Path Adapter Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/hca_type +Path: fixtures/sys/class/infiniband/hfi1_0/fw_ver Lines: 1 -MT4099 +1.27.0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports +Directory: fixtures/sys/class/infiniband/hfi1_0/ports Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 +Directory: fixtures/sys/class/infiniband/hfi1_0/ports/1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters +Directory: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/VL15_dropped Lines: 1 0 -Mode: 664 +Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/excessive_buffer_overrun_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Path: 
fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/link_downed Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/link_error_recovery Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/local_link_integrity_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_constraint_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_data Lines: 1 -2221223609 +345091702026 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_packets Lines: 1 -87169372 +638036947 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_remote_physical_errors Lines: 1 0 Mode: 644 # ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_switch_relay_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_constraint_errors Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_data Lines: 1 -26509113295 +273558326543 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_discards Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_packets Lines: 1 -85734114 +568318856 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_wait Lines: 1 -3599 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/symbol_error Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/phys_state Lines: 1 5: LinkUp Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/rate Lines: 1 -40 Gb/sec (4X QDR) +100 Gb/sec (4X EDR) Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state +Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/state Lines: 1 4: ACTIVE Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters +Directory: fixtures/sys/class/infiniband/mlx4_0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors +Path: fixtures/sys/class/infiniband/mlx4_0/board_id Lines: 1 -0 +SM_1141000001000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver Lines: 1 -0 +2.31.5050 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/hca_type +Lines: 1 +MT4099 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports +Mode: 755 +# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data +Lines: 1 +2221223609 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets +Lines: 1 +87169372 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data +Lines: 1 +26509113295 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets +Lines: 1 +85734114 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait +Lines: 1 +3599 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state +Lines: 1 +5: LinkUp +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate +Lines: 1 +40 Gb/sec (4X QDR) +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state +Lines: 1 +4: ACTIVE +Mode: 644 +# ttar - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped +Lines: 1 +0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors +Lines: 1 +0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed +Lines: 1 +0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery @@ -3960,6 +4541,32 @@ Lines: 1 1 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/nvme +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/nvme/nvme0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/nvme/nvme0/firmware_rev +Lines: 1 +1B2QEXP7 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/nvme/nvme0/model +Lines: 1 +Samsung SSD 970 PRO 512GB +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/nvme/nvme0/serial +Lines: 1 +S680HF8N190894I +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/nvme/nvme0/state +Lines: 1 +live +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - Directory: fixtures/sys/class/power_supply Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -4164,6 +4771,33 @@ Path: fixtures/sys/class/powercap/intel-rapl:a/uevent Lines: 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/class/scsi_tape +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/scsi_tape/nst0 +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/scsi_tape/nst0a +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/scsi_tape/nst0l +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/scsi_tape/nst0m +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/scsi_tape/st0 +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/scsi_tape/st0a +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/scsi_tape/st0l +SymlinkTo: 
../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/class/scsi_tape/st0m +SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/thermal Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -4575,6 +5209,475 @@ Mode: 444 Directory: fixtures/sys/devices/pci0000:00 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/in_flight +Lines: 1 +1EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/io_ns +Lines: 1 +9247011087720EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/other_cnt +Lines: 1 +1409EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_byte_cnt +Lines: 1 +979383912EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_cnt +Lines: 1 +3741EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_ns +Lines: 1 +33788355744EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/resid_cnt +Lines: 1 +19EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_byte_cnt +Lines: 1 +1496246784000EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_cnt +Lines: 1 +53772916EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_ns +Lines: 1 +5233597394395EOF +Mode: 444 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: 
fixtures/sys/devices/pci0000:00/0000:00:0d.0 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -4978,35 +6081,6 @@ Mode: 644 Directory: fixtures/sys/devices/system Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node/node1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/node/node1/vmstat -Lines: 6 -nr_free_pages 1 -nr_zone_inactive_anon 2 -nr_zone_active_anon 3 -nr_zone_inactive_file 4 -nr_zone_active_file 5 -nr_zone_unevictable 6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/node/node2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/node/node2/vmstat -Lines: 6 -nr_free_pages 7 -nr_zone_inactive_anon 8 -nr_zone_active_anon 9 -nr_zone_inactive_file 10 -nr_zone_active_file 11 -nr_zone_unevictable 12 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/devices/system/clocksource Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -5254,6 +6328,35 @@ Mode: 644 Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node/node1 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/node/node1/vmstat +Lines: 6 +nr_free_pages 1 +nr_zone_inactive_anon 
2 +nr_zone_active_anon 3 +nr_zone_inactive_file 4 +nr_zone_active_file 5 +nr_zone_unevictable 6 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/sys/devices/system/node/node2 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/sys/devices/system/node/node2/vmstat +Lines: 6 +nr_free_pages 7 +nr_zone_inactive_anon 8 +nr_zone_active_anon 9 +nr_zone_inactive_file 10 +nr_zone_active_file 11 +nr_zone_unevictable 12 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 4c4493bf..f0b9e5f7 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -22,9 +22,12 @@ import ( ) var ( - statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) - componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) + recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) + recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) ) // MDStat holds info parsed from /proc/mdstat. @@ -39,12 +42,20 @@ type MDStat struct { DisksTotal int64 // Number of failed disks. DisksFailed int64 + // Number of "down" disks. (the _ indicator in the status line) + DisksDown int64 // Spare disks in the device. DisksSpare int64 // Number of blocks the device holds. BlocksTotal int64 // Number of blocks on the device that are in sync. 
BlocksSynced int64 + // progress percentage of current sync + BlocksSyncedPct float64 + // estimated finishing time for current sync (in minutes) + BlocksSyncedFinishTime float64 + // current sync speed (in Kilobytes/sec) + BlocksSyncedSpeed float64 // Name of md component devices Devices []string } @@ -91,7 +102,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Failed disks have the suffix (F) & Spare disks have the suffix (S). fail := int64(strings.Count(line, "(F)")) spare := int64(strings.Count(line, "(S)")) - active, total, size, err := evalStatusLine(lines[i], lines[i+1]) + active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { return nil, fmt.Errorf("error parsing md device lines: %w", err) @@ -105,6 +116,9 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. syncedBlocks := size + speed := float64(0) + finish := float64(0) + pct := float64(0) recovering := strings.Contains(lines[syncLineIdx], "recovery") resyncing := strings.Contains(lines[syncLineIdx], "resync") checking := strings.Contains(lines[syncLineIdx], "check") @@ -124,7 +138,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { strings.Contains(lines[syncLineIdx], "DELAYED") { syncedBlocks = 0 } else { - syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) + syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) } @@ -132,69 +146,104 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } mdStats = append(mdStats, MDStat{ - Name: mdName, - ActivityState: state, - DisksActive: active, - DisksFailed: fail, - DisksSpare: spare, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - Devices: evalComponentDevices(deviceFields), + Name: mdName, + ActivityState: 
state, + DisksActive: active, + DisksFailed: fail, + DisksDown: down, + DisksSpare: spare, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + BlocksSyncedPct: pct, + BlocksSyncedFinishTime: finish, + BlocksSyncedSpeed: speed, + Devices: evalComponentDevices(deviceFields), }) } return mdStats, nil } -func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { +func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { sizeStr := strings.Fields(statusLine)[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { // In the device deviceLine, only disks have a number associated with them in []. total = int64(strings.Count(deviceLine, "[")) - return total, total, size, nil + return total, total, 0, size, nil } if strings.Contains(deviceLine, "inactive") { - return 0, 0, size, nil + return 0, 0, 0, size, nil } matches := statusLineRE.FindStringSubmatch(statusLine) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + if len(matches) != 5 { + return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) } + down = int64(strings.Count(matches[4], "_")) - return active, total, size, nil + return 
active, total, down, size, nil } -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { - matches := recoveryLineRE.FindStringSubmatch(recoveryLine) +func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { + matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) } syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) if err != nil { - return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) + return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) } - return syncedBlocks, nil + // Get percentage complete + matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) + } + pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) + if err != nil { + return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + } + + // Get time expected left to complete + matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. 
finish time: %s", recoveryLine) + } + finish, err = strconv.ParseFloat(matches[1], 64) + if err != nil { + return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + } + + // Get recovery speed + matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) + if len(matches) != 2 { + return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) + } + speed, err = strconv.ParseFloat(matches[1], 64) + if err != nil { + return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + } + + return syncedBlocks, pct, finish, speed, nil } func evalComponentDevices(deviceFields []string) []string { diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index ac01dd84..8c9ee3de 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -65,6 +65,7 @@ type ( TxQueue uint64 RxQueue uint64 UID uint64 + Inode uint64 } ) @@ -150,9 +151,9 @@ func parseIP(hexIP string) (net.IP, error) { // parseNetIPSocketLine parses a single line, represented by a list of fields. 
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { line := &netIPSocketLine{} - if len(fields) < 8 { + if len(fields) < 10 { return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 8 columns %q", + "cannot parse net socket line as it has less then 10 columns %q", strings.Join(fields, " "), ) } @@ -216,5 +217,10 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) } + // inode + if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { + return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) + } + return line, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index 0094a13c..be45b798 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -90,7 +90,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) { // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, // so the len of the returned struct is equal to the number of active hierarchies on this system func (p Proc) Cgroups() ([]Cgroup, error) { - data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID)) + data, err := util.ReadFileNoStat(p.path("cgroup")) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 67ca0e9f..d3a860e4 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -100,6 +100,15 @@ type ProcStat struct { VSize uint // Resident set size in pages. RSS int + // Soft limit in bytes on the rss of the process. 
+ RSSLimit uint64 + // Real-time scheduling priority, a number in the range 1 to 99 for processes + // scheduled under a real-time policy, or 0, for non-real-time processes. + RTPriority uint + // Scheduling policy. + Policy uint + // Aggregated block I/O delays, measured in clock ticks (centiseconds). + DelayAcctBlkIOTicks uint64 proc fs.FS } @@ -155,6 +164,24 @@ func (p Proc) Stat() (ProcStat, error) { &s.Starttime, &s.VSize, &s.RSS, + &s.RSSLimit, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &ignore, + &s.RTPriority, + &s.Policy, + &s.DelayAcctBlkIOTicks, ) if err != nil { return ProcStat{}, err diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index 0b9bb679..209e2ac9 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -99,7 +99,6 @@ func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { continue } if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { - zoneinfoElement.Zone = "" continue } parts := strings.Fields(strings.TrimSpace(line)) diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore index 3bcd8cba..18312f00 100644 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -3,3 +3,5 @@ *.swp /bin/ cover.out +/.idea +*.iml diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml index 257dfdfe..452601e4 100644 --- a/vendor/go.etcd.io/bbolt/.travis.yml +++ b/vendor/go.etcd.io/bbolt/.travis.yml @@ -4,9 +4,10 @@ go_import_path: go.etcd.io/bbolt sudo: false go: -- 1.12 +- 1.15 before_install: +- go get -v golang.org/x/sys/unix - go get -v honnef.co/go/tools/... 
- go get -v github.com/kisielk/errcheck diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 2968aaa6..21ecf48f 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -2,8 +2,6 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" -default: build - race: @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" @echo "array freelist test" diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index c9e64b1a..f1b4a7b2 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -908,12 +908,14 @@ Below is a list of public, open source projects that use Bolt: * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. * [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. +* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. 
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. @@ -938,9 +940,8 @@ Below is a list of public, open source projects that use Bolt: * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. -* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. * [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service * [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. 
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go index 2938fed5..4e5f65cc 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -7,6 +7,8 @@ import ( "syscall" "time" "unsafe" + + "golang.org/x/sys/unix" ) // flock acquires an advisory lock on a file descriptor. @@ -49,13 +51,13 @@ func funlock(db *DB) error { // mmap memory maps a DB's data file. func mmap(db *DB, sz int) error { // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) if err != nil { return err } // Advise the kernel that the mmap is accessed randomly. - err = madvise(b, syscall.MADV_RANDOM) + err = unix.Madvise(b, syscall.MADV_RANDOM) if err != nil && err != syscall.ENOSYS { // Ignore not implemented error in kernel because it still works. return fmt.Errorf("madvise: %s", err) @@ -76,18 +78,9 @@ func munmap(db *DB) error { } // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) + err := unix.Munmap(db.dataref) db.dataref = nil db.data = nil db.datasz = 0 return err } - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/go.etcd.io/bbolt/compact.go b/vendor/go.etcd.io/bbolt/compact.go new file mode 100644 index 00000000..e4fe91b0 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/compact.go @@ -0,0 +1,114 @@ +package bbolt + +// Compact will create a copy of the source DB and in the destination DB. This may +// reclaim space that the source database no longer has use for. 
txMaxSize can be +// used to limit the transactions size of this process and may trigger intermittent +// commits. A value of zero will ignore transaction sizes. +// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349 +func Compact(dst, src *DB, txMaxSize int64) error { + // commit regularly, or we'll run out of memory for large datasets if using one transaction. + var size int64 + tx, err := dst.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { + // On each key/value, check if we have exceeded tx size. + sz := int64(len(k) + len(v)) + if size+sz > txMaxSize && txMaxSize != 0 { + // Commit previous transaction. + if err := tx.Commit(); err != nil { + return err + } + + // Start new transaction. + tx, err = dst.Begin(true) + if err != nil { + return err + } + size = 0 + } + size += sz + + // Create bucket on the root transaction if this is the first level. + nk := len(keys) + if nk == 0 { + bkt, err := tx.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Create buckets on subsequent levels, if necessary. + b := tx.Bucket(keys[0]) + if nk > 1 { + for _, k := range keys[1:] { + b = b.Bucket(k) + } + } + + // Fill the entire page for best compaction. + b.FillPercent = 1.0 + + // If there is no value then this is a bucket call. + if v == nil { + bkt, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := bkt.SetSequence(seq); err != nil { + return err + } + return nil + } + + // Otherwise treat it as a key/value pair. + return b.Put(k, v) + }); err != nil { + return err + } + + return tx.Commit() +} + +// walkFunc is the type of the function called for keys (buckets and "normal" +// values) discovered by Walk. 
keys is the list of keys to descend to the bucket +// owning the discovered key/value pair k/v. +type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error + +// walk walks recursively the bolt database db, calling walkFn for each key it finds. +func walk(db *DB, walkFn walkFunc) error { + return db.View(func(tx *Tx) error { + return tx.ForEach(func(name []byte, b *Bucket) error { + return walkBucket(b, nil, name, nil, b.Sequence(), walkFn) + }) + }) +} + +func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { + // Execute callback. + if err := fn(keypath, k, v, seq); err != nil { + return err + } + + // If this is not a bucket then stop. + if v != nil { + return nil + } + + // Iterate over each child key/value. + keypath = append(keypath, k) + return b.ForEach(func(k, v []byte) error { + if v == nil { + bkt := b.Bucket(k) + return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) + } + return walkBucket(b, keypath, k, v, b.Sequence(), fn) + }) +} diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 80b0095c..a798c390 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -120,6 +120,12 @@ type DB struct { // of truncate() and fsync() when growing the data file. AllocSize int + // Mlock locks database file in memory when set to true. + // It prevents major page faults, however used memory can't be reclaimed. + // + // Supported only on Unix via mlock/munlock syscalls. + Mlock bool + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -188,6 +194,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.MmapFlags = options.MmapFlags db.NoFreelistSync = options.NoFreelistSync db.FreelistType = options.FreelistType + db.Mlock = options.Mlock // Set default values for later DB operations. 
db.MaxBatchSize = DefaultMaxBatchSize @@ -337,7 +344,8 @@ func (db *DB) mmap(minsz int) error { } // Ensure the size is at least the minimum size. - var size = int(info.Size()) + fileSize := int(info.Size()) + var size = fileSize if size < minsz { size = minsz } @@ -346,6 +354,13 @@ func (db *DB) mmap(minsz int) error { return err } + if db.Mlock { + // Unlock db memory + if err := db.munlock(fileSize); err != nil { + return err + } + } + // Dereference all mmap references before unmapping. if db.rwtx != nil { db.rwtx.root.dereference() @@ -361,6 +376,13 @@ func (db *DB) mmap(minsz int) error { return err } + if db.Mlock { + // Don't allow swapping of data file + if err := db.mlock(fileSize); err != nil { + return err + } + } + // Save references to the meta pages. db.meta0 = db.page(0).meta() db.meta1 = db.page(1).meta() @@ -422,12 +444,36 @@ func (db *DB) mmapSize(size int) (int, error) { return int(sz), nil } +func (db *DB) munlock(fileSize int) error { + if err := munlock(db, fileSize); err != nil { + return fmt.Errorf("munlock error: " + err.Error()) + } + return nil +} + +func (db *DB) mlock(fileSize int) error { + if err := mlock(db, fileSize); err != nil { + return fmt.Errorf("mlock error: " + err.Error()) + } + return nil +} + +func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error { + if err := db.munlock(fileSizeFrom); err != nil { + return err + } + if err := db.mlock(fileSizeTo); err != nil { + return err + } + return nil +} + // init creates a new database file and initializes its meta pages. func (db *DB) init() error { // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf[:], pgid(i)) + p := db.pageInBuffer(buf, pgid(i)) p.id = pgid(i) p.flags = metaPageFlag @@ -444,13 +490,13 @@ func (db *DB) init() error { } // Write an empty freelist at page 3. 
- p := db.pageInBuffer(buf[:], pgid(2)) + p := db.pageInBuffer(buf, pgid(2)) p.id = pgid(2) p.flags = freelistPageFlag p.count = 0 // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf[:], pgid(3)) + p = db.pageInBuffer(buf, pgid(3)) p.id = pgid(3) p.flags = leafPageFlag p.count = 0 @@ -462,6 +508,7 @@ func (db *DB) init() error { if err := fdatasync(db); err != nil { return err } + db.filesz = len(buf) return nil } @@ -973,6 +1020,12 @@ func (db *DB) grow(sz int) error { if err := db.file.Sync(); err != nil { return fmt.Errorf("file sync error: %s", err) } + if db.Mlock { + // unlock old file and lock new one + if err := db.mrelock(db.filesz, sz); err != nil { + return fmt.Errorf("mlock/munlock error: %s", err) + } + } } db.filesz = sz @@ -1064,6 +1117,11 @@ type Options struct { // OpenFile is used to open files. It defaults to os.OpenFile. This option // is useful for writing hermetic tests. OpenFile func(string, int, os.FileMode) (*os.File, error) + + // Mlock locks database file in memory when set to true. + // It prevents potential page faults, however + // used memory can't be reclaimed. (UNIX only) + Mlock bool } // DefaultOptions represent the options used if nil options are passed into Open(). 
diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go index 02ef2be0..dbd67a1e 100644 --- a/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ b/vendor/go.etcd.io/bbolt/freelist_hmap.go @@ -4,7 +4,7 @@ import "sort" // hashmapFreeCount returns count of free pages(hashmap version) func (f *freelist) hashmapFreeCount() int { - // use the forwardmap to get the total count + // use the forwardMap to get the total count count := 0 for _, size := range f.forwardMap { count += int(size) @@ -41,7 +41,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { for pid := range bm { // remove the initial - f.delSpan(pid, uint64(size)) + f.delSpan(pid, size) f.allocs[pid] = txid @@ -51,7 +51,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { f.addSpan(pid+pgid(n), remain) for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+pgid(i)) + delete(f.cache, pid+i) } return pid } diff --git a/vendor/go.etcd.io/bbolt/go.mod b/vendor/go.etcd.io/bbolt/go.mod index c2366dae..96355a69 100644 --- a/vendor/go.etcd.io/bbolt/go.mod +++ b/vendor/go.etcd.io/bbolt/go.mod @@ -2,4 +2,4 @@ module go.etcd.io/bbolt go 1.12 -require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 +require golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d diff --git a/vendor/go.etcd.io/bbolt/go.sum b/vendor/go.etcd.io/bbolt/go.sum index 4ad15a48..c13f8f47 100644 --- a/vendor/go.etcd.io/bbolt/go.sum +++ b/vendor/go.etcd.io/bbolt/go.sum @@ -1,2 +1,2 @@ -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d h1:L/IKR6COd7ubZrs2oTnTi73IhgqJ71c9s80WsQnh0Es= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go 
new file mode 100644 index 00000000..6a6c7b35 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -0,0 +1,36 @@ +// +build !windows + +package bbolt + +import "golang.org/x/sys/unix" + +// mlock locks memory of db file +func mlock(db *DB, fileSize int) error { + sizeToLock := fileSize + if sizeToLock > db.datasz { + // Can't lock more than mmaped slice + sizeToLock = db.datasz + } + if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil { + return err + } + return nil +} + +//munlock unlocks memory of db file +func munlock(db *DB, fileSize int) error { + if db.dataref == nil { + return nil + } + + sizeToUnlock := fileSize + if sizeToUnlock > db.datasz { + // Can't unlock more than mmaped slice + sizeToUnlock = db.datasz + } + + if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil { + return err + } + return nil +} diff --git a/vendor/go.etcd.io/bbolt/mlock_windows.go b/vendor/go.etcd.io/bbolt/mlock_windows.go new file mode 100644 index 00000000..b4a36a49 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/mlock_windows.go @@ -0,0 +1,11 @@ +package bbolt + +// mlock locks memory of db file +func mlock(_ *DB, _ int) error { + panic("mlock is supported only on UNIX systems") +} + +//munlock unlocks memory of db file +func munlock(_ *DB, _ int) error { + panic("munlock is supported only on UNIX systems") +} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 4b1a64a8..869d4120 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -188,7 +188,6 @@ func (tx *Tx) Commit() error { } // If strict mode is enabled then perform a consistency check. - // Only the first consistency error is reported in the panic. 
if tx.db.StrictMode { ch := tx.Check() var errs []string @@ -393,7 +392,7 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { return err } - err = tx.Copy(f) + _, err = tx.WriteTo(f) if err != nil { _ = f.Close() return err diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go new file mode 100644 index 00000000..9d3fffa8 --- /dev/null +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -0,0 +1,789 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses +// are signed messages attesting to the validity of a certificate for a small +// period of time. This is used to manage revocation for X.509 certificates. +package ocsp // import "golang.org/x/crypto/ocsp" + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "strconv" + "time" +) + +var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) + +// ResponseStatus contains the result of an OCSP request. See +// https://tools.ietf.org/html/rfc6960#section-2.3 +type ResponseStatus int + +const ( + Success ResponseStatus = 0 + Malformed ResponseStatus = 1 + InternalError ResponseStatus = 2 + TryLater ResponseStatus = 3 + // Status code four is unused in OCSP. 
See + // https://tools.ietf.org/html/rfc6960#section-4.2.1 + SignatureRequired ResponseStatus = 5 + Unauthorized ResponseStatus = 6 +) + +func (r ResponseStatus) String() string { + switch r { + case Success: + return "success" + case Malformed: + return "malformed" + case InternalError: + return "internal error" + case TryLater: + return "try later" + case SignatureRequired: + return "signature required" + case Unauthorized: + return "unauthorized" + default: + return "unknown OCSP status: " + strconv.Itoa(int(r)) + } +} + +// ResponseError is an error that may be returned by ParseResponse to indicate +// that the response itself is an error, not just that it's indicating that a +// certificate is revoked, unknown, etc. +type ResponseError struct { + Status ResponseStatus +} + +func (r ResponseError) Error() string { + return "ocsp: error from server: " + r.Status.String() +} + +// These are internal structures that reflect the ASN.1 structure of an OCSP +// response. See RFC 2560, section 4.2. 
+ +type certID struct { + HashAlgorithm pkix.AlgorithmIdentifier + NameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// https://tools.ietf.org/html/rfc2560#section-4.1.1 +type ocspRequest struct { + TBSRequest tbsRequest +} + +type tbsRequest struct { + Version int `asn1:"explicit,tag:0,default:0,optional"` + RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` + RequestList []request +} + +type request struct { + Cert certID +} + +type responseASN1 struct { + Status asn1.Enumerated + Response responseBytes `asn1:"explicit,tag:0,optional"` +} + +type responseBytes struct { + ResponseType asn1.ObjectIdentifier + Response []byte +} + +type basicResponse struct { + TBSResponseData responseData + SignatureAlgorithm pkix.AlgorithmIdentifier + Signature asn1.BitString + Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` +} + +type responseData struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:0,explicit,tag:0"` + RawResponderID asn1.RawValue + ProducedAt time.Time `asn1:"generalized"` + Responses []singleResponse +} + +type singleResponse struct { + CertID certID + Good asn1.Flag `asn1:"tag:0,optional"` + Revoked revokedInfo `asn1:"tag:1,optional"` + Unknown asn1.Flag `asn1:"tag:2,optional"` + ThisUpdate time.Time `asn1:"generalized"` + NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` + SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` +} + +type revokedInfo struct { + RevocationTime time.Time `asn1:"generalized"` + Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` +} + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + 
oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} +) + +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +var signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash +}{ + {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, 
crypto.SHA384}, + {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType x509.PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = x509.RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.RawValue{ + Tag: 5, + } + + case *ecdsa.PublicKey: + pubType = x509.ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + default: + err = errors.New("x509: only RSA and ECDSA keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// TODO(agl): this is taken from crypto/x509 and so should probably be exported +// from crypto/x509 or crypto/x509/pkix. 
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { + for _, details := range signatureAlgorithmDetails { + if oid.Equal(details.oid) { + return details.algo + } + } + return x509.UnknownSignatureAlgorithm +} + +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target) { + return hash + } + } + return crypto.Hash(0) +} + +func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { + for hash, oid := range hashOIDs { + if hash == target { + return oid + } + } + return nil +} + +// This is the exposed reflection of the internal OCSP structures. + +// The status values that can be expressed in OCSP. See RFC 6960. +const ( + // Good means that the certificate is valid. + Good = iota + // Revoked means that the certificate has been deliberately revoked. + Revoked + // Unknown means that the OCSP responder doesn't know about the certificate. + Unknown + // ServerFailed is unused and was never used (see + // https://go-review.googlesource.com/#/c/18944). ParseResponse will + // return a ResponseError when an error response is parsed. + ServerFailed +) + +// The enumerated reasons for revoking a certificate. See RFC 5280. +const ( + Unspecified = 0 + KeyCompromise = 1 + CACompromise = 2 + AffiliationChanged = 3 + Superseded = 4 + CessationOfOperation = 5 + CertificateHold = 6 + + RemoveFromCRL = 8 + PrivilegeWithdrawn = 9 + AACompromise = 10 +) + +// Request represents an OCSP request. See RFC 6960. +type Request struct { + HashAlgorithm crypto.Hash + IssuerNameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// Marshal marshals the OCSP request to ASN.1 DER encoded form. 
+func (req *Request) Marshal() ([]byte, error) { + hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm) + if hashAlg == nil { + return nil, errors.New("Unknown hash algorithm") + } + return asn1.Marshal(ocspRequest{ + tbsRequest{ + Version: 0, + RequestList: []request{ + { + Cert: certID{ + pkix.AlgorithmIdentifier{ + Algorithm: hashAlg, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + req.IssuerNameHash, + req.IssuerKeyHash, + req.SerialNumber, + }, + }, + }, + }, + }) +} + +// Response represents an OCSP response containing a single SingleResponse. See +// RFC 6960. +type Response struct { + // Status is one of {Good, Revoked, Unknown} + Status int + SerialNumber *big.Int + ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time + RevocationReason int + Certificate *x509.Certificate + // TBSResponseData contains the raw bytes of the signed response. If + // Certificate is nil then this can be used to verify Signature. + TBSResponseData []byte + Signature []byte + SignatureAlgorithm x509.SignatureAlgorithm + + // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash. + // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512. + // If zero, the default is crypto.SHA1. + IssuerHash crypto.Hash + + // RawResponderName optionally contains the DER-encoded subject of the + // responder certificate. Exactly one of RawResponderName and + // ResponderKeyHash is set. + RawResponderName []byte + // ResponderKeyHash optionally contains the SHA-1 hash of the + // responder's public key. Exactly one of RawResponderName and + // ResponderKeyHash is set. + ResponderKeyHash []byte + + // Extensions contains raw X.509 extensions from the singleExtensions field + // of the OCSP response. When parsing certificates, this can be used to + // extract non-critical extensions that are not parsed by this package. When + // marshaling OCSP responses, the Extensions field is ignored, see + // ExtraExtensions. 
+ Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any marshaled + // OCSP response (in the singleExtensions field). Values override any + // extensions that would otherwise be produced based on the other fields. The + // ExtraExtensions field is not populated when parsing certificates, see + // Extensions. + ExtraExtensions []pkix.Extension +} + +// These are pre-serialized error responses for the various non-success codes +// defined by OCSP. The Unauthorized code in particular can be used by an OCSP +// responder that supports only pre-signed responses as a response to requests +// for certificates with unknown status. See RFC 5019. +var ( + MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} + InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} + TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} + SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} + UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} +) + +// CheckSignatureFrom checks that the signature in resp is a valid signature +// from issuer. This should only be used if resp.Certificate is nil. Otherwise, +// the OCSP response contained an intermediate certificate that created the +// signature. That signature is checked by ParseResponse and only +// resp.Certificate remains to be validated. +func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { + return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) +} + +// ParseError results from an invalid OCSP response. +type ParseError string + +func (p ParseError) Error() string { + return string(p) +} + +// ParseRequest parses an OCSP request in DER form. It only supports +// requests for a single certificate. Signed requests are not supported. +// If a request includes a signature, it will result in a ParseError. 
+func ParseRequest(bytes []byte) (*Request, error) { + var req ocspRequest + rest, err := asn1.Unmarshal(bytes, &req) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP request") + } + + if len(req.TBSRequest.RequestList) == 0 { + return nil, ParseError("OCSP request contains no request body") + } + innerRequest := req.TBSRequest.RequestList[0] + + hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) + if hashFunc == crypto.Hash(0) { + return nil, ParseError("OCSP request uses unknown hash function") + } + + return &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: innerRequest.Cert.NameHash, + IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, + SerialNumber: innerRequest.Cert.SerialNumber, + }, nil +} + +// ParseResponse parses an OCSP response in DER form. The response must contain +// only one certificate status. To parse the status of a specific certificate +// from a response which may contain multiple statuses, use ParseResponseForCert +// instead. +// +// If the response contains an embedded certificate, then that certificate will +// be used to verify the response signature. If the response contains an +// embedded certificate and issuer is not nil, then issuer will be used to verify +// the signature on the embedded certificate. +// +// If the response does not contain an embedded certificate and issuer is not +// nil, then issuer will be used to verify the response signature. +// +// Invalid responses and parse failures will result in a ParseError. +// Error responses will result in a ResponseError. +func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { + return ParseResponseForCert(bytes, nil, issuer) +} + +// ParseResponseForCert acts identically to ParseResponse, except it supports +// parsing responses that contain multiple statuses. 
If the response contains +// multiple statuses and cert is not nil, then ParseResponseForCert will return +// the first status which contains a matching serial, otherwise it will return an +// error. If cert is nil, then the first status in the response will be returned. +func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) { + var resp responseASN1 + rest, err := asn1.Unmarshal(bytes, &resp) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP response") + } + + if status := ResponseStatus(resp.Status); status != Success { + return nil, ResponseError{status} + } + + if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { + return nil, ParseError("bad OCSP response type") + } + + var basicResp basicResponse + rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP response") + } + + if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 { + return nil, ParseError("OCSP response contains bad number of responses") + } + + var singleResp singleResponse + if cert == nil { + singleResp = basicResp.TBSResponseData.Responses[0] + } else { + match := false + for _, resp := range basicResp.TBSResponseData.Responses { + if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 { + singleResp = resp + match = true + break + } + } + if !match { + return nil, ParseError("no response matching the supplied certificate") + } + } + + ret := &Response{ + TBSResponseData: basicResp.TBSResponseData.Raw, + Signature: basicResp.Signature.RightAlign(), + SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), + Extensions: singleResp.SingleExtensions, + SerialNumber: singleResp.CertID.SerialNumber, + ProducedAt: basicResp.TBSResponseData.ProducedAt, + ThisUpdate: singleResp.ThisUpdate, + NextUpdate: singleResp.NextUpdate, + 
} + + // Handle the ResponderID CHOICE tag. ResponderID can be flattened into + // TBSResponseData once https://go-review.googlesource.com/34503 has been + // released. + rawResponderID := basicResp.TBSResponseData.RawResponderID + switch rawResponderID.Tag { + case 1: // Name + var rdn pkix.RDNSequence + if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 { + return nil, ParseError("invalid responder name") + } + ret.RawResponderName = rawResponderID.Bytes + case 2: // KeyHash + if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 { + return nil, ParseError("invalid responder key hash") + } + default: + return nil, ParseError("invalid responder id tag") + } + + if len(basicResp.Certificates) > 0 { + // Responders should only send a single certificate (if they + // send any) that connects the responder's certificate to the + // original issuer. We accept responses with multiple + // certificates due to a number responders sending them[1], but + // ignore all but the first. 
+ // + // [1] https://github.com/golang/go/issues/21527 + ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) + if err != nil { + return nil, err + } + + if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { + return nil, ParseError("bad signature on embedded certificate: " + err.Error()) + } + + if issuer != nil { + if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { + return nil, ParseError("bad OCSP signature: " + err.Error()) + } + } + } else if issuer != nil { + if err := ret.CheckSignatureFrom(issuer); err != nil { + return nil, ParseError("bad OCSP signature: " + err.Error()) + } + } + + for _, ext := range singleResp.SingleExtensions { + if ext.Critical { + return nil, ParseError("unsupported critical extension") + } + } + + for h, oid := range hashOIDs { + if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) { + ret.IssuerHash = h + break + } + } + if ret.IssuerHash == 0 { + return nil, ParseError("unsupported issuer hash algorithm") + } + + switch { + case bool(singleResp.Good): + ret.Status = Good + case bool(singleResp.Unknown): + ret.Status = Unknown + default: + ret.Status = Revoked + ret.RevokedAt = singleResp.Revoked.RevocationTime + ret.RevocationReason = int(singleResp.Revoked.Reason) + } + + return ret, nil +} + +// RequestOptions contains options for constructing OCSP requests. +type RequestOptions struct { + // Hash contains the hash function that should be used when + // constructing the OCSP request. If zero, SHA-1 will be used. + Hash crypto.Hash +} + +func (opts *RequestOptions) hash() crypto.Hash { + if opts == nil || opts.Hash == 0 { + // SHA-1 is nearly universally used in OCSP. + return crypto.SHA1 + } + return opts.Hash +} + +// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If +// opts is nil then sensible defaults are used. 
+func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { + hashFunc := opts.hash() + + // OCSP seems to be the only place where these raw hash identifiers are + // used. I took the following from + // http://msdn.microsoft.com/en-us/library/ff635603.aspx + _, ok := hashOIDs[hashFunc] + if !ok { + return nil, x509.ErrUnsupportedAlgorithm + } + + if !hashFunc.Available() { + return nil, x509.ErrUnsupportedAlgorithm + } + h := opts.hash().New() + + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + req := &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: cert.SerialNumber, + } + return req.Marshal() +} + +// CreateResponse returns a DER-encoded OCSP response with the specified contents. +// The fields in the response are populated as follows: +// +// The responder cert is used to populate the responder's name field, and the +// certificate itself is provided alongside the OCSP response signature. +// +// The issuer cert is used to puplate the IssuerNameHash and IssuerKeyHash fields. +// +// The template is used to populate the SerialNumber, Status, RevokedAt, +// RevocationReason, ThisUpdate, and NextUpdate fields. +// +// If template.IssuerHash is not set, SHA1 will be used. +// +// The ProducedAt date is automatically set to the current date, to the nearest minute. 
+func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + if template.IssuerHash == 0 { + template.IssuerHash = crypto.SHA1 + } + hashOID := getOIDFromHashAlgorithm(template.IssuerHash) + if hashOID == nil { + return nil, errors.New("unsupported issuer hash algorithm") + } + + if !template.IssuerHash.Available() { + return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash) + } + h := template.IssuerHash.New() + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + innerResponse := singleResponse{ + CertID: certID{ + HashAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + NameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: template.SerialNumber, + }, + ThisUpdate: template.ThisUpdate.UTC(), + NextUpdate: template.NextUpdate.UTC(), + SingleExtensions: template.ExtraExtensions, + } + + switch template.Status { + case Good: + innerResponse.Good = true + case Unknown: + innerResponse.Unknown = true + case Revoked: + innerResponse.Revoked = revokedInfo{ + RevocationTime: template.RevokedAt.UTC(), + Reason: asn1.Enumerated(template.RevocationReason), + } + } + + rawResponderID := asn1.RawValue{ + Class: 2, // context-specific + Tag: 1, // Name (explicit tag) + IsCompound: true, + Bytes: responderCert.RawSubject, + } + tbsResponseData := responseData{ + Version: 0, + RawResponderID: rawResponderID, + ProducedAt: time.Now().Truncate(time.Minute).UTC(), + Responses: []singleResponse{innerResponse}, + } + + tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) + if err != nil { + 
return nil, err + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + responseHash := hashFunc.New() + responseHash.Write(tbsResponseDataDER) + signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc) + if err != nil { + return nil, err + } + + response := basicResponse{ + TBSResponseData: tbsResponseData, + SignatureAlgorithm: signatureAlgorithm, + Signature: asn1.BitString{ + Bytes: signature, + BitLength: 8 * len(signature), + }, + } + if template.Certificate != nil { + response.Certificates = []asn1.RawValue{ + {FullBytes: template.Certificate.Raw}, + } + } + responseDER, err := asn1.Marshal(response) + if err != nil { + return nil, err + } + + return asn1.Marshal(responseASN1{ + Status: asn1.Enumerated(Success), + Response: responseBytes{ + ResponseType: idPKIXOCSPBasic, + Response: responseDER, + }, + }) +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 579d2d73..474efad0 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -76,7 +76,7 @@ arguments can be passed to the kernel. The third is for low-level use by the ForkExec wrapper. Unlike the first two, it does not call into the scheduler to let it know that a system call is running. -When porting Go to an new architecture/OS, this file must be implemented for +When porting Go to a new architecture/OS, this file must be implemented for each GOOS/GOARCH pair. ### mksysnum @@ -107,7 +107,7 @@ prototype can be exported (capitalized) or not. Adding a new syscall often just requires adding a new `//sys` function prototype with the desired arguments and a capitalized name so it is exported. 
However, if you want the interface to the syscall to be different, often one will make an -unexported `//sys` prototype, an then write a custom wrapper in +unexported `//sys` prototype, and then write a custom wrapper in `syscall_${GOOS}.go`. ### types files @@ -137,7 +137,7 @@ some `#if/#elif` macros in your include statements. This script is used to generate the system's various constants. This doesn't just include the error numbers and error strings, but also the signal numbers -an a wide variety of miscellaneous constants. The constants come from the list +and a wide variety of miscellaneous constants. The constants come from the list of include files in the `includes_${uname}` variable. A regex then picks out the desired `#define` statements, and generates the corresponding Go constants. The error numbers and strings are generated from `#include `, and the diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index 7f29275f..e0fcd9b3 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd +//go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index 98ebfad9..d702d4ad 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd +//go:build (freebsd || netbsd || openbsd) && gc +// +build freebsd netbsd openbsd // +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 2dd9013a..6e6afcaa 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -239,6 +239,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -502,6 +503,9 @@ ccflags="$@" $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL)_/ || + $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || + $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || @@ -559,6 +563,7 @@ ccflags="$@" $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /^AUDIT_RECORD_MAGIC/ && @@ -594,7 +599,7 @@ ccflags="$@" $2 == "HID_MAX_DESCRIPTOR_SIZE" || $2 ~ /^_?HIDIOC/ || $2 ~ /^BUS_(USB|HIL|BLUETOOTH|VIRTUAL)$/ || - $2 ~ /^MTD_/ || + $2 ~ /^MTD/ || $2 ~ /^OTP/ || $2 ~ /^MEM/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 9945e5f9..23f6b576 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,6 +13,7 @@ package unix import ( + "fmt" "runtime" "syscall" "unsafe" @@ -398,6 +399,38 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + // Find 
size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // The actual call may return less than the original reported required + // size so ensure we deal with that. + return buf[:n/SizeofKinfoProc], nil +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) /* diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 2dd7c8e3..41b91fdf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -904,6 +904,46 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrIUCV, nil } +type SockaddrNFC struct { + DeviceIdx uint32 + TargetIdx uint32 + NFCProtocol uint32 + raw RawSockaddrNFC +} + +func (sa *SockaddrNFC) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Sa_family = AF_NFC + sa.raw.Dev_idx = sa.DeviceIdx + sa.raw.Target_idx = sa.TargetIdx + sa.raw.Nfc_protocol = sa.NFCProtocol + return unsafe.Pointer(&sa.raw), SizeofSockaddrNFC, nil +} + +type SockaddrNFCLLCP struct { + DeviceIdx uint32 + TargetIdx uint32 + NFCProtocol uint32 + DestinationSAP uint8 + SourceSAP uint8 + ServiceName string + raw RawSockaddrNFCLLCP +} + +func (sa *SockaddrNFCLLCP) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Sa_family = AF_NFC + sa.raw.Dev_idx = sa.DeviceIdx + sa.raw.Target_idx = 
sa.TargetIdx + sa.raw.Nfc_protocol = sa.NFCProtocol + sa.raw.Dsap = sa.DestinationSAP + sa.raw.Ssap = sa.SourceSAP + if len(sa.ServiceName) > len(sa.raw.Service_name) { + return nil, 0, EINVAL + } + copy(sa.raw.Service_name[:], sa.ServiceName) + sa.raw.SetServiceNameLen(len(sa.ServiceName)) + return unsafe.Pointer(&sa.raw), SizeofSockaddrNFCLLCP, nil +} + var socketProtocol = func(fd int) (int, error) { return GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) } @@ -1144,6 +1184,37 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } return sa, nil } + case AF_NFC: + proto, err := socketProtocol(fd) + if err != nil { + return nil, err + } + switch proto { + case NFC_SOCKPROTO_RAW: + pp := (*RawSockaddrNFC)(unsafe.Pointer(rsa)) + sa := &SockaddrNFC{ + DeviceIdx: pp.Dev_idx, + TargetIdx: pp.Target_idx, + NFCProtocol: pp.Nfc_protocol, + } + return sa, nil + case NFC_SOCKPROTO_LLCP: + pp := (*RawSockaddrNFCLLCP)(unsafe.Pointer(rsa)) + if uint64(pp.Service_name_len) > uint64(len(pp.Service_name)) { + return nil, EINVAL + } + sa := &SockaddrNFCLLCP{ + DeviceIdx: pp.Dev_idx, + TargetIdx: pp.Target_idx, + NFCProtocol: pp.Nfc_protocol, + DestinationSAP: pp.Dsap, + SourceSAP: pp.Ssap, + ServiceName: string(pp.Service_name[:pp.Service_name_len]), + } + return sa, nil + default: + return nil, EINVAL + } } return nil, EAFNOSUPPORT } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 7b52e5d8..b430536c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -378,6 +378,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git 
a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 28b76411..85cd97da 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -172,6 +172,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 68877728..39a864d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -256,6 +256,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 7ed70347..7f27ebf2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -207,6 +207,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 06dec06f..27aee81d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -217,6 +217,10 @@ func 
(cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 8f0d0a5b..3a5621e3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -229,6 +229,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 7e65e088..cf0d36f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -215,6 +215,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint32(length) +} + //sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 0b1f0d6d..5259a5fe 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -100,6 +100,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + //sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go 
b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index ce9bcd31..8ef821e5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -188,6 +188,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + func InotifyInit() (fd int, err error) { return InotifyInit1(0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index a1e45694..a1c0574b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -129,6 +129,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + // Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct. // mmap2 also requires arguments to be passed in a struct; it is currently not exposed in . 
func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 49055a3c..de14b889 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -116,6 +116,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + //sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 991996b6..5bb48ef5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1262,6 +1262,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e644eaf5..11e57097 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1262,6 +1262,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 9c7c5e16..44090011 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1297,6 +1297,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 
SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index b265abb2..64520d31 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1298,6 +1298,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 3df99f28..99e9a0e0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1276,6 +1276,11 @@ const ( SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 218d3990..4c837711 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -1298,6 +1298,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 4e4583b6..52f5bbc1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1566,6 +1566,59 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFC_ATR_REQ_GB_MAXSIZE = 0x30 + 
NFC_ATR_REQ_MAXSIZE = 0x40 + NFC_ATR_RES_GB_MAXSIZE = 0x2f + NFC_ATR_RES_MAXSIZE = 0x40 + NFC_COMM_ACTIVE = 0x0 + NFC_COMM_PASSIVE = 0x1 + NFC_DEVICE_NAME_MAXSIZE = 0x8 + NFC_DIRECTION_RX = 0x0 + NFC_DIRECTION_TX = 0x1 + NFC_FIRMWARE_NAME_MAXSIZE = 0x20 + NFC_GB_MAXSIZE = 0x30 + NFC_GENL_MCAST_EVENT_NAME = "events" + NFC_GENL_NAME = "nfc" + NFC_GENL_VERSION = 0x1 + NFC_HEADER_SIZE = 0x1 + NFC_ISO15693_UID_MAXSIZE = 0x8 + NFC_LLCP_MAX_SERVICE_NAME = 0x3f + NFC_LLCP_MIUX = 0x1 + NFC_LLCP_REMOTE_LTO = 0x3 + NFC_LLCP_REMOTE_MIU = 0x2 + NFC_LLCP_REMOTE_RW = 0x4 + NFC_LLCP_RW = 0x0 + NFC_NFCID1_MAXSIZE = 0xa + NFC_NFCID2_MAXSIZE = 0x8 + NFC_NFCID3_MAXSIZE = 0xa + NFC_PROTO_FELICA = 0x3 + NFC_PROTO_FELICA_MASK = 0x8 + NFC_PROTO_ISO14443 = 0x4 + NFC_PROTO_ISO14443_B = 0x6 + NFC_PROTO_ISO14443_B_MASK = 0x40 + NFC_PROTO_ISO14443_MASK = 0x10 + NFC_PROTO_ISO15693 = 0x7 + NFC_PROTO_ISO15693_MASK = 0x80 + NFC_PROTO_JEWEL = 0x1 + NFC_PROTO_JEWEL_MASK = 0x2 + NFC_PROTO_MAX = 0x8 + NFC_PROTO_MIFARE = 0x2 + NFC_PROTO_MIFARE_MASK = 0x4 + NFC_PROTO_NFC_DEP = 0x5 + NFC_PROTO_NFC_DEP_MASK = 0x20 + NFC_RAW_HEADER_SIZE = 0x2 + NFC_RF_INITIATOR = 0x0 + NFC_RF_NONE = 0x2 + NFC_RF_TARGET = 0x1 + NFC_SENSB_RES_MAXSIZE = 0xc + NFC_SENSF_RES_MAXSIZE = 0x12 + NFC_SE_DISABLED = 0x0 + NFC_SE_EMBEDDED = 0x2 + NFC_SE_ENABLED = 0x1 + NFC_SE_UICC = 0x1 + NFC_SOCKPROTO_LLCP = 0x1 + NFC_SOCKPROTO_MAX = 0x2 + NFC_SOCKPROTO_RAW = 0x0 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1991,6 +2044,11 @@ const ( QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 RAMFS_MAGIC = 0x858458f6 + RAW_PAYLOAD_DIGITAL = 0x3 + RAW_PAYLOAD_HCI = 0x2 + RAW_PAYLOAD_LLCP = 0x0 + RAW_PAYLOAD_NCI = 0x1 + RAW_PAYLOAD_PROPRIETARY = 0x4 RDTGROUP_SUPER_MAGIC = 0x7655821 REISERFS_SUPER_MAGIC = 0x52654973 RENAME_EXCHANGE = 0x2 @@ -2226,6 +2284,12 @@ const ( SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 SECURITYFS_MAGIC = 0x73636673 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + 
SEEK_HOLE = 0x4 + SEEK_MAX = 0x4 + SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 33b401dd..09fc559e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -137,6 +137,7 @@ const ( MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index aec8754c..75730cc2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -137,6 +137,7 @@ const ( MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index e96fc5c4..127cf17a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index c429c485..957ca1ff 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -138,6 +138,7 @@ const ( MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index aaa4871a..314a2054 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 64816410..457e8de9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index a5a65d09..33cd28f6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 88ce2661..0e085ba1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 MEMWRITEOOB = 0xc00c4d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 98f1ef7b..1b5928cf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 MEMWRITEOOB = 0xc00c4d03 + 
MTDFILEMODE = 0x20004d13 NFDBITS = 0x20 NL2 = 0x200 NL3 = 0x300 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 6b29a58e..f3a41d6e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NL2 = 0x200 NL3 = 0x300 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index f60ca861..6a5a555d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NL2 = 0x200 NL3 = 0x300 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 86fdffed..a4da67ed 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index de677919..a7028e0e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -135,6 +135,7 @@ const ( MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 9ebc9d66..ed3b3286 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -140,6 +140,7 @@ const ( MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x20004d13 NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 2673e6c5..4c8dc0ba 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -535,3 +535,107 @@ type CtlInfo struct { Id uint32 Name [96]byte } + +const SizeofKinfoProc = 0x288 + +type Eproc struct { + Paddr uintptr + Sess uintptr + Pcred Pcred + Ucred Ucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess uintptr + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]int8 + Spare [4]int32 + _ [4]byte +} + +type ExternProc struct { + P_starttime Timeval + P_vmspace *Vmspace + P_sigacts uintptr + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + User_stack *int8 + Exit_thread *byte + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + P_wchan *byte + P_wmesg *int8 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + P_tracep uintptr + P_siglist int32 + P_textvp uintptr + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + P_pgrp uintptr + P_addr uintptr + P_xstat uint16 + P_acflag uint16 + P_ru *Rusage +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Pcred struct { + Pc_lock 
[72]int8 + Pc_ucred uintptr + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + _ [4]byte +} + +type Ucred struct { + Ref int32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 1465cbcf..96f0e6ae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -535,3 +535,107 @@ type CtlInfo struct { Id uint32 Name [96]byte } + +const SizeofKinfoProc = 0x288 + +type Eproc struct { + Paddr uintptr + Sess uintptr + Pcred Pcred + Ucred Ucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess uintptr + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]int8 + Spare [4]int32 + _ [4]byte +} + +type ExternProc struct { + P_starttime Timeval + P_vmspace *Vmspace + P_sigacts uintptr + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + User_stack *int8 + Exit_thread *byte + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + P_wchan *byte + P_wmesg *int8 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + P_tracep uintptr + P_siglist int32 + P_textvp uintptr + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + P_pgrp uintptr + P_addr uintptr + P_xstat uint16 + P_acflag uint16 + P_ru *Rusage +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Pcred struct { + Pc_lock [72]int8 + Pc_ucred uintptr + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + 
P_svgid uint32 + P_refcnt int32 + _ [4]byte +} + +type Ucred struct { + Ref int32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 1d049d7a..d0ba8e9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -431,6 +431,9 @@ type Winsize struct { const ( AT_FDCWD = 0xfffafdcd AT_SYMLINK_NOFOLLOW = 0x1 + AT_REMOVEDIR = 0x2 + AT_EACCESS = 0x4 + AT_SYMLINK_FOLLOW = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index c51bc88f..1f99c024 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -672,9 +672,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 395b6918..ddf0305a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -675,9 +675,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index d3f9d254..dce0a5c8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -656,9 +656,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 
+ AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 434d6e8e..e2324470 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -653,9 +653,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_REMOVEDIR = 0x800 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index c9b2c9aa..c9d7eb41 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -351,6 +351,13 @@ type RawSockaddrIUCV struct { Name [8]int8 } +type RawSockaddrNFC struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 +} + type _Socklen uint32 type Linger struct { @@ -464,6 +471,7 @@ const ( SizeofSockaddrL2TPIP = 0x10 SizeofSockaddrL2TPIP6 = 0x20 SizeofSockaddrIUCV = 0x20 + SizeofSockaddrNFC = 0x10 SizeofLinger = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc @@ -1765,6 +1773,8 @@ const ( NFPROTO_NUMPROTO = 0xd ) +const SO_ORIGINAL_DST = 0x50 + type Nfgenmsg struct { Nfgen_family uint8 Version uint8 @@ -3828,3 +3838,72 @@ const ( MTD_FILE_MODE_OTP_USER = 0x2 MTD_FILE_MODE_RAW = 0x3 ) + +const ( + NFC_CMD_UNSPEC = 0x0 + NFC_CMD_GET_DEVICE = 0x1 + NFC_CMD_DEV_UP = 0x2 + NFC_CMD_DEV_DOWN = 0x3 + NFC_CMD_DEP_LINK_UP = 0x4 + NFC_CMD_DEP_LINK_DOWN = 0x5 + NFC_CMD_START_POLL = 0x6 + NFC_CMD_STOP_POLL = 0x7 + NFC_CMD_GET_TARGET = 0x8 + NFC_EVENT_TARGETS_FOUND = 0x9 + NFC_EVENT_DEVICE_ADDED = 0xa + NFC_EVENT_DEVICE_REMOVED = 0xb + NFC_EVENT_TARGET_LOST = 0xc + NFC_EVENT_TM_ACTIVATED = 0xd + NFC_EVENT_TM_DEACTIVATED = 0xe + NFC_CMD_LLC_GET_PARAMS = 0xf + 
NFC_CMD_LLC_SET_PARAMS = 0x10 + NFC_CMD_ENABLE_SE = 0x11 + NFC_CMD_DISABLE_SE = 0x12 + NFC_CMD_LLC_SDREQ = 0x13 + NFC_EVENT_LLC_SDRES = 0x14 + NFC_CMD_FW_DOWNLOAD = 0x15 + NFC_EVENT_SE_ADDED = 0x16 + NFC_EVENT_SE_REMOVED = 0x17 + NFC_EVENT_SE_CONNECTIVITY = 0x18 + NFC_EVENT_SE_TRANSACTION = 0x19 + NFC_CMD_GET_SE = 0x1a + NFC_CMD_SE_IO = 0x1b + NFC_CMD_ACTIVATE_TARGET = 0x1c + NFC_CMD_VENDOR = 0x1d + NFC_CMD_DEACTIVATE_TARGET = 0x1e + NFC_ATTR_UNSPEC = 0x0 + NFC_ATTR_DEVICE_INDEX = 0x1 + NFC_ATTR_DEVICE_NAME = 0x2 + NFC_ATTR_PROTOCOLS = 0x3 + NFC_ATTR_TARGET_INDEX = 0x4 + NFC_ATTR_TARGET_SENS_RES = 0x5 + NFC_ATTR_TARGET_SEL_RES = 0x6 + NFC_ATTR_TARGET_NFCID1 = 0x7 + NFC_ATTR_TARGET_SENSB_RES = 0x8 + NFC_ATTR_TARGET_SENSF_RES = 0x9 + NFC_ATTR_COMM_MODE = 0xa + NFC_ATTR_RF_MODE = 0xb + NFC_ATTR_DEVICE_POWERED = 0xc + NFC_ATTR_IM_PROTOCOLS = 0xd + NFC_ATTR_TM_PROTOCOLS = 0xe + NFC_ATTR_LLC_PARAM_LTO = 0xf + NFC_ATTR_LLC_PARAM_RW = 0x10 + NFC_ATTR_LLC_PARAM_MIUX = 0x11 + NFC_ATTR_SE = 0x12 + NFC_ATTR_LLC_SDP = 0x13 + NFC_ATTR_FIRMWARE_NAME = 0x14 + NFC_ATTR_SE_INDEX = 0x15 + NFC_ATTR_SE_TYPE = 0x16 + NFC_ATTR_SE_AID = 0x17 + NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS = 0x18 + NFC_ATTR_SE_APDU = 0x19 + NFC_ATTR_TARGET_ISO15693_DSFID = 0x1a + NFC_ATTR_TARGET_ISO15693_UID = 0x1b + NFC_ATTR_SE_PARAMS = 0x1c + NFC_ATTR_VENDOR_ID = 0x1d + NFC_ATTR_VENDOR_SUBCMD = 0x1e + NFC_ATTR_VENDOR_DATA = 0x1f + NFC_SDP_ATTR_UNSPEC = 0x0 + NFC_SDP_ATTR_URI = 0x1 + NFC_SDP_ATTR_SAP = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 4d4d283d..235c62e4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -128,6 +128,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr 
struct { Family uint16 Data [14]int8 @@ -160,9 +171,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 8a2eed5e..99b1e5b6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -130,6 +130,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -163,9 +174,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 94b34add..cc8bba79 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -134,6 +134,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -166,9 +177,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 2143de4d..fa8fe3a7 100644 --- 
a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -131,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -164,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index a40216ee..e7fb8d9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -133,6 +133,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -165,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index e834b069..2fa61d59 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -131,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -164,9 +175,10 @@ 
type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index e31083b0..7f363993 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -131,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -164,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 42811f7f..f3c20cb8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -133,6 +133,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -165,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index af7a7201..885d2795 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -134,6 +134,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint32 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -166,9 +177,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x8 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc + SizeofSockaddrNFCLLCP = 0x58 + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 2a3afbae..a94eb8e1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -132,6 +132,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -165,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index c0de30a6..659e32eb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -132,6 +132,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -165,9 +176,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 
0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 74faf2e9..ab8ec604 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -131,6 +131,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -164,9 +175,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 9a8f0c2c..3ec08237 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -130,6 +130,17 @@ const ( FADV_NOREUSE = 0x7 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -163,9 +174,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 72cdda75..23d47447 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -134,6 +134,17 @@ const ( FADV_NOREUSE = 0x5 ) +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -167,9 +178,10 @@ type Cmsghdr struct { } const ( - SizeofIovec = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index b10e73ab..2fd2060e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -445,8 +445,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 28ed6d55..6a5a1a8a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -453,8 +453,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 4ba196eb..84cc8d01 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -450,8 +450,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) 
type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index dd642bd9..c844e709 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -453,8 +453,10 @@ type Ptmget struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x400 + AT_EACCESS = 0x100 AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 1fdb0e5f..2a8b1e6f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -438,8 +438,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index e2fc93c7..b1759cf7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -438,8 +438,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 8d34b5a2..e807de20 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -439,8 +439,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go 
b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index ea8f1a0d..ff3aecae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -432,8 +432,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index ec6e8bc3..9ecda691 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -432,8 +432,10 @@ type Winsize struct { const ( AT_FDCWD = -0x64 - AT_SYMLINK_FOLLOW = 0x4 + AT_EACCESS = 0x1 AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 183173af..1215b2ae 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -220,7 +220,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CancelIo(s Handle) (err error) //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW -//sys CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessAsUserW +//sys CreateProcessAsUser(token Token, appName *uint16, commandLine 
*uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = advapi32.CreateProcessAsUserW //sys initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList //sys deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList //sys updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 1f733398..17f03312 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -680,7 +680,7 @@ const ( WTD_CHOICE_CERT = 5 WTD_STATEACTION_IGNORE = 0x00000000 - WTD_STATEACTION_VERIFY = 0x00000010 + WTD_STATEACTION_VERIFY = 0x00000001 WTD_STATEACTION_CLOSE = 0x00000002 WTD_STATEACTION_AUTO_CACHE = 0x00000003 WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 86989b54..148de0ff 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -69,6 +69,7 @@ var ( procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") procCopySid = modadvapi32.NewProc("CopySid") + procCreateProcessAsUserW = modadvapi32.NewProc("CreateProcessAsUserW") procCreateServiceW = modadvapi32.NewProc("CreateServiceW") procCreateWellKnownSid = 
modadvapi32.NewProc("CreateWellKnownSid") procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") @@ -185,7 +186,6 @@ var ( procCreateMutexW = modkernel32.NewProc("CreateMutexW") procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreatePipe = modkernel32.NewProc("CreatePipe") - procCreateProcessAsUserW = modkernel32.NewProc("CreateProcessAsUserW") procCreateProcessW = modkernel32.NewProc("CreateProcessW") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") @@ -554,6 +554,18 @@ func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { return } +func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), 
uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) handle = Handle(r0) @@ -1578,18 +1590,6 @@ func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, return } -func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { - var _p0 uint32 - if inheritHandles { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { var _p0 uint32 if inheritHandles { diff --git a/vendor/modules.txt b/vendor/modules.txt index c87862cd..cb0ee9c9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -10,7 +10,7 @@ github.com/go-sql-driver/mysql github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor -# github.com/hashicorp/go-hclog v0.16.1 +# github.com/hashicorp/go-hclog v0.16.2 ## explicit github.com/hashicorp/go-hclog # 
github.com/hashicorp/go-immutable-radix v1.0.0 @@ -36,9 +36,9 @@ github.com/mattn/go-colorable github.com/mattn/go-isatty # github.com/minio/highwayhash v1.0.1 github.com/minio/highwayhash -# github.com/nats-io/jwt/v2 v2.0.2 +# github.com/nats-io/jwt/v2 v2.0.3 github.com/nats-io/jwt/v2 -# github.com/nats-io/nats-server/v2 v2.2.6 +# github.com/nats-io/nats-server/v2 v2.3.3 ## explicit github.com/nats-io/nats-server/v2/conf github.com/nats-io/nats-server/v2/internal/ldap @@ -47,7 +47,7 @@ github.com/nats-io/nats-server/v2/server github.com/nats-io/nats-server/v2/server/pse github.com/nats-io/nats-server/v2/server/sysmem github.com/nats-io/nats-server/v2/test -# github.com/nats-io/nats.go v1.11.0 +# github.com/nats-io/nats.go v1.11.1-0.20210623165838-4b75fc59ae30 ## explicit github.com/nats-io/nats.go github.com/nats-io/nats.go/encoders/builtin @@ -57,19 +57,19 @@ github.com/nats-io/nkeys # github.com/nats-io/nuid v1.0.1 ## explicit github.com/nats-io/nuid -# github.com/nats-io/stan.go v0.9.0 +# github.com/nats-io/stan.go v0.10.0 ## explicit github.com/nats-io/stan.go github.com/nats-io/stan.go/pb -# github.com/prometheus/procfs v0.6.0 +# github.com/prometheus/procfs v0.7.1 ## explicit github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# go.etcd.io/bbolt v1.3.5 +# go.etcd.io/bbolt v1.3.6 ## explicit go.etcd.io/bbolt -# golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a +# golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 ## explicit golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -78,8 +78,9 @@ golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/internal/subtle +golang.org/x/crypto/ocsp golang.org/x/crypto/poly1305 -# golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea +# golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c ## explicit golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader