From 3656bc271f5b8e4df6be644b4e29ed00932d3eff Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 1 Dec 2023 02:43:09 +0000
Subject: [PATCH] fix(deps): update all go dependencies main

Signed-off-by: cilium-renovate[bot] <134692979+cilium-renovate[bot]@users.noreply.github.com>
---
 go.mod | 53 +-
 go.sum | 116 +-
 .../antlr/antlr4/runtime/Go/antlr/v4/LICENSE | 26 -
 .../antlr4/runtime/Go/antlr/v4/antlrdoc.go | 68 -
 .../antlr4/runtime/Go/antlr/v4/atn_config.go | 303 --
 .../runtime/Go/antlr/v4/atn_config_set.go | 441 ---
 .../runtime/Go/antlr/v4/input_stream.go | 113 -
 .../antlr4/runtime/Go/antlr/v4/jcollect.go | 198 --
 .../runtime/Go/antlr/v4/prediction_context.go | 806 ------
 .../runtime/Go/antlr/v4/prediction_mode.go | 529 ----
 .../runtime/Go/antlr/v4/rule_context.go | 114 -
 .../antlr4/runtime/Go/antlr/v4/utils_set.go | 235 --
 .../github.com/antlr4-go/antlr/v4/.gitignore | 18 +
 vendor/github.com/antlr4-go/antlr/v4/LICENSE | 28 +
 .../github.com/antlr4-go/antlr/v4/README.md | 54 +
 .../github.com/antlr4-go/antlr/v4/antlrdoc.go | 102 +
 .../runtime/Go => antlr4-go}/antlr/v4/atn.go | 9 +-
 .../antlr4-go/antlr/v4/atn_config.go | 335 +++
 .../antlr4-go/antlr/v4/atn_config_set.go | 301 ++
 .../antlr/v4/atn_deserialization_options.go | 7 +-
 .../antlr/v4/atn_deserializer.go | 9 +-
 .../antlr/v4/atn_simulator.go | 17 +-
 .../Go => antlr4-go}/antlr/v4/atn_state.go | 224 +-
 .../Go => antlr4-go}/antlr/v4/atn_type.go | 0
 .../Go => antlr4-go}/antlr/v4/char_stream.go | 2 +-
 .../antlr/v4/common_token_factory.go | 0
 .../antlr/v4/common_token_stream.go | 39 +-
 .../Go => antlr4-go}/antlr/v4/comparators.go | 33 +-
 .../antlr4-go/antlr/v4/configuration.go | 214 ++
 .../runtime/Go => antlr4-go}/antlr/v4/dfa.go | 47 +-
 .../antlr/v4/dfa_serializer.go | 2 +-
 .../Go => antlr4-go}/antlr/v4/dfa_state.go | 29 +-
 .../antlr/v4/diagnostic_error_listener.go | 11 +-
 .../antlr/v4/error_listener.go | 40 +-
 .../antlr/v4/error_strategy.go | 450 ++-
 .../Go => antlr4-go}/antlr/v4/errors.go | 73 +-
 .../Go => antlr4-go}/antlr/v4/file_stream.go | 46 +-
 .../antlr4-go/antlr/v4/input_stream.go | 157 ++
 .../Go => antlr4-go}/antlr/v4/int_stream.go | 0
 .../Go => antlr4-go}/antlr/v4/interval_set.go | 60 +-
 .../github.com/antlr4-go/antlr/v4/jcollect.go | 685 +++++
 .../Go => antlr4-go}/antlr/v4/lexer.go | 68 +-
 .../Go => antlr4-go}/antlr/v4/lexer_action.go | 100 +-
 .../antlr/v4/lexer_action_executor.go | 61 +-
 .../antlr/v4/lexer_atn_simulator.go | 185 +-
 .../Go => antlr4-go}/antlr/v4/ll1_analyzer.go | 62 +-
 .../antlr4-go/antlr/v4/nostatistics.go | 47 +
 .../Go => antlr4-go}/antlr/v4/parser.go | 160 +-
 .../antlr/v4/parser_atn_simulator.go | 787 +++---
 .../antlr/v4/parser_rule_context.go | 85 +-
 .../antlr4-go/antlr/v4/prediction_context.go | 727 +++++
 .../antlr/v4/prediction_context_cache.go | 48 +
 .../antlr4-go/antlr/v4/prediction_mode.go | 536 ++++
 .../Go => antlr4-go}/antlr/v4/recognizer.go | 65 +-
 .../antlr4-go/antlr/v4/rule_context.go | 40 +
 .../antlr/v4/semantic_context.go | 33 +-
 .../antlr4-go/antlr/v4/statistics.go | 281 ++
 .../antlr4-go/antlr/v4/stats_data.go | 23 +
 .../Go => antlr4-go}/antlr/v4/token.go | 36 +-
 .../Go => antlr4-go}/antlr/v4/token_source.go | 0
 .../Go => antlr4-go}/antlr/v4/token_stream.go | 3 +-
 .../antlr/v4/tokenstream_rewriter.go | 221 +-
 .../antlr/v4/trace_listener.go | 0
 .../Go => antlr4-go}/antlr/v4/transition.go | 229 +-
 .../runtime/Go => antlr4-go}/antlr/v4/tree.go | 109 +-
 .../Go => antlr4-go}/antlr/v4/trees.go | 22 +-
 .../Go => antlr4-go}/antlr/v4/utils.go | 84 +-
 vendor/github.com/cilium/cilium/AUTHORS | 7 +
 .../cilium/cilium/pkg/allocator/allocator.go | 41 +-
 .../cilium/cilium/pkg/command/exec/doc.go | 5 +
 .../cilium/cilium/pkg/command/exec/exec.go | 123 +
 .../cilium/cilium/pkg/controller/manager.go | 62 +-
 .../datapath/linux/probes/attach_cgroup.go | 69 +
 .../pkg/datapath/linux/probes/attach_type.go | 81 +
 .../cilium/pkg/datapath/linux/probes/doc.go | 5 +
 .../pkg/datapath/linux/probes/kernel_hz.go | 151 +
 .../linux/probes/managed_neighbors.go | 117 +
 .../pkg/datapath/linux/probes/probes.go | 662 +++++
 .../pkg/datapath/linux/probes/probes_linux.go | 12 +
 .../linux/probes/probes_unspecified.go | 12 +
 .../cilium/cilium/pkg/hive/cell/health.go | 6 +-
 .../cilium/cilium/pkg/hive/cell/invoke.go | 9 +-
 .../cilium/cilium/pkg/hubble/api/v1/flow.go | 2 +-
 .../pkg/identity/key/global_identity.go | 33 +-
 .../cilium/cilium/pkg/identity/reserved.go | 17 +-
 .../cilium/cilium/pkg/ipcache/kvstore.go | 4 +-
 .../cilium/cilium/pkg/ipcache/metadata.go | 70 +-
 .../cilium/cilium/pkg/ipcache/types.go | 5 +-
 .../pkg/k8s/identitybackend/identity.go | 24 +-
 .../cilium/cilium/pkg/k8s/network_policy.go | 20 +
 .../cilium/pkg/k8s/resource/resource.go | 515 ++--
 .../cilium/cilium/pkg/k8s/resource_ctors.go | 11 +
 .../cilium/cilium/pkg/k8s/service_cache.go | 18 +
 .../pkg/k8s/watchers/resources/resources.go | 38 +
 .../cilium/pkg/kvstore/allocator/allocator.go | 12 +-
 .../cilium/cilium/pkg/labels/cidr/cidr.go | 152 +-
 .../cilium/cilium/pkg/labels/labels.go | 13 +-
 .../cilium/pkg/logging/logfields/logfields.go | 4 +
 .../cilium/cilium/pkg/metrics/interfaces.go | 33 +-
 .../cilium/cilium/pkg/metrics/metrics.go | 31 +-
 .../cilium/cilium/pkg/monitor/logrecord.go | 2 +-
 .../cilium/cilium/pkg/node/types/node.go | 2 +-
 .../cilium/cilium/pkg/option/config.go | 26 +-
 .../cilium/cilium/pkg/policy/api/l4.go | 7 +-
 .../github.com/cilium/cilium/pkg/policy/l4.go | 6 +-
 .../cilium/cilium/pkg/policy/mapstate.go | 128 +-
 .../cilium/cilium/pkg/policy/repository.go | 25 +-
 .../cilium/cilium/pkg/policy/resolve.go | 15 +-
 .../cilium/cilium/pkg/policy/rule.go | 28 +-
 .../cilium/cilium/pkg/policy/selectorcache.go | 52 +-
 .../cilium/pkg/proxy/accesslog/record.go | 6 +-
 .../{miekg => cilium}/dns/.codecov.yml | 0
 .../{miekg => cilium}/dns/.gitignore | 0
 .../github.com/{miekg => cilium}/dns/AUTHORS | 0
 .../{miekg => cilium}/dns/CODEOWNERS | 0
 .../{miekg => cilium}/dns/CONTRIBUTORS | 0
 .../{miekg => cilium}/dns/COPYRIGHT | 0
 .../github.com/{miekg => cilium}/dns/LICENSE | 0
 .../{miekg => cilium}/dns/Makefile.fuzz | 0
 .../{miekg => cilium}/dns/Makefile.release | 0
 .../{miekg => cilium}/dns/README.md | 0
 .../{miekg => cilium}/dns/acceptfunc.go | 0
 .../{miekg => cilium}/dns/client.go | 49 +-
 .../{miekg => cilium}/dns/clientconfig.go | 0
 .../github.com/{miekg => cilium}/dns/dane.go | 0
 .../{miekg => cilium}/dns/defaults.go | 0
 .../github.com/{miekg => cilium}/dns/dns.go | 0
 .../{miekg => cilium}/dns/dnssec.go | 0
 .../{miekg => cilium}/dns/dnssec_keygen.go | 0
 .../{miekg => cilium}/dns/dnssec_keyscan.go | 0
 .../{miekg => cilium}/dns/dnssec_privkey.go | 0
 .../github.com/{miekg => cilium}/dns/doc.go | 0
 .../{miekg => cilium}/dns/duplicate.go | 0
 .../github.com/{miekg => cilium}/dns/edns.go | 0
 .../{miekg => cilium}/dns/format.go | 0
 .../github.com/{miekg => cilium}/dns/fuzz.go | 0
 .../{miekg => cilium}/dns/generate.go | 0
 .../github.com/{miekg => cilium}/dns/hash.go | 0
 .../{miekg => cilium}/dns/labels.go | 0
 .../dns/listen_no_reuseport.go | 0
 .../{miekg => cilium}/dns/listen_reuseport.go | 0
 .../github.com/{miekg => cilium}/dns/msg.go | 0
 .../{miekg => cilium}/dns/msg_helpers.go | 0
 .../{miekg => cilium}/dns/msg_truncate.go | 0
 .../github.com/{miekg => cilium}/dns/nsecx.go | 0
 .../{miekg => cilium}/dns/privaterr.go | 0
 .../{miekg => cilium}/dns/reverse.go | 0
 .../{miekg => cilium}/dns/sanitize.go | 0
 .../github.com/{miekg => cilium}/dns/scan.go | 0
 .../{miekg => cilium}/dns/scan_rr.go | 0
 .../{miekg => cilium}/dns/serve_mux.go | 0
 .../{miekg => cilium}/dns/server.go | 0
 vendor/github.com/cilium/dns/shared_client.go | 253 ++
 .../github.com/{miekg => cilium}/dns/sig0.go | 0
 .../{miekg => cilium}/dns/singleinflight.go | 0
 .../{miekg => cilium}/dns/smimea.go | 0
 .../github.com/{miekg => cilium}/dns/svcb.go | 0
 .../github.com/{miekg => cilium}/dns/tlsa.go | 0
 .../github.com/{miekg => cilium}/dns/tools.go | 0
 .../github.com/{miekg => cilium}/dns/tsig.go | 0
 .../github.com/{miekg => cilium}/dns/types.go | 0
 .../github.com/{miekg => cilium}/dns/udp.go | 0
 .../{miekg => cilium}/dns/udp_windows.go | 0
 .../{miekg => cilium}/dns/update.go | 0
 .../{miekg => cilium}/dns/version.go | 0
 .../github.com/{miekg => cilium}/dns/xfr.go | 0
 .../{miekg => cilium}/dns/zduplicate.go | 0
 .../github.com/{miekg => cilium}/dns/zmsg.go | 0
 .../{miekg => cilium}/dns/ztypes.go | 0
 vendor/github.com/cilium/ebpf/features/doc.go | 19 +
 vendor/github.com/cilium/ebpf/features/map.go | 321 +++
 .../github.com/cilium/ebpf/features/misc.go | 79 +
 .../github.com/cilium/ebpf/features/prog.go | 297 ++
 .../cilium/ebpf/features/version.go | 18 +
 .../little-vm-helper/pkg/kernels/conf.go | 7 +-
 .../little-vm-helper/pkg/kernels/dir.go | 10 +-
 .../little-vm-helper/pkg/kernels/git.go | 8 +-
 .../containerd/log/context_deprecated.go | 149 +
 .../containerd/containerd/plugin/context.go | 16 +-
 .../containerd/containerd/plugin/plugin.go | 7 +-
 .../containerd/plugin/plugin_go18.go | 17 +-
 .../containerd/plugin/plugin_other.go | 4 +-
 .../github.com/containerd/log/.golangci.yml | 30 +
 vendor/github.com/containerd/log/LICENSE | 191 ++
 vendor/github.com/containerd/log/README.md | 17 +
 .../{containerd => }/log/context.go | 0
 .../cpuguy83/go-md2man/v2/md2man/md2man.go | 6 +-
 .../cpuguy83/go-md2man/v2/md2man/roff.go | 30 +-
 .../github.com/google/cel-go/cel/BUILD.bazel | 3 +
 vendor/github.com/google/cel-go/cel/env.go | 15 +-
 .../github.com/google/cel-go/cel/folding.go | 36 +-
 .../github.com/google/cel-go/cel/inlining.go | 68 +-
 .../github.com/google/cel-go/cel/optimizer.go | 336 ++-
 .../github.com/google/cel-go/cel/options.go | 19 +
 .../github.com/google/cel-go/cel/program.go | 35 +-
 .../google/cel-go/checker/BUILD.bazel | 1 -
 .../github.com/google/cel-go/checker/cost.go | 56 +-
 .../google/cel-go/common/ast/ast.go | 2 +
 .../google/cel-go/interpreter/runtimecost.go | 47 +-
 .../google/cel-go/parser/BUILD.bazel | 4 +-
 .../google/cel-go/parser/gen/BUILD.bazel | 2 +-
 .../cel-go/parser/gen/cel_base_listener.go | 4 +-
 .../cel-go/parser/gen/cel_base_visitor.go | 5 +-
 .../google/cel-go/parser/gen/cel_lexer.go | 603 ++--
 .../google/cel-go/parser/gen/cel_listener.go | 5 +-
 .../google/cel-go/parser/gen/cel_parser.go | 2478 +++++++++++------
 .../google/cel-go/parser/gen/cel_visitor.go | 8 +-
 .../google/cel-go/parser/gen/generate.sh | 2 +-
 .../github.com/google/cel-go/parser/helper.go | 2 +-
 .../github.com/google/cel-go/parser/input.go | 4 +-
 .../github.com/google/cel-go/parser/parser.go | 10 +-
 vendor/github.com/hashicorp/errwrap/LICENSE | 354 ---
 vendor/github.com/hashicorp/errwrap/README.md | 89 -
 .../github.com/hashicorp/errwrap/errwrap.go | 178 --
 .../hashicorp/go-multierror/LICENSE | 353 ---
 .../hashicorp/go-multierror/Makefile | 31 -
 .../hashicorp/go-multierror/README.md | 150 -
 .../hashicorp/go-multierror/append.go | 43 -
 .../hashicorp/go-multierror/flatten.go | 26 -
 .../hashicorp/go-multierror/format.go | 27 -
 .../hashicorp/go-multierror/group.go | 38 -
 .../hashicorp/go-multierror/multierror.go | 121 -
 .../hashicorp/go-multierror/prefix.go | 37 -
 .../hashicorp/go-multierror/sort.go | 16 -
 .../github.com/hashicorp/golang-lru/v2/2q.go | 36 +-
 .../runc/libcontainer/cgroups/file.go | 3 +-
 .../runc/libcontainer/cgroups/fs/hugetlb.go | 36 +-
 .../runc/libcontainer/cgroups/fs/memory.go | 6 +
 .../runc/libcontainer/cgroups/fs2/hugetlb.go | 30 +-
 vendor/github.com/spf13/cobra/.golangci.yml | 8 +-
 vendor/github.com/spf13/cobra/README.md | 8 +-
 vendor/github.com/spf13/cobra/active_help.go | 10 +-
 vendor/github.com/spf13/cobra/active_help.md | 157 --
 .../spf13/cobra/bash_completions.go | 2 +-
 .../spf13/cobra/bash_completions.md | 93 -
 .../spf13/cobra/bash_completionsV2.go | 2 +-
 vendor/github.com/spf13/cobra/cobra.go | 13 +-
 vendor/github.com/spf13/cobra/command.go | 69 +-
 vendor/github.com/spf13/cobra/completions.go | 29 +-
 vendor/github.com/spf13/cobra/doc/README.md | 17 -
 vendor/github.com/spf13/cobra/doc/man_docs.md | 31 -
 vendor/github.com/spf13/cobra/doc/md_docs.go | 8 +-
 vendor/github.com/spf13/cobra/doc/md_docs.md | 115 -
 .../github.com/spf13/cobra/doc/rest_docs.md | 114 -
 .../github.com/spf13/cobra/doc/yaml_docs.md | 112 -
 .../spf13/cobra/fish_completions.go | 2 +-
 .../spf13/cobra/fish_completions.md | 4 -
 vendor/github.com/spf13/cobra/flag_groups.go | 68 +-
 .../spf13/cobra/powershell_completions.go | 6 +-
 .../spf13/cobra/powershell_completions.md | 3 -
 .../spf13/cobra/projects_using_cobra.md | 64 -
 .../spf13/cobra/shell_completions.md | 576 ----
 vendor/github.com/spf13/cobra/user_guide.md | 726 -----
 .../github.com/spf13/cobra/zsh_completions.md | 48 -
 vendor/go.opentelemetry.io/otel/.gitignore | 1 +
 vendor/go.opentelemetry.io/otel/.golangci.yml | 79 +-
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 172 +-
 vendor/go.opentelemetry.io/otel/CODEOWNERS | 2 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 90 +-
 vendor/go.opentelemetry.io/otel/Makefile | 62 +-
 vendor/go.opentelemetry.io/otel/README.md | 42 +-
 vendor/go.opentelemetry.io/otel/RELEASING.md | 31 +-
 .../otel/attribute/filter.go | 60 +
 .../go.opentelemetry.io/otel/attribute/set.go | 7 -
 .../otel/baggage/baggage.go | 14 +-
 .../go.opentelemetry.io/otel/internal/gen.go | 29 +
 .../otel/internal/global/handler.go | 7 +-
 .../otel/internal/global/internal_logging.go | 7 +-
 .../otel/metric/instrument.go | 2 +
 .../go.opentelemetry.io/otel/metric/meter.go | 2 +
 .../go.opentelemetry.io/otel/requirements.txt | 2 +-
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 18 +-
 vendor/golang.org/x/net/http2/Dockerfile | 51 -
 vendor/golang.org/x/net/http2/Makefile | 3 -
 vendor/golang.org/x/net/http2/server.go | 86 +-
 vendor/golang.org/x/net/http2/transport.go | 3 +-
 .../golang.org/x/sys/execabs/execabs_go118.go | 1 -
 .../golang.org/x/sys/execabs/execabs_go119.go | 1 -
 .../sys/internal/unsafeheader/unsafeheader.go | 30 -
 .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 1 -
 vendor/golang.org/x/sys/plan9/pwd_plan9.go | 1 -
 vendor/golang.org/x/sys/plan9/race.go | 1 -
 vendor/golang.org/x/sys/plan9/race0.go | 1 -
 vendor/golang.org/x/sys/plan9/str.go | 1 -
 vendor/golang.org/x/sys/plan9/syscall.go | 1 -
 .../x/sys/plan9/zsyscall_plan9_386.go | 1 -
 .../x/sys/plan9/zsyscall_plan9_amd64.go | 1 -
 .../x/sys/plan9/zsyscall_plan9_arm.go | 1 -
 vendor/golang.org/x/sys/unix/aliases.go | 2 -
 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 -
 vendor/golang.org/x/sys/unix/asm_bsd_386.s | 2 -
 vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 2 -
 vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 2 -
 vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 2 -
 vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 -
 .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 -
 vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 -
 .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 -
 vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 -
 .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 -
 .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 -
 .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 -
 .../x/sys/unix/asm_openbsd_mips64.s | 1 -
 .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 -
 vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 3 -
 vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 -
 vendor/golang.org/x/sys/unix/constants.go | 1 -
 vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 1 -
 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 1 -
 vendor/golang.org/x/sys/unix/dev_zos.go | 1 -
 vendor/golang.org/x/sys/unix/dirent.go | 1 -
 vendor/golang.org/x/sys/unix/endian_big.go | 1 -
 vendor/golang.org/x/sys/unix/endian_little.go | 1 -
 vendor/golang.org/x/sys/unix/env_unix.go | 1 -
 vendor/golang.org/x/sys/unix/epoll_zos.go | 1 -
 vendor/golang.org/x/sys/unix/fcntl.go | 1 -
 .../x/sys/unix/fcntl_linux_32bit.go | 1 -
 vendor/golang.org/x/sys/unix/fdset.go | 1 -
 vendor/golang.org/x/sys/unix/fstatfs_zos.go | 1 -
 vendor/golang.org/x/sys/unix/gccgo.go | 1 -
 vendor/golang.org/x/sys/unix/gccgo_c.c | 1 -
 .../x/sys/unix/gccgo_linux_amd64.go | 1 -
 vendor/golang.org/x/sys/unix/ifreq_linux.go | 1 -
 vendor/golang.org/x/sys/unix/ioctl_signed.go | 1 -
 .../golang.org/x/sys/unix/ioctl_unsigned.go | 1 -
 vendor/golang.org/x/sys/unix/ioctl_zos.go | 1 -
 vendor/golang.org/x/sys/unix/mkerrors.sh | 1 -
 vendor/golang.org/x/sys/unix/mmap_nomremap.go | 1 -
 vendor/golang.org/x/sys/unix/mremap.go | 1 -
 vendor/golang.org/x/sys/unix/pagesize_unix.go | 1 -
 .../golang.org/x/sys/unix/pledge_openbsd.go | 92 +-
 vendor/golang.org/x/sys/unix/ptrace_darwin.go | 7 -
 vendor/golang.org/x/sys/unix/ptrace_ios.go | 7 -
 vendor/golang.org/x/sys/unix/race.go | 1 -
 vendor/golang.org/x/sys/unix/race0.go | 1 -
 .../x/sys/unix/readdirent_getdents.go | 1 -
 .../x/sys/unix/readdirent_getdirentries.go | 1 -
 vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 1 -
 .../x/sys/unix/sockcmsg_unix_other.go | 1 -
 vendor/golang.org/x/sys/unix/syscall.go | 1 -
 vendor/golang.org/x/sys/unix/syscall_aix.go | 6 +-
 .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 -
 .../x/sys/unix/syscall_aix_ppc64.go | 1 -
 vendor/golang.org/x/sys/unix/syscall_bsd.go | 1 -
 .../golang.org/x/sys/unix/syscall_darwin.go | 186 --
 .../x/sys/unix/syscall_darwin_amd64.go | 2 -
 .../x/sys/unix/syscall_darwin_arm64.go | 2 -
 .../x/sys/unix/syscall_darwin_libSystem.go | 1 -
 .../x/sys/unix/syscall_dragonfly.go | 198 --
 .../x/sys/unix/syscall_dragonfly_amd64.go | 1 -
 .../golang.org/x/sys/unix/syscall_freebsd.go | 192 --
 .../x/sys/unix/syscall_freebsd_386.go | 1 -
 .../x/sys/unix/syscall_freebsd_amd64.go | 1 -
 .../x/sys/unix/syscall_freebsd_arm.go | 1 -
 .../x/sys/unix/syscall_freebsd_arm64.go | 1 -
 .../x/sys/unix/syscall_freebsd_riscv64.go | 1 -
 vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 -
 .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 -
 .../golang.org/x/sys/unix/syscall_illumos.go | 1 -
 vendor/golang.org/x/sys/unix/syscall_linux.go | 118 +-
 .../x/sys/unix/syscall_linux_386.go | 1 -
 .../x/sys/unix/syscall_linux_alarm.go | 2 -
 .../x/sys/unix/syscall_linux_amd64.go | 1 -
 .../x/sys/unix/syscall_linux_amd64_gc.go | 1 -
 .../x/sys/unix/syscall_linux_arm.go | 1 -
 .../x/sys/unix/syscall_linux_arm64.go | 1 -
 .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 -
 .../x/sys/unix/syscall_linux_gc_386.go | 1 -
 .../x/sys/unix/syscall_linux_gc_arm.go | 1 -
 .../x/sys/unix/syscall_linux_gccgo_386.go | 1 -
 .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 -
 .../x/sys/unix/syscall_linux_loong64.go | 1 -
 .../x/sys/unix/syscall_linux_mips64x.go | 2 -
 .../x/sys/unix/syscall_linux_mipsx.go | 2 -
 .../x/sys/unix/syscall_linux_ppc.go | 1 -
 .../x/sys/unix/syscall_linux_ppc64x.go | 2 -
 .../x/sys/unix/syscall_linux_riscv64.go | 1 -
 .../x/sys/unix/syscall_linux_s390x.go | 1 -
 .../x/sys/unix/syscall_linux_sparc64.go | 1 -
 .../golang.org/x/sys/unix/syscall_netbsd.go | 261 --
 .../x/sys/unix/syscall_netbsd_386.go | 1 -
 .../x/sys/unix/syscall_netbsd_amd64.go | 1 -
 .../x/sys/unix/syscall_netbsd_arm.go | 1 -
 .../x/sys/unix/syscall_netbsd_arm64.go | 1 -
 .../golang.org/x/sys/unix/syscall_openbsd.go | 88 +-
 .../x/sys/unix/syscall_openbsd_386.go | 1 -
 .../x/sys/unix/syscall_openbsd_amd64.go | 1 -
 .../x/sys/unix/syscall_openbsd_arm.go | 1 -
 .../x/sys/unix/syscall_openbsd_arm64.go | 1 -
 .../x/sys/unix/syscall_openbsd_libc.go | 1 -
 .../x/sys/unix/syscall_openbsd_ppc64.go | 1 -
 .../x/sys/unix/syscall_openbsd_riscv64.go | 1 -
 .../golang.org/x/sys/unix/syscall_solaris.go | 21 +-
 .../x/sys/unix/syscall_solaris_amd64.go | 1 -
 vendor/golang.org/x/sys/unix/syscall_unix.go | 1 -
 .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 -
 .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 -
 .../x/sys/unix/syscall_zos_s390x.go | 2 -
 vendor/golang.org/x/sys/unix/sysvshm_linux.go | 1 -
 vendor/golang.org/x/sys/unix/sysvshm_unix.go | 1 -
 .../x/sys/unix/sysvshm_unix_other.go | 1 -
 vendor/golang.org/x/sys/unix/timestruct.go | 1 -
 .../golang.org/x/sys/unix/unveil_openbsd.go | 41 +-
 vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 -
 .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 -
 .../x/sys/unix/zerrors_aix_ppc64.go | 1 -
 .../x/sys/unix/zerrors_darwin_amd64.go | 1 -
 .../x/sys/unix/zerrors_darwin_arm64.go | 1 -
 .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 -
 .../x/sys/unix/zerrors_freebsd_386.go | 1 -
 .../x/sys/unix/zerrors_freebsd_amd64.go | 1 -
 .../x/sys/unix/zerrors_freebsd_arm.go | 1 -
 .../x/sys/unix/zerrors_freebsd_arm64.go | 1 -
 .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 -
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 23 +-
 .../x/sys/unix/zerrors_linux_386.go | 3 +-
 .../x/sys/unix/zerrors_linux_amd64.go | 3 +-
 .../x/sys/unix/zerrors_linux_arm.go | 3 +-
 .../x/sys/unix/zerrors_linux_arm64.go | 3 +-
 .../x/sys/unix/zerrors_linux_loong64.go | 6 +-
 .../x/sys/unix/zerrors_linux_mips.go | 3 +-
 .../x/sys/unix/zerrors_linux_mips64.go | 3 +-
 .../x/sys/unix/zerrors_linux_mips64le.go | 3 +-
 .../x/sys/unix/zerrors_linux_mipsle.go | 3 +-
 .../x/sys/unix/zerrors_linux_ppc.go | 3 +-
 .../x/sys/unix/zerrors_linux_ppc64.go | 3 +-
 .../x/sys/unix/zerrors_linux_ppc64le.go | 3 +-
 .../x/sys/unix/zerrors_linux_riscv64.go | 6 +-
 .../x/sys/unix/zerrors_linux_s390x.go | 3 +-
 .../x/sys/unix/zerrors_linux_sparc64.go | 3 +-
 .../x/sys/unix/zerrors_netbsd_386.go | 1 -
 .../x/sys/unix/zerrors_netbsd_amd64.go | 1 -
 .../x/sys/unix/zerrors_netbsd_arm.go | 1 -
 .../x/sys/unix/zerrors_netbsd_arm64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_386.go | 1 -
 .../x/sys/unix/zerrors_openbsd_amd64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_arm.go | 1 -
 .../x/sys/unix/zerrors_openbsd_arm64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_mips64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 -
 .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 -
 .../x/sys/unix/zerrors_solaris_amd64.go | 1 -
 .../x/sys/unix/zerrors_zos_s390x.go | 1 -
 .../x/sys/unix/zptrace_armnn_linux.go | 2 -
 .../x/sys/unix/zptrace_mipsnn_linux.go | 2 -
 .../x/sys/unix/zptrace_mipsnnle_linux.go | 2 -
 .../x/sys/unix/zptrace_x86_linux.go | 2 -
 .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 23 -
 .../x/sys/unix/zsyscall_aix_ppc64.go | 23 -
 .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1 -
 .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1 -
 .../x/sys/unix/zsyscall_darwin_amd64.go | 41 +-
 .../x/sys/unix/zsyscall_darwin_amd64.s | 149 -
 .../x/sys/unix/zsyscall_darwin_arm64.go | 41 +-
 .../x/sys/unix/zsyscall_darwin_arm64.s | 149 -
 .../x/sys/unix/zsyscall_dragonfly_amd64.go | 23 -
 .../x/sys/unix/zsyscall_freebsd_386.go | 23 -
 .../x/sys/unix/zsyscall_freebsd_amd64.go | 23 -
 .../x/sys/unix/zsyscall_freebsd_arm.go | 23 -
 .../x/sys/unix/zsyscall_freebsd_arm64.go | 23 -
 .../x/sys/unix/zsyscall_freebsd_riscv64.go | 23 -
 .../x/sys/unix/zsyscall_illumos_amd64.go | 11 +-
 .../golang.org/x/sys/unix/zsyscall_linux.go | 33 +-
 .../x/sys/unix/zsyscall_linux_386.go | 1 -
 .../x/sys/unix/zsyscall_linux_amd64.go | 1 -
 .../x/sys/unix/zsyscall_linux_arm.go | 1 -
 .../x/sys/unix/zsyscall_linux_arm64.go | 1 -
 .../x/sys/unix/zsyscall_linux_loong64.go | 1 -
 .../x/sys/unix/zsyscall_linux_mips.go | 1 -
 .../x/sys/unix/zsyscall_linux_mips64.go | 1 -
 .../x/sys/unix/zsyscall_linux_mips64le.go | 1 -
 .../x/sys/unix/zsyscall_linux_mipsle.go | 1 -
 .../x/sys/unix/zsyscall_linux_ppc.go | 1 -
 .../x/sys/unix/zsyscall_linux_ppc64.go | 1 -
 .../x/sys/unix/zsyscall_linux_ppc64le.go | 1 -
 .../x/sys/unix/zsyscall_linux_riscv64.go | 1 -
 .../x/sys/unix/zsyscall_linux_s390x.go | 1 -
 .../x/sys/unix/zsyscall_linux_sparc64.go | 1 -
 .../x/sys/unix/zsyscall_netbsd_386.go | 23 -
 .../x/sys/unix/zsyscall_netbsd_amd64.go | 23 -
 .../x/sys/unix/zsyscall_netbsd_arm.go | 23 -
 .../x/sys/unix/zsyscall_netbsd_arm64.go | 23 -
 .../x/sys/unix/zsyscall_openbsd_386.go | 56 +-
 .../x/sys/unix/zsyscall_openbsd_386.s | 15 +
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 46 +-
 .../x/sys/unix/zsyscall_openbsd_amd64.s | 15 +
 .../x/sys/unix/zsyscall_openbsd_arm.go | 56 +-
 .../x/sys/unix/zsyscall_openbsd_arm.s | 15 +
 .../x/sys/unix/zsyscall_openbsd_arm64.go | 56 +-
 .../x/sys/unix/zsyscall_openbsd_arm64.s | 15 +
 .../x/sys/unix/zsyscall_openbsd_mips64.go | 56 +-
 .../x/sys/unix/zsyscall_openbsd_mips64.s | 15 +
 .../x/sys/unix/zsyscall_openbsd_ppc64.go | 56 +-
 .../x/sys/unix/zsyscall_openbsd_ppc64.s | 18 +
 .../x/sys/unix/zsyscall_openbsd_riscv64.go | 56 +-
 .../x/sys/unix/zsyscall_openbsd_riscv64.s | 15 +
 .../x/sys/unix/zsyscall_solaris_amd64.go | 257 +-
 .../x/sys/unix/zsyscall_zos_s390x.go | 12 -
 .../x/sys/unix/zsysctl_openbsd_386.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_arm.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 -
 .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 -
 .../x/sys/unix/zsysnum_darwin_amd64.go | 1 -
 .../x/sys/unix/zsysnum_darwin_arm64.go | 1 -
 .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_386.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_arm.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 -
 .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 -
 .../x/sys/unix/zsysnum_linux_386.go | 3 +-
 .../x/sys/unix/zsysnum_linux_amd64.go | 4 +-
 .../x/sys/unix/zsysnum_linux_arm.go | 3 +-
 .../x/sys/unix/zsysnum_linux_arm64.go | 3 +-
 .../x/sys/unix/zsysnum_linux_loong64.go | 3 +-
 .../x/sys/unix/zsysnum_linux_mips.go | 3 +-
 .../x/sys/unix/zsysnum_linux_mips64.go | 3 +-
 .../x/sys/unix/zsysnum_linux_mips64le.go | 3 +-
 .../x/sys/unix/zsysnum_linux_mipsle.go | 3 +-
 .../x/sys/unix/zsysnum_linux_ppc.go | 3 +-
 .../x/sys/unix/zsysnum_linux_ppc64.go | 3 +-
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 3 +-
 .../x/sys/unix/zsysnum_linux_riscv64.go | 3 +-
 .../x/sys/unix/zsysnum_linux_s390x.go | 3 +-
 .../x/sys/unix/zsysnum_linux_sparc64.go | 3 +-
 .../x/sys/unix/zsysnum_netbsd_386.go | 1 -
 .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 -
 .../x/sys/unix/zsysnum_netbsd_arm.go | 1 -
 .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_386.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_arm.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 -
 .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 -
 .../x/sys/unix/zsysnum_zos_s390x.go | 1 -
 .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 -
 .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 -
 .../x/sys/unix/ztypes_darwin_amd64.go | 1 -
 .../x/sys/unix/ztypes_darwin_arm64.go | 1 -
 .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 -
 .../x/sys/unix/ztypes_freebsd_386.go | 1 -
 .../x/sys/unix/ztypes_freebsd_amd64.go | 1 -
 .../x/sys/unix/ztypes_freebsd_arm.go | 1 -
 .../x/sys/unix/ztypes_freebsd_arm64.go | 1 -
 .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 -
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 21 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 1 -
 .../x/sys/unix/ztypes_linux_amd64.go | 1 -
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 1 -
 .../x/sys/unix/ztypes_linux_arm64.go | 1 -
 .../x/sys/unix/ztypes_linux_loong64.go | 1 -
 .../x/sys/unix/ztypes_linux_mips.go | 1 -
 .../x/sys/unix/ztypes_linux_mips64.go | 1 -
 .../x/sys/unix/ztypes_linux_mips64le.go | 1 -
 .../x/sys/unix/ztypes_linux_mipsle.go | 1 -
 .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 1 -
 .../x/sys/unix/ztypes_linux_ppc64.go | 1 -
 .../x/sys/unix/ztypes_linux_ppc64le.go | 1 -
 .../x/sys/unix/ztypes_linux_riscv64.go | 5 +-
 .../x/sys/unix/ztypes_linux_s390x.go | 1 -
 .../x/sys/unix/ztypes_linux_sparc64.go | 1 -
 .../x/sys/unix/ztypes_netbsd_386.go | 1 -
 .../x/sys/unix/ztypes_netbsd_amd64.go | 1 -
 .../x/sys/unix/ztypes_netbsd_arm.go | 1 -
 .../x/sys/unix/ztypes_netbsd_arm64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_386.go | 1 -
 .../x/sys/unix/ztypes_openbsd_amd64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_arm.go | 1 -
 .../x/sys/unix/ztypes_openbsd_arm64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_mips64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 -
 .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 -
 .../x/sys/unix/ztypes_solaris_amd64.go | 1 -
 .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 1 -
 vendor/golang.org/x/sys/windows/aliases.go | 1 -
 vendor/golang.org/x/sys/windows/empty.s | 1 -
 vendor/golang.org/x/sys/windows/eventlog.go | 1 -
 .../golang.org/x/sys/windows/exec_windows.go | 89 +-
 vendor/golang.org/x/sys/windows/mksyscall.go | 1 -
 vendor/golang.org/x/sys/windows/race.go | 1 -
 vendor/golang.org/x/sys/windows/race0.go | 1 -
 .../x/sys/windows/security_windows.go | 21 +-
 vendor/golang.org/x/sys/windows/service.go | 1 -
 vendor/golang.org/x/sys/windows/str.go | 1 -
 vendor/golang.org/x/sys/windows/syscall.go | 1 -
 .../x/sys/windows/syscall_windows.go | 46 +-
 .../golang.org/x/sys/windows/types_windows.go | 35 +-
 .../x/sys/windows/zsyscall_windows.go | 37 +-
 .../tools/go/internal/packagesdriver/sizes.go | 11 +-
 .../golang.org/x/tools/go/packages/golist.go | 9 +-
 .../x/tools/go/packages/packages.go | 13 +-
 .../x/tools/go/types/objectpath/objectpath.go | 827 ++++++
 .../x/tools/internal/gcimporter/iexport.go | 142 +-
 .../x/tools/internal/gcimporter/iimport.go | 85 +-
 .../x/tools/internal/gocommand/invoke.go | 4 +-
 .../x/tools/internal/gopathwalk/walk.go | 20 +-
 .../x/tools/internal/imports/mod_cache.go | 2 +-
 .../x/tools/internal/imports/zstdlib.go | 230 ++
 .../x/tools/internal/typeparams/common.go | 6 +
 .../x/tools/internal/typeparams/coretype.go | 8 +-
 .../x/tools/internal/typeparams/termlist.go | 2 +-
 .../internal/typeparams/typeparams_go117.go | 2 +-
 .../internal/typeparams/typeparams_go118.go | 2 +-
 .../x/tools/internal/typeparams/typeterm.go | 9 +-
 .../internal/typesinternal/objectpath.go | 24 +
 .../grpc/internal/transport/http2_server.go | 11 +-
 vendor/google.golang.org/grpc/server.go | 71 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 .../managedfields/internal/structuredmerge.go | 3 +
 .../apimachinery/pkg/util/runtime/runtime.go | 15 +-
 vendor/modules.txt | 83 +-
 621 files changed, 14840 insertions(+), 12664 deletions(-)
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
 delete mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/.gitignore
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/LICENSE
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/README.md
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/atn.go (94%)
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_config.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/atn_deserialization_options.go (86%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/atn_deserializer.go (97%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/atn_simulator.go (66%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/atn_state.go (65%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/atn_type.go (100%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/char_stream.go (89%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/common_token_factory.go (100%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/common_token_stream.go (88%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/comparators.go (82%)
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/configuration.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/dfa.go (76%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/dfa_serializer.go (97%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/dfa_state.go (81%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/diagnostic_error_listener.go (92%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/error_listener.go (62%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/error_strategy.go (58%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/errors.go (73%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/file_stream.go (52%)
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/input_stream.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/int_stream.go (100%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/interval_set.go (82%)
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/jcollect.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/lexer.go (78%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/lexer_action.go (78%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/lexer_action_executor.go (70%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/lexer_atn_simulator.go (80%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/ll1_analyzer.go (73%)
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/parser.go (80%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/parser_atn_simulator.go (64%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/parser_rule_context.go (77%)
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/recognizer.go (70%)
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/rule_context.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/semantic_context.go (92%)
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/statistics.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/stats_data.go
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/token.go (86%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/token_source.go (100%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/token_stream.go (90%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/tokenstream_rewriter.go (73%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/trace_listener.go (100%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/transition.go (67%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/tree.go (62%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/trees.go (81%)
 rename vendor/github.com/{antlr/antlr4/runtime/Go => antlr4-go}/antlr/v4/utils.go (85%)
 create mode 100644 vendor/github.com/cilium/cilium/pkg/command/exec/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/command/exec/exec.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go
 create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/watchers/resources/resources.go
 rename vendor/github.com/{miekg => cilium}/dns/.codecov.yml (100%)
 rename vendor/github.com/{miekg => cilium}/dns/.gitignore (100%)
 rename vendor/github.com/{miekg => cilium}/dns/AUTHORS (100%)
 rename vendor/github.com/{miekg => cilium}/dns/CODEOWNERS (100%)
 rename vendor/github.com/{miekg => cilium}/dns/CONTRIBUTORS (100%)
 rename vendor/github.com/{miekg => cilium}/dns/COPYRIGHT (100%)
 rename vendor/github.com/{miekg => cilium}/dns/LICENSE (100%)
 rename vendor/github.com/{miekg => cilium}/dns/Makefile.fuzz (100%)
 rename vendor/github.com/{miekg => cilium}/dns/Makefile.release (100%)
 rename vendor/github.com/{miekg => cilium}/dns/README.md (100%)
 rename vendor/github.com/{miekg => cilium}/dns/acceptfunc.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/client.go (98%)
 rename vendor/github.com/{miekg => cilium}/dns/clientconfig.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/dane.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/defaults.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/dns.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/dnssec.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/dnssec_keygen.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/dnssec_keyscan.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/dnssec_privkey.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/doc.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/duplicate.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/edns.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/format.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/fuzz.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/generate.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/hash.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/labels.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/listen_no_reuseport.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/listen_reuseport.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/msg.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/msg_helpers.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/msg_truncate.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/nsecx.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/privaterr.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/reverse.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/sanitize.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/scan.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/scan_rr.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/serve_mux.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/server.go (100%)
 create mode 100644 vendor/github.com/cilium/dns/shared_client.go
 rename vendor/github.com/{miekg => cilium}/dns/sig0.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/singleinflight.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/smimea.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/svcb.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/tlsa.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/tools.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/tsig.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/types.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/udp.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/udp_windows.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/update.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/version.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/xfr.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/zduplicate.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/zmsg.go (100%)
 rename vendor/github.com/{miekg => cilium}/dns/ztypes.go (100%)
 create mode 100644 vendor/github.com/cilium/ebpf/features/doc.go
 create mode 100644 vendor/github.com/cilium/ebpf/features/map.go
 create mode 100644 vendor/github.com/cilium/ebpf/features/misc.go
 create mode 100644 vendor/github.com/cilium/ebpf/features/prog.go
 create mode 100644 vendor/github.com/cilium/ebpf/features/version.go
 create mode 100644 vendor/github.com/containerd/containerd/log/context_deprecated.go
 create mode 100644 vendor/github.com/containerd/log/.golangci.yml
 create mode 100644 vendor/github.com/containerd/log/LICENSE
 create mode 100644 vendor/github.com/containerd/log/README.md
 rename vendor/github.com/containerd/{containerd => }/log/context.go (100%)
 delete mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
 delete mode 100644 vendor/github.com/hashicorp/errwrap/README.md
 delete mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/group.go
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
 delete mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go
 delete mode 100644 vendor/github.com/spf13/cobra/active_help.md
 delete mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
 delete mode 100644 vendor/github.com/spf13/cobra/doc/README.md
 delete mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.md
 delete mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.md
 delete mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.md
 delete mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.md
 delete mode 100644 vendor/github.com/spf13/cobra/fish_completions.md
 delete mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md
 delete mode 100644 vendor/github.com/spf13/cobra/projects_using_cobra.md
 delete mode 100644 vendor/github.com/spf13/cobra/shell_completions.md
 delete mode 100644 vendor/github.com/spf13/cobra/user_guide.md
 delete mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/filter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go
 delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile
 delete mode 100644 vendor/golang.org/x/net/http2/Makefile
 delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go
 create mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
 create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/objectpath.go

diff --git a/go.mod b/go.mod
index 48b2f9bfa64..4b329fb75f7 100644
--- a/go.mod
+++ b/go.mod
@@ -4,36 +4,36 @@ go 1.21
 
 require (
 	github.com/bombsimon/logrusr/v4 v4.0.0
-	github.com/cilium/cilium v1.14.2
+	github.com/cilium/cilium v1.14.4
 	github.com/cilium/ebpf v0.11.0
-	github.com/cilium/little-vm-helper v0.0.13-0.20230822094713-37431633085a
+	github.com/cilium/little-vm-helper v0.0.13
 	github.com/cilium/lumberjack/v2 v2.3.0
 	github.com/cilium/tetragon/api v0.0.0-00010101000000-000000000000
 	github.com/cilium/tetragon/pkg/k8s v0.0.0-00010101000000-000000000000
 	github.com/containerd/cgroups v1.1.0
-	github.com/containerd/containerd v1.7.6
+	github.com/containerd/containerd v1.7.10
 	github.com/fatih/color v1.15.0
 	github.com/go-openapi/strfmt v0.21.7
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/protobuf v1.5.3
-	github.com/google/cel-go v0.18.1
+	github.com/google/cel-go v0.18.2
 	github.com/google/go-cmp v0.5.9
 	github.com/google/gops v0.3.28
 	github.com/google/uuid v1.3.1
 	github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0
-	github.com/hashicorp/golang-lru/v2 v2.0.6
+	github.com/hashicorp/golang-lru/v2 v2.0.7
 	github.com/iancoleman/strcase v0.3.0
 	github.com/jpillora/longestcommon v0.0.0-20161227235612-adb9d91ee629
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
 	github.com/mennanov/fmutils v0.2.1
-	github.com/opencontainers/runc v1.1.9
+	github.com/opencontainers/runc v1.1.10
 	github.com/opencontainers/runtime-spec v1.1.0
 	github.com/pelletier/go-toml v1.9.5
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.17.0
 	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16
 	github.com/sirupsen/logrus v1.9.3
-	github.com/spf13/cobra v1.7.0
+	github.com/spf13/cobra v1.8.0
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.16.0
 	github.com/sryoya/protorand v0.0.0-20230527172419-e5ae2594eadc
@@ -43,18 +43,18 @@ require (
 	go.uber.org/multierr v1.11.0
 	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
 	golang.org/x/sync v0.3.0
-	golang.org/x/sys v0.12.0
+	golang.org/x/sys v0.14.0
 	golang.org/x/time v0.3.0
-	google.golang.org/grpc v1.58.2
+	google.golang.org/grpc v1.58.3
 	google.golang.org/protobuf v1.31.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.27.6
-	k8s.io/apiextensions-apiserver v0.27.6
-	k8s.io/apimachinery v0.27.6
-	k8s.io/client-go v0.27.6
-	k8s.io/code-generator v0.27.6
+	k8s.io/api v0.27.8
+	k8s.io/apiextensions-apiserver v0.27.8
+	k8s.io/apimachinery v0.27.8
+	k8s.io/client-go v0.27.8
+	k8s.io/code-generator v0.27.8
 	k8s.io/klog/v2 v2.100.1
-	sigs.k8s.io/controller-runtime v0.15.2
+	sigs.k8s.io/controller-runtime v0.15.3
 	sigs.k8s.io/controller-tools v0.12.1
 	sigs.k8s.io/e2e-framework v0.2.0
 	sigs.k8s.io/yaml v1.3.0
@@ -62,19 +62,21 @@ require (
 	github.com/BurntSushi/toml v1.0.0 // indirect
-	github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
 	github.com/armon/go-metrics v0.4.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/cilium/dns v1.1.51-0.20231108175042-eaf71f6affd2 // indirect
 	github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c // indirect
 	github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect
+	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/ttrpc v1.2.2 // indirect
 	github.com/containerd/typeurl/v2 v2.1.1 // indirect
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
@@ -104,12 +106,10 @@ require (
 	github.com/google/gopacket v1.1.19 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0 // indirect
 	github.com/hashicorp/consul/api v1.21.0 // indirect
-	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-hclog v1.2.0 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-msgpack v0.5.5 // indirect
-	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
 	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/hashicorp/golang-lru v0.5.4 // indirect
@@ -127,7 +127,6 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.17 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/miekg/dns v1.1.43 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
@@ -164,17 +163,17 @@ require (
 	go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
 	go.etcd.io/etcd/client/v3 v3.5.9 // indirect
 	go.mongodb.org/mongo-driver v1.11.3 // indirect
-	go.opentelemetry.io/otel v1.16.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
+	go.opentelemetry.io/otel v1.19.0 // indirect
+	go.opentelemetry.io/otel/metric v1.19.0 // indirect
+	go.opentelemetry.io/otel/trace v1.19.0 // indirect
 	go.uber.org/dig v1.17.0 // indirect
 	go.uber.org/zap v1.24.0 // indirect
 	golang.org/x/mod v0.12.0 // indirect
-	golang.org/x/net v0.14.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
 	golang.org/x/oauth2 v0.10.0 // indirect
-	golang.org/x/term v0.11.0 // indirect
+	golang.org/x/term v0.13.0 // indirect
 	golang.org/x/text v0.13.0 // indirect
-	golang.org/x/tools v0.11.0 // indirect
+	golang.org/x/tools v0.13.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
@@ -183,7 +182,7 @@ require (
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	k8s.io/component-base v0.27.6 // indirect
+	k8s.io/component-base v0.27.8 // indirect
 	k8s.io/gengo v0.0.0-20230306165830-ab3349d207d4 // indirect
 	k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
 	k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
diff --git a/go.sum b/go.sum
index 20d21545460..f77368d1a62 100644
--- a/go.sum
+++ b/go.sum
@@ -396,8 +396,8 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
-github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
@@ -407,8 +407,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
@@ -448,18 +448,20 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w=
 github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A=
-github.com/cilium/cilium v1.14.2 h1:NVz14uSoKB0Y37iFveMQGYH03ZflEuZegmQWpTMf5TM=
-github.com/cilium/cilium v1.14.2/go.mod h1:ghd9LkTSbRPtJal0Bsdq1ise+j5Ezy14xgaM2o3XLCI=
+github.com/cilium/cilium v1.14.4 h1:iJ/LgrFMTHhl0/fMas6T1IY7VWGhtkh/B4v921GcNQk=
+github.com/cilium/cilium v1.14.4/go.mod h1:lz48ydfHJ2Q/dqAOIrAzQXmypA5w1GAi7vipQorETE8=
 github.com/cilium/client-go v0.27.2-fix h1:dbFQGtP5Y1lFVBhkMCIAwyf/PUWFBn8M1ZTCKA6BUdw=
 github.com/cilium/client-go v0.27.2-fix/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ=
 github.com/cilium/controller-tools v0.6.2 h1:oIkqAzqncKsm+lQFJVP6n+bqHOVs9nUZ06hgZ4PxlMM=
 github.com/cilium/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8=
 github.com/cilium/dns v1.1.51-0.20220729113855-5b94b11b46fc h1:tNEOByqRaBwlf1ANK/U/sd5oOIkxkPkpCVqaabxvU84=
 github.com/cilium/dns v1.1.51-0.20220729113855-5b94b11b46fc/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/cilium/dns v1.1.51-0.20231108175042-eaf71f6affd2 h1:lzrDLyBuW1kV77Uku8StYODSgYRajRkV1Vvw0vaUDT8=
+github.com/cilium/dns v1.1.51-0.20231108175042-eaf71f6affd2/go.mod h1:/7LC2GOgyXJ7maupZlaVIumYQiGPIgllSf6mA9sg6RU=
 github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y=
 github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
-github.com/cilium/little-vm-helper v0.0.13-0.20230822094713-37431633085a h1:A507gvPbURMidkslEIOENfyLiqr5vSxsiPoD/V4GXrg=
-github.com/cilium/little-vm-helper v0.0.13-0.20230822094713-37431633085a/go.mod h1:wErT4smPfNaVj2s9iaTKzP/Q8hN/5o5SH5GAp2OPeRA=
+github.com/cilium/little-vm-helper v0.0.13 h1:pRyaz3Rg+S6u23z7EDOrTtBRRleZ1md2D/BTMbz3d30=
+github.com/cilium/little-vm-helper v0.0.13/go.mod h1:mtoEi6aNRMWCkyyP6y+K7+/KUUgQWtWZ4a7L69Fg3WU=
 github.com/cilium/lumberjack/v2 v2.3.0 h1:IhVJMvPpqDYmQzC0KDhAoy7KlaRsyOsZnT97Nsa3u0o=
 github.com/cilium/lumberjack/v2 v2.3.0/go.mod h1:yfbtPGmg4i//5oEqzaMxDqSWqgfZFmMoV70Mc2k6v0A=
 github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c h1:/NqY4jLr92f7VcUJe1gHS6CgSGWFUCeD2f4QhxO8tgE=
@@ -483,8 +485,10 @@ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWH
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
 github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
-github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8=
-github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4=
+github.com/containerd/containerd v1.7.10 h1:2nfZyT8BV0C3iKu/SsGxKVAf9dp5W7l9nA8JmWmDGuo=
+github.com/containerd/containerd v1.7.10/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs=
 github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
 github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
@@ -503,8 +507,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -563,10 +567,10 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
-github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
-github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
-github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A=
-github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git/v5 v5.10.0 h1:F0x3xXrAWmhwtzoCokU4IMPcBdncG+HAAqi9FcOOjbQ=
+github.com/go-git/go-git/v5 v5.10.0/go.mod h1:1FOZ/pQnqw24ghP2n7cunVl0ON55BsjPYvhWHvZGhoo=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -712,8 +716,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cel-go v0.18.1 h1:V/lAXKq4C3BYLDy/ARzMtpkEEYfHQpZzVyzy69nEUjs=
-github.com/google/cel-go v0.18.1/go.mod h1:PVAybmSnWkNMUZR/tEWFUiJ1Np4Hz0MHsZJcgC4zln4=
+github.com/google/cel-go v0.18.2 h1:L0B6sNBSVmt0OyECi8v6VOS74KOc9W/tLiWKfZABvf4=
+github.com/google/cel-go v0.18.2/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
 github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
 github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -842,8 +846,8 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru/v2 v2.0.6 h1:3xi/Cafd1NaoEnS/yDssIiuVeDVywU0QdFGl3aQaQHM=
-github.com/hashicorp/golang-lru/v2 v2.0.6/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
@@ -1027,8 +1031,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
 github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
-github.com/opencontainers/runc v1.1.9 h1:XR0VIHTGce5eWPkaPesqTBrhW2yAcaraWfsEalNwQLM=
-github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
+github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
+github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
 github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
 github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
@@ -1145,8 +1149,8 @@ github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
 github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
-github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -1247,14 +1251,14 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
-go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
-go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
-go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
-go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
-go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
-go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
-go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
+go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
+go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
+go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
+go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
+go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -1290,8 +1294,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
-golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1410,8 +1414,8 @@ golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
-golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1566,8 +1570,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.9.1-0.20230616193735-e0c3b6e6ae3b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0
h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1577,8 +1581,8 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1677,8 +1681,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= -golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1903,8 +1907,8 @@ google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= -google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1973,22 +1977,22 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4= -k8s.io/api v0.27.6 h1:PBWu/lywJe2qQcshMjubzcBg7+XDZOo7O8JJAWuYtUo= -k8s.io/api v0.27.6/go.mod h1:AQYj0UsFCp3qJE7bOVnUuy4orCsXVkvHefnbYQiNWgk= +k8s.io/api v0.27.8 h1:Ja93gbyII5c3TJzWefEwGhlqC5SZksWhzRS+OYHIocU= +k8s.io/api v0.27.8/go.mod h1:2HuWJC6gpx4UScY+ezziNzv6j6Jqd2q0rGgobYSSjcs= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= -k8s.io/apiextensions-apiserver v0.27.6 h1:mOwSBJtThZhpJr+8gEkc3wFDIjq87E3JspR5mtZxIg8= -k8s.io/apiextensions-apiserver v0.27.6/go.mod h1:AVNlLYRrESG5Poo6ASRUhY2pvoKPcNt8y/IuZ4lx3o8= +k8s.io/apiextensions-apiserver v0.27.8 h1:u9PON71euIhVbHdZ5YlznpY60GtRjPagf1mQXLo1siA= +k8s.io/apiextensions-apiserver v0.27.8/go.mod h1:qBlRJTKCA0gnFVCsjzbz3YJZ49TCBNEwvEF2TPMRqOs= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= -k8s.io/apimachinery v0.27.6 h1:mGU8jmBq5o8mWBov+mLjdTBcU+etTE19waies4AQ6NE= -k8s.io/apimachinery v0.27.6/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/apimachinery v0.27.8 h1:Xg+ogjDm8s7KmV3vZGf7uOZ0jrC6FPy2Lk/h7BIRmvg= +k8s.io/apimachinery v0.27.8/go.mod h1:EIXLxLt/b1muPITiF5zlrcm7I+YnXsIgM+0GdnPTQvA= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= -k8s.io/code-generator v0.27.6 h1:1zkSDvylcA11s91aYg5U7fZ24EXMZ+KIDOj/Z3Ti4c8= -k8s.io/code-generator v0.27.6/go.mod h1:DPung1sI5vBgn4AGKtlPRQAyagj/ir/4jI55ipZHVww= +k8s.io/code-generator v0.27.8 h1:O5vWP56nidZl0PiSNdyooar0sHh8bsSeN8W0nE6jn6E= +k8s.io/code-generator v0.27.8/go.mod h1:NEx95JBRos8MSki+CuSoiEyKk6yv1rC4z/eY8DCZ/Rw= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= -k8s.io/component-base v0.27.6 h1:hF5WxX7Tpi9/dXAbLjPVkIA6CA6Pi6r9JOHyo0uCDYI= -k8s.io/component-base v0.27.6/go.mod h1:NvjLtaneUeb0GgMPpCBF+4LNB9GuhDHi16uUTjBhQfU= +k8s.io/component-base v0.27.8 h1:O8YRFv/wWvoo9z62p1N52lq+w5FpzILAlE1h8b9o3K8= +k8s.io/component-base v0.27.8/go.mod h1:h3uyZl+bFQeuLRz3owfSLaw3JKTrn6gmbvybkkW2z+I= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -2013,8 +2017,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.15.2 h1:9V7b7SDQSJ08IIsJ6CY1CE85Okhp87dyTMNDG0FS7f4= -sigs.k8s.io/controller-runtime v0.15.2/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/controller-runtime v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc= +sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds= sigs.k8s.io/e2e-framework v0.2.0 h1:gD6AWWAHFcHibI69E9TgkNFhh0mVwWtRCHy2RU057jQ= sigs.k8s.io/e2e-framework v0.2.0/go.mod 
h1:E6JXj/V4PIlb95jsn2WrNKG+Shb45xaaI7C0+BH4PL8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE deleted file mode 100644 index 52cf18e425e..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright 2021 The ANTLR Project - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - 3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go deleted file mode 100644 index ab51212676f..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Package antlr implements the Go version of the ANTLR 4 runtime. - -# The ANTLR Tool - -ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, -or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. -From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface -(or visitor) that makes it easy to respond to the recognition of phrases of interest. - -# Code Generation - -ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a -runtime library, written specifically to support the generated code in the target language. This library is the -runtime for the Go target. - -To generate code for the go target, it is generally recommended to place the source grammar files in a package of -their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory -it is usual, though not required, to place the antlr tool that should be used to generate the code. 
That does mean -that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other -way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in -your IDE, or configuration in your CI system. - -Here is a general template for an ANTLR based recognizer in Go: - - . - ├── myproject - ├── parser - │ ├── mygrammar.g4 - │ ├── antlr-4.12.0-complete.jar - │ ├── error_listeners.go - │ ├── generate.go - │ ├── generate.sh - ├── go.mod - ├── go.sum - ├── main.go - └── main_test.go - -Make sure that the package statement in your grammar file(s) reflects the go package they exist in. -The generate.go file then looks like this: - - package parser - - //go:generate ./generate.sh - -And the generate.sh file will look similar to this: - - #!/bin/sh - - alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' - antlr4 -Dlanguage=Go -no-visitor -package parser *.g4 - -depending on whether you want visitors or listeners or any other ANTLR options. - -From the command line at the root of your package “myproject” you can then simply issue the command: - - go generate ./... - -# Copyright Notice - -Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - -Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. - -[target languages]: https://github.com/antlr/antlr4/tree/master/runtime -[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt -*/ -package antlr diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go deleted file mode 100644 index 7619fa172ed..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic -// context). The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) chain used to arrive at the -// state. The semantic context is the tree of semantic predicates encountered -// before reaching an ATN state. 
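Before the interface definition that follows, it may help to see the tuple spelled out. Here is a minimal, self-contained sketch of what "two configurations are equal" means for such a tuple — simplified types for illustration only, not the runtime's own API (the real contexts are graph-structured values, reduced to strings here):

    package main

    import "fmt"

    // config is a simplified stand-in for the (state, alt, syntactic context,
    // semantic context) tuple described above; both contexts are reduced to
    // plain strings purely for illustration.
    type config struct {
        state, alt int
        ctx, sem   string
    }

    // equal mirrors the equality rule stated in the comment above: same state,
    // same predicted alternative, and matching syntactic/semantic contexts.
    func equal(a, b config) bool {
        return a.state == b.state && a.alt == b.alt && a.ctx == b.ctx && a.sem == b.sem
    }

    func main() {
        a := config{state: 7, alt: 1, ctx: "$", sem: "none"}
        b := config{state: 7, alt: 1, ctx: "$", sem: "none"}
        fmt.Println(equal(a, b)) // true
    }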
-type ATNConfig interface { - Equals(o Collectable[ATNConfig]) bool - Hash() int - - GetState() ATNState - GetAlt() int - GetSemanticContext() SemanticContext - - GetContext() PredictionContext - SetContext(PredictionContext) - - GetReachesIntoOuterContext() int - SetReachesIntoOuterContext(int) - - String() string - - getPrecedenceFilterSuppressed() bool - setPrecedenceFilterSuppressed(bool) -} - -type BaseATNConfig struct { - precedenceFilterSuppressed bool - state ATNState - alt int - context PredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int -} - -func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup - return &BaseATNConfig{ - state: old.state, - alt: old.alt, - context: old.context, - semanticContext: old.semanticContext, - reachesIntoOuterContext: old.reachesIntoOuterContext, - } -} - -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig5(state, alt, context, SemanticContextNone) -} - -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Necessary? - } - - return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} -} - -func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) -} - -func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) -} - -func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) -} - -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) -} - -func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") - } - - return &BaseATNConfig{ - state: state, - alt: c.GetAlt(), - context: context, - semanticContext: semanticContext, - reachesIntoOuterContext: c.GetReachesIntoOuterContext(), - precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), - } -} - -func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { - return b.precedenceFilterSuppressed -} - -func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { - b.precedenceFilterSuppressed = v -} - -func (b *BaseATNConfig) GetState() ATNState { - return b.state -} - -func (b *BaseATNConfig) GetAlt() int { - return b.alt -} - -func (b *BaseATNConfig) SetContext(v PredictionContext) { - b.context = v -} -func (b *BaseATNConfig) GetContext() PredictionContext { - return b.context -} - -func (b *BaseATNConfig) GetSemanticContext() SemanticContext { - return b.semanticContext -} - -func (b *BaseATNConfig) GetReachesIntoOuterContext() int { - return b.reachesIntoOuterContext -} - -func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { - b.reachesIntoOuterContext = v -} - -// Equals is the default comparison function for an ATNConfig when no specialist implementation is required -// for a collection. -// -// An ATN configuration is equal to another if both have the same state, they -// predict the same alternative, and syntactic/semantic contexts are the same. 
-func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool { - if b == o { - return true - } else if o == nil { - return false - } - - var other, ok = o.(*BaseATNConfig) - - if !ok { - return false - } - - var equal bool - - if b.context == nil { - equal = other.context == nil - } else { - equal = b.context.Equals(other.context) - } - - var ( - nums = b.state.GetStateNumber() == other.state.GetStateNumber() - alts = b.alt == other.alt - cons = b.semanticContext.Equals(other.semanticContext) - sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed - ) - - return nums && alts && cons && sups && equal -} - -// Hash is the default hash function for BaseATNConfig, when no specialist hash function -// is required for a collection -func (b *BaseATNConfig) Hash() int { - var c int - if b.context != nil { - c = b.context.Hash() - } - - h := murmurInit(7) - h = murmurUpdate(h, b.state.GetStateNumber()) - h = murmurUpdate(h, b.alt) - h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.Hash()) - return murmurFinish(h, 4) -} - -func (b *BaseATNConfig) String() string { - var s1, s2, s3 string - - if b.context != nil { - s1 = ",[" + fmt.Sprint(b.context) + "]" - } - - if b.semanticContext != SemanticContextNone { - s2 = "," + fmt.Sprint(b.semanticContext) - } - - if b.reachesIntoOuterContext > 0 { - s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) - } - - return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) -} - -type LexerATNConfig struct { - *BaseATNConfig - lexerActionExecutor *LexerActionExecutor - passedThroughNonGreedyDecision bool -} - -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), - lexerActionExecutor: lexerActionExecutor, - } -} - -func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. 
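BaseATNConfig.Hash above and LexerATNConfig.Hash below follow the same fold pattern: seed a hash, mix in each field in order, then finalize. The standalone sketch here reproduces that shape with the standard library's FNV-1a hash in place of the runtime's murmur helpers — a substitution made only to keep the example self-contained:

    package main

    import (
        "encoding/binary"
        "fmt"
        "hash/fnv"
    )

    // foldHash mixes an ordered list of integer fields into a single hash
    // value, the same seed/update/finish shape as the murmur-based methods.
    func foldHash(fields ...int) uint32 {
        h := fnv.New32a()
        buf := make([]byte, 4)
        for _, f := range fields {
            binary.LittleEndian.PutUint32(buf, uint32(f))
            h.Write(buf) // mix in one field; order matters
        }
        return h.Sum32()
    }

    func main() {
        // state number, alt, context hash, semantic-context hash
        fmt.Println(foldHash(7, 1, 42, 0))
    }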
-func (l *LexerATNConfig) Hash() int { - var f int - if l.passedThroughNonGreedyDecision { - f = 1 - } else { - f = 0 - } - h := murmurInit(7) - h = murmurUpdate(h, l.state.GetStateNumber()) - h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.Hash()) - h = murmurUpdate(h, l.semanticContext.Hash()) - h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.Hash()) - h = murmurFinish(h, 6) - return h -} - -// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. -func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool { - if l == other { - return true - } - var othert, ok = other.(*LexerATNConfig) - - if l == other { - return true - } else if !ok { - return false - } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { - return false - } - - var b bool - - if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor) - } else { - b = othert.lexerActionExecutor != nil - } - - if b { - return false - } - - return l.BaseATNConfig.Equals(othert.BaseATNConfig) -} - -func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { - var ds, ok = target.(DecisionState) - - return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go deleted file mode 100644 index 43e9b33f3bd..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -type ATNConfigSet interface { - Hash() int - Equals(o Collectable[ATNConfig]) bool - Add(ATNConfig, *DoubleDict) bool - AddAll([]ATNConfig) bool - - GetStates() *JStore[ATNState, Comparator[ATNState]] - GetPredicates() []SemanticContext - GetItems() []ATNConfig - - OptimizeConfigs(interpreter *BaseATNSimulator) - - Length() int - IsEmpty() bool - Contains(ATNConfig) bool - ContainsFast(ATNConfig) bool - Clear() - String() string - - HasSemanticContext() bool - SetHasSemanticContext(v bool) - - ReadOnly() bool - SetReadOnly(bool) - - GetConflictingAlts() *BitSet - SetConflictingAlts(*BitSet) - - Alts() *BitSet - - FullContext() bool - - GetUniqueAlt() int - SetUniqueAlt(int) - - GetDipsIntoOuterContext() bool - SetDipsIntoOuterContext(bool) -} - -// BaseATNConfigSet is a specialized set of ATNConfig that tracks information -// about its elements and can combine similar configurations using a -// graph-structured stack. -type BaseATNConfigSet struct { - cachedHash int - - // configLookup is used to determine whether two BaseATNConfigSets are equal. We - // need all configurations with the same (s, i, _, semctx) to be equal. A key - // effectively doubles the number of objects associated with ATNConfigs. All - // keys are hashed by (s, i, _, pi), not including the context. Wiped out when - // read-only because a set becomes a DFA state. - configLookup *JStore[ATNConfig, Comparator[ATNConfig]] - - // configs is the added elements. - configs []ATNConfig - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. 
Can we track conflicts as they - // are added to save scanning configs later? - conflictingAlts *BitSet - - // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates - // we hit a pred while computing a closure operation. Do not make a DFA state - // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? - dipsIntoOuterContext bool - - // fullCtx is whether it is part of a full context LL prediction. Used to - // determine how to merge $. It is a wildcard with SLL, but not for an LL - // context merge. - fullCtx bool - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from a. - hasSemanticContext bool - - // readOnly is whether it is read-only. Do not - // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not, protect other fields; conflictingAlts - // in particular, which is assigned after readOnly. - readOnly bool - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - uniqueAlt int -} - -func (b *BaseATNConfigSet) Alts() *BitSet { - alts := NewBitSet() - for _, it := range b.configs { - alts.add(it.GetAlt()) - } - return alts -} - -func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { - return &BaseATNConfigSet{ - cachedHash: -1, - configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst), - fullCtx: fullCtx, - } -} - -// Add merges contexts with existing configs for (s, i, pi, _), where s is the -// ATNConfig.state, i is the ATNConfig.alt, and pi is the -// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates -// dipsIntoOuterContext and hasSemanticContext when necessary. -func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { - if b.readOnly { - panic("set is read-only") - } - - if config.GetSemanticContext() != SemanticContextNone { - b.hasSemanticContext = true - } - - if config.GetReachesIntoOuterContext() > 0 { - b.dipsIntoOuterContext = true - } - - existing, present := b.configLookup.Put(config) - - // The config was not already in the set - // - if !present { - b.cachedHash = -1 - b.configs = append(b.configs, config) // Track order here - return true - } - - // Merge a previous (s, i, pi, _) with it and save the result - rootIsWildcard := !b.fullCtx - merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) - - // No need to check for existing.context because config.context is in the cache, - // since the only way to create new graphs is the "call rule" and here. We cache - // at both places. 
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) - - // Preserve the precedence filter suppression during the merge - if config.getPrecedenceFilterSuppressed() { - existing.setPrecedenceFilterSuppressed(true) - } - - // Replace the context because there is no need to do alt mapping - existing.SetContext(merged) - - return true -} - -func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { - - // states uses the standard comparator provided by the ATNState instance - // - states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst) - - for i := 0; i < len(b.configs); i++ { - states.Put(b.configs[i].GetState()) - } - - return states -} - -func (b *BaseATNConfigSet) HasSemanticContext() bool { - return b.hasSemanticContext -} - -func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { - b.hasSemanticContext = v -} - -func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { - preds := make([]SemanticContext, 0) - - for i := 0; i < len(b.configs); i++ { - c := b.configs[i].GetSemanticContext() - - if c != SemanticContextNone { - preds = append(preds, c) - } - } - - return preds -} - -func (b *BaseATNConfigSet) GetItems() []ATNConfig { - return b.configs -} - -func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { - if b.readOnly { - panic("set is read-only") - } - - if b.configLookup.Len() == 0 { - return - } - - for i := 0; i < len(b.configs); i++ { - config := b.configs[i] - - config.SetContext(interpreter.getCachedContext(config.GetContext())) - } -} - -func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { - for i := 0; i < len(coll); i++ { - b.Add(coll[i], nil) - } - - return false -} - -// Compare is a hack function just to verify that adding DFAstares to the known -// set works, so long as comparison of ATNConfigSet s works. For that to work, we -// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't -// know the order, so we do this inefficient hack. If this proves the point, then -// we can change the config set to a better structure. 
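The Compare hack documented above (its implementation follows) is just a quadratic, order-insensitive membership check. A standalone sketch over int slices shows the idea; like the original, it ignores element multiplicity, which is why the comment calls it a hack:

    package main

    import "fmt"

    // sameElements reports whether a and b have the same length and every
    // element of a occurs somewhere in b — the same O(n²) scan Compare
    // performs over two config slices whose ordering is unknown.
    func sameElements(a, b []int) bool {
        if len(a) != len(b) {
            return false
        }
        for _, x := range a {
            found := false
            for _, y := range b {
                if x == y {
                    found = true
                    break
                }
            }
            if !found {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(sameElements([]int{1, 2, 3}, []int{3, 1, 2})) // true
        fmt.Println(sameElements([]int{1, 2, 3}, []int{1, 2, 4})) // false
    }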
-func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool { - if len(b.configs) != len(bs.configs) { - return false - } - - for _, c := range b.configs { - found := false - for _, c2 := range bs.configs { - if c.Equals(c2) { - found = true - break - } - } - - if !found { - return false - } - - } - return true -} - -func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool { - if b == other { - return true - } else if _, ok := other.(*BaseATNConfigSet); !ok { - return false - } - - other2 := other.(*BaseATNConfigSet) - - return b.configs != nil && - b.fullCtx == other2.fullCtx && - b.uniqueAlt == other2.uniqueAlt && - b.conflictingAlts == other2.conflictingAlts && - b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext && - b.Compare(other2) -} - -func (b *BaseATNConfigSet) Hash() int { - if b.readOnly { - if b.cachedHash == -1 { - b.cachedHash = b.hashCodeConfigs() - } - - return b.cachedHash - } - - return b.hashCodeConfigs() -} - -func (b *BaseATNConfigSet) hashCodeConfigs() int { - h := 1 - for _, config := range b.configs { - h = 31*h + config.Hash() - } - return h -} - -func (b *BaseATNConfigSet) Length() int { - return len(b.configs) -} - -func (b *BaseATNConfigSet) IsEmpty() bool { - return len(b.configs) == 0 -} - -func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) -} - -func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set -} - -func (b *BaseATNConfigSet) Clear() { - if b.readOnly { - panic("set is read-only") - } - - b.configs = make([]ATNConfig, 0) - b.cachedHash = -1 - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) -} - -func (b *BaseATNConfigSet) FullContext() bool { - return b.fullCtx -} - -func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { - return b.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { - b.dipsIntoOuterContext = v -} - -func (b *BaseATNConfigSet) GetUniqueAlt() int { - return b.uniqueAlt -} - -func (b *BaseATNConfigSet) SetUniqueAlt(v int) { - b.uniqueAlt = v -} - -func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { - return b.conflictingAlts -} - -func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { - b.conflictingAlts = v -} - -func (b *BaseATNConfigSet) ReadOnly() bool { - return b.readOnly -} - -func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { - b.readOnly = readOnly - - if readOnly { - b.configLookup = nil // Read only, so no need for the lookup cache - } -} - -func (b *BaseATNConfigSet) String() string { - s := "[" - - for i, c := range b.configs { - s += c.String() - - if i != len(b.configs)-1 { - s += ", " - } - } - - s += "]" - - if b.hasSemanticContext { - s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) - } - - if b.uniqueAlt != ATNInvalidAltNumber { - s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) - } - - if b.conflictingAlts != nil { - s += ",conflictingAlts=" + b.conflictingAlts.String() - } - - if b.dipsIntoOuterContext { - s += ",dipsIntoOuterContext" - } - - return s -} - -type OrderedATNConfigSet struct { - *BaseATNConfigSet -} - -func NewOrderedATNConfigSet() *OrderedATNConfigSet { - b := NewBaseATNConfigSet(false) - - // This set uses the standard Hash() and 
Equals() from ATNConfig - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - - return &OrderedATNConfigSet{BaseATNConfigSet: b} -} - -func hashATNConfig(i interface{}) int { - o := i.(ATNConfig) - hash := 7 - hash = 31*hash + o.GetState().GetStateNumber() - hash = 31*hash + o.GetAlt() - hash = 31*hash + o.GetSemanticContext().Hash() - return hash -} - -func equalATNConfigs(a, b interface{}) bool { - if a == nil || b == nil { - return false - } - - if a == b { - return true - } - - var ai, ok = a.(ATNConfig) - var bi, ok1 = b.(ATNConfig) - - if !ok || !ok1 { - return false - } - - if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() { - return false - } - - if ai.GetAlt() != bi.GetAlt() { - return false - } - - return ai.GetSemanticContext().Equals(bi.GetSemanticContext()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go deleted file mode 100644 index a8b889cedb9..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "" - is.index = 0 - is.data = []rune(data) - is.size = len(is.data) // number of runes - - return is -} - -func (is *InputStream) reset() { - is.index = 0 -} - -func (is *InputStream) Consume() { - if is.index >= is.size { - // assert is.LA(1) == TokenEOF - panic("cannot consume EOF") - } - is.index++ -} - -func (is *InputStream) LA(offset int) int { - - if offset == 0 { - return 0 // nil - } - if offset < 0 { - offset++ // e.g., translate LA(-1) to use offset=0 - } - pos := is.index + offset - 1 - - if pos < 0 || pos >= is.size { // invalid - return TokenEOF - } - - return int(is.data[pos]) -} - -func (is *InputStream) LT(offset int) int { - return is.LA(offset) -} - -func (is *InputStream) Index() int { - return is.index -} - -func (is *InputStream) Size() int { - return is.size -} - -// mark/release do nothing we have entire buffer -func (is *InputStream) Mark() int { - return -1 -} - -func (is *InputStream) Release(marker int) { -} - -func (is *InputStream) Seek(index int) { - if index <= is.index { - is.index = index // just jump don't update stream state (line,...) 
- return - } - // seek forward - is.index = intMin(index, is.size) -} - -func (is *InputStream) GetText(start int, stop int) string { - if stop >= is.size { - stop = is.size - 1 - } - if start >= is.size { - return "" - } - - return string(is.data[start : stop+1]) -} - -func (is *InputStream) GetTextFromTokens(start, stop Token) string { - if start != nil && stop != nil { - return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) - } - - return "" -} - -func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.Start, i.Stop) -} - -func (*InputStream) GetSourceName() string { - return "Obtained from string" -} - -func (is *InputStream) String() string { - return string(is.data) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go deleted file mode 100644 index e5a74f0c6c4..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go +++ /dev/null @@ -1,198 +0,0 @@ -package antlr - -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -import ( - "sort" -) - -// Collectable is an interface that a struct should implement if it is to be -// usable as a key in these collections. -type Collectable[T any] interface { - Hash() int - Equals(other Collectable[T]) bool -} - -type Comparator[T any] interface { - Hash1(o T) int - Equals2(T, T) bool -} - -// JStore implements a container that allows the use of a struct to calculate the key -// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just -// serve the needs of the ANTLR Go runtime. -// -// For ease of porting the logic of the runtime from the master target (Java), this collection -// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() -// function as the key. The values are stored in a standard go map which internally is a form of hashmap -// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with -// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't -// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and -// we understand the requirements, then this is fine - this is not a general purpose collection. -type JStore[T any, C Comparator[T]] struct { - store map[int][]T - len int - comparator Comparator[T] -} - -func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { - - if comparator == nil { - panic("comparator cannot be nil") - } - - s := &JStore[T, C]{ - store: make(map[int][]T, 1), - comparator: comparator, - } - return s -} - -// Put will store given value in the collection. Note that the key for storage is generated from -// the value itself - this is specifically because that is what ANTLR needs - this would not be useful -// as any kind of general collection. -// -// If the key has a hash conflict, then the value will be added to the slice of values associated with the -// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is -// tested by calling the equals() method on the key. 
-// -// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true -// -// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. -func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn - - kh := s.comparator.Hash1(value) - - for _, v1 := range s.store[kh] { - if s.comparator.Equals2(value, v1) { - return v1, true - } - } - s.store[kh] = append(s.store[kh], value) - s.len++ - return value, false -} - -// Get will return the value associated with the key - the type of the key is the same type as the value -// which would not generally be useful, but this is a specific thing for ANTLR where the key is -// generated using the object we are going to store. -func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn - - kh := s.comparator.Hash1(key) - - for _, v := range s.store[kh] { - if s.comparator.Equals2(key, v) { - return v, true - } - } - return key, false -} - -// Contains returns true if the given key is present in the store -func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn - - _, present := s.Get(key) - return present -} - -func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { - vs := make([]T, 0, len(s.store)) - for _, v := range s.store { - vs = append(vs, v...) - } - sort.Slice(vs, func(i, j int) bool { - return less(vs[i], vs[j]) - }) - - return vs -} - -func (s *JStore[T, C]) Each(f func(T) bool) { - for _, e := range s.store { - for _, v := range e { - f(v) - } - } -} - -func (s *JStore[T, C]) Len() int { - return s.len -} - -func (s *JStore[T, C]) Values() []T { - vs := make([]T, 0, len(s.store)) - for _, e := range s.store { - for _, v := range e { - vs = append(vs, v) - } - } - return vs -} - -type entry[K, V any] struct { - key K - val V -} - -type JMap[K, V any, C Comparator[K]] struct { - store map[int][]*entry[K, V] - len int - comparator Comparator[K] -} - -func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { - return &JMap[K, V, C]{ - store: make(map[int][]*entry[K, V], 1), - comparator: comparator, - } -} - -func (m *JMap[K, V, C]) Put(key K, val V) { - kh := m.comparator.Hash1(key) - - m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) - m.len++ -} - -func (m *JMap[K, V, C]) Values() []V { - vs := make([]V, 0, len(m.store)) - for _, e := range m.store { - for _, v := range e { - vs = append(vs, v.val) - } - } - return vs -} - -func (m *JMap[K, V, C]) Get(key K) (V, bool) { - - var none V - kh := m.comparator.Hash1(key) - for _, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - return e.val, true - } - } - return none, false -} - -func (m *JMap[K, V, C]) Len() int { - return len(m.store) -} - -func (m *JMap[K, V, C]) Delete(key K) { - kh := m.comparator.Hash1(key) - for i, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) - m.len-- - return - } - } -} - -func (m *JMap[K, V, C]) Clear() { - m.store = make(map[int][]*entry[K, V]) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go deleted file mode 100644 index ba62af36108..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
-// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "golang.org/x/exp/slices" - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. -// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - Hash() int - Equals(interface{}) bool - GetParent(int) PredictionContext - getReturnState(int) int - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.Hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -var _emptyPredictionContextHash int - -func init() { - _emptyPredictionContextHash = murmurInit(1) - _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) -} - -func calculateEmptyHash() int { - return _emptyPredictionContextHash -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. 
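The add method that follows is a plain interning cache: return the canonical instance when an equal one exists, otherwise store the argument and hand it back. Here is a self-contained sketch of that pattern keyed on strings — the real cache keys on PredictionContext values, and, per the comment above, shared use would additionally need a lock:

    package main

    import "fmt"

    // intern returns the canonical copy of v, inserting it on first sight.
    func intern(cache map[string]string, v string) string {
        if canon, ok := cache[v]; ok {
            return canon // an equal value was cached earlier; reuse it
        }
        cache[v] = v
        return v
    }

    func main() {
        cache := make(map[string]string)
        a := intern(cache, "ctx:7")
        b := intern(cache, "ctx:7") // hits the cache; no second entry
        fmt.Println(a == b, len(cache)) // true 1
    }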
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - var cachedHash int - if parent != nil { - cachedHash = calculateHash(parent, returnState) - } else { - cachedHash = calculateEmptyHash() - } - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(cachedHash) - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) Hash() int { - return b.cachedHash -} - -func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { - if b == other { - return true - } - if _, ok := other.(*BaseSingletonPredictionContext); !ok { - return false - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != otherP.getReturnState(0) { - return false - } - if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.Equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - p.cachedHash = calculateEmptyHash() - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) Hash() int { - return e.cachedHash -} - -func (e *EmptyPredictionContext) Equals(other interface{}) bool { - return 
e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. - hash := murmurInit(1) - - for _, parent := range parents { - hash = murmurUpdate(hash, parent.Hash()) - } - - for _, returnState := range returnStates { - hash = murmurUpdate(hash, returnState) - } - - hash = murmurFinish(hash, len(parents)<<1) - - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(hash) - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -// Equals is the default comparison function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Equals(o interface{}) bool { - if a == o { - return true - } - other, ok := o.(*ArrayPredictionContext) - if !ok { - return false - } - if a.cachedHash != other.Hash() { - return false // can't be same if hash is different - } - - // Must compare the actual array elements and not just the array address - // - return slices.Equal(a.returnStates, other.returnStates) && - slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { - return x.Equals(y) - }) -} - -// Hash is the default hash function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Hash() int { - return a.BasePredictionContext.cachedHash -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - - // Share same graph if both same - // - if a == b || a.Equals(b) { - return a - } - - // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test - // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created - // from it. - // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion - // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from - // either of them. - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - - // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters - // here. - // - // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here - - var arp, arb *ArrayPredictionContext - var ok bool - if arp, ok = a.(*ArrayPredictionContext); ok { - } else if _, ok = a.(*BaseSingletonPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } else if _, ok = a.(*EmptyPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - if arb, ok = b.(*ArrayPredictionContext); ok { - } else if _, ok = b.(*BaseSingletonPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } else if _, ok = b.(*EmptyPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - // Both arp and arb - return mergeArrays(arp, arb, rootIsWildcard, mergeCache) -} - -// Merge two {@link SingletonBasePredictionContext} instances. -// -//
-// Stack tops equal, parents merge is same; return left graph.
-//
-// Same stack top, parents differ; merge parents giving array node, then
-// remainders of those graphs. A new root node is created to point to the
-// merged parents.
-//
-// Different stack tops pointing to same parent. Make array node for the
-// root where both elements in the root point to the same (original)
-// parent.
-//
-// Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
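For concreteness, a minimal in-package sketch of the cases above, using only identifiers defined in this file (illustrative only; the parameter notes and the implementation follow below):

```go
// Illustrative sketch of the singleton-merge cases documented above.
root := BasePredictionContextEMPTY
ax := SingletonBasePredictionContextCreate(root, 1) // stack top 1, parent $
bx := SingletonBasePredictionContextCreate(root, 2) // stack top 2, same parent

// Equal graphs: ax + ax = ax; the left graph is returned unchanged.
same := merge(ax, ax, true, nil)

// Different stack tops, shared parent: ax + bx = [a,b]x, an array node
// whose sorted return states both point at the single original parent.
arr := merge(ax, bx, true, nil).(*ArrayPredictionContext)

fmt.Println(same == ax)            // true
fmt.Println(arr.GetReturnStates()) // [1 2]
```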
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / -func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - - rootMerge := mergeRoot(a, b, rootIsWildcard) - if rootMerge != nil { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), rootMerge) - } - return rootMerge - } - if a.returnState == b.returnState { - parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) - // if parent is same as existing a or b parent or reduced to a parent, - // return it - if parent == a.parentCtx { - return a // ax + bx = ax, if a=b - } - if parent == b.parentCtx { - return b // ax + bx = bx, if a=b - } - // else: ax + ay = a'[x,y] - // merge parents x and y, giving array node with x,y then remainders - // of those graphs. dup a, a' points at merged array - // Newjoined parent so create Newsingleton pointing to it, a' - spc := SingletonBasePredictionContextCreate(parent, a.returnState) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), spc) - } - return spc - } - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - var singleParent PredictionContext - if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + - // bx = - // [a,b]x - singleParent = a.parentCtx - } - if singleParent != nil { // parents are same - // sort payloads and use same parent - payloads := []int{a.returnState, b.returnState} - if a.returnState > b.returnState { - payloads[0] = b.returnState - payloads[1] = a.returnState - } - parents := []PredictionContext{singleParent, singleParent} - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc - } - // parents differ and can't merge them. Just pack together - // into array can't merge. - // ax + by = [ax,by] - payloads := []int{a.returnState, b.returnState} - parents := []PredictionContext{a.parentCtx, b.parentCtx} - if a.returnState > b.returnState { // sort by payload - payloads[0] = b.returnState - payloads[1] = a.returnState - parents = []PredictionContext{b.parentCtx, a.parentCtx} - } - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc -} - -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//
-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is superset of any graph; return {@link //EMPTY}.
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY}; return left graph.
-//
-// Special case of last merge if local context.
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-// Must keep all contexts; {@link //EMPTY} in array is a special value (and
-// nil parent).
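The local-context row of this table can be shown in miniature; a hedged in-package sketch (the full-context rows instead keep $ by pairing a nil parent with the empty return state, as the code below does):

```go
// Illustrative sketch: with rootIsWildcard == true, the wildcard root $
// is a superset of any graph, so the merge collapses to $ itself.
x := NewBaseSingletonPredictionContext(BasePredictionContextEMPTY, 7)
m := mergeRoot(BasePredictionContextEMPTY, x, true)
fmt.Println(m == BasePredictionContextEMPTY) // true: $ + x = $
```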
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / -func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { - if rootIsWildcard { - if a == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // // + b =// - } - if b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // a +// =// - } - } else { - if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // $ + $ = $ - } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] - payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{b.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) - payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{a.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } - } - return nil -} - -// Merge two {@link ArrayBasePredictionContext} instances. -// -//
-// Different tops, different parents.
-//
-// Shared top, same parents.
-//
-// Shared top, different parents.
-//
-// Shared top, all shared parents.
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
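A small in-package illustration of the merge-sort style walk the implementation below performs over the two sorted return-state arrays (illustrative only):

```go
// Both arrays share the empty root as parent; return states are sorted.
p := BasePredictionContextEMPTY
a := NewArrayPredictionContext([]PredictionContext{p, p}, []int{1, 3})
b := NewArrayPredictionContext([]PredictionContext{p, p}, []int{2, 3})

// The walk unions the sorted tops; the shared top 3 is emitted once
// because both of its elements have the same parent.
m := mergeArrays(a, b, true, nil).(*ArrayPredictionContext)
fmt.Println(m.GetReturnStates()) // [1 2 3]
```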
-// / -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - } - // merge sorted payloads a + b => M - i := 0 // walks a - j := 0 // walks b - k := 0 // walks target M array - - mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) - // walk and merge to yield mergedParents, mergedReturnStates - for i < len(a.returnStates) && j < len(b.returnStates) { - aParent := a.parents[i] - bParent := b.parents[j] - if a.returnStates[i] == b.returnStates[j] { - // same payload (stack tops are equal), must yield merged singleton - payload := a.returnStates[i] - // $+$ = $ - bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax - // -> - // ax - if bothDollars || axAX { - mergedParents[k] = aParent // choose left - mergedReturnStates[k] = payload - } else { // ax+ay -> a'[x,y] - mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) - mergedParents[k] = mergedParent - mergedReturnStates[k] = payload - } - i++ // hop over left one as usual - j++ // but also Skip one in right side since we merge - } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M - mergedParents[k] = aParent - mergedReturnStates[k] = a.returnStates[i] - i++ - } else { // b > a, copy b[j] to M - mergedParents[k] = bParent - mergedReturnStates[k] = b.returnStates[j] - j++ - } - k++ - } - // copy over any payloads remaining in either array - if i < len(a.returnStates) { - for p := i; p < len(a.returnStates); p++ { - mergedParents[k] = a.parents[p] - mergedReturnStates[k] = a.returnStates[p] - k++ - } - } else { - for p := j; p < len(b.returnStates); p++ { - mergedParents[k] = b.parents[p] - mergedReturnStates[k] = b.returnStates[p] - k++ - } - } - // trim merged if we combined a few that had same stack tops - if k < len(mergedParents) { // write index < last position trim - if k == 1 { // for just one merged element, return singleton top - pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), pc) - } - return pc - } - mergedParents = mergedParents[0:k] - mergedReturnStates = mergedReturnStates[0:k] - } - - M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - - // if we created same array as a or b, return that instead - // TODO: track whether this is possible above during merge sort for speed - // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. 
This could be causing allocation problems - if M == a { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), a) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") - } - return a - } - if M == b { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), b) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") - } - return b - } - combineCommonParents(mergedParents) - - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), M) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) - } - return M -} - -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. -// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) - - for p := 0; p < len(parents); p++ { - parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } - } - for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] - } -} - -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { - - if context.isEmpty() { - return context - } - existing := visited[context] - if existing != nil { - return existing - } - existing = contextCache.Get(context) - if existing != nil { - visited[context] = existing - return existing - } - changed := false - parents := make([]PredictionContext, context.length()) - for i := 0; i < len(parents); i++ { - parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) - if changed || parent != context.GetParent(i) { - if !changed { - parents = make([]PredictionContext, context.length()) - for j := 0; j < context.length(); j++ { - parents[j] = context.GetParent(j) - } - changed = true - } - parents[i] = parent - } - } - if !changed { - contextCache.add(context) - visited[context] = context - return context - } - var updated PredictionContext - if len(parents) == 0 { - updated = BasePredictionContextEMPTY - } else if len(parents) == 1 { - updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) - } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) - } - contextCache.add(updated) - visited[updated] = updated - visited[context] = updated - - return updated -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go deleted file mode 100644 index 7b9b72fab1e..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. - -const ( - // - // The SLL(*) prediction mode. This prediction mode ignores the current - // parser context when making predictions. This is the fastest prediction - // mode, and provides correct results for many grammars. 
This prediction - // mode is more powerful than the prediction mode provided by ANTLR 3, but - // may result in syntax errors for grammar and input combinations which are - // not SLL. - // - //
- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // {@link //LL} prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the {@link //SLL} prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful {@link //LL} prediction abilities to complete successfully.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //
- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // Report a precise answer for exactly which alternatives are ambiguous.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //
- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeLLExactAmbigDetection = 2 -) - -// Computes the SLL prediction termination condition. -// -//
-// This method computes the SLL prediction termination condition for both of
-// the following cases:
-//
-//   - The usual SLL+LL fallback upon SLL conflict
-//   - Pure SLL without LL fallback
-//
-// COMBINED SLL+LL PARSING
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// HEURISTIC
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-//	{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
-//
-// When the ATN simulation reaches the state before {@code ';'}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative two has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also lets us continue for this rule:
-//
-//	{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// PURE SLL PARSING
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// PREDICATES IN SLL+LL PARSING
-//
-// SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts, so we might see two configurations like the
-// following:
-//
-//	{@code (s, 1, x, {}), (s, 1, x', {p})}
-//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
-// the following configurations:
-//
-//	{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
-//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
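For runtime users, the SLL/LL trade-off described above is selected through the parser's ATN simulator. A hedged usage sketch: NewMyLexer, NewMyParser and Prog are hypothetical generated names, and the fallback on SLL failure (re-parsing with PredictionModeLL) is left to the caller:

```go
input := antlr.NewInputStream(`...source text...`)
lexer := NewMyLexer(input) // hypothetical generated lexer
tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
p := NewMyParser(tokens) // hypothetical generated parser

// SLL either yields the same tree full LL would, or reports a syntax
// error; on error, a caller can re-parse with PredictionModeLL.
p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
tree := p.Prog()
_ = tree
```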
-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// Full LL prediction termination. -// -//
-// Can we stop looking ahead during ATN simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more lookahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations {@code C} into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but a different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.
-//
-// Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-//	map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-//	alt and not pred
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-//
-// If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal; you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// CONFLICTING CONFIGS
-//
-// Two configurations {@code (s, i, x)} and {@code (s, j, x')} conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing an equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity, but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// CONTINUE/STOP RULE
-//
-// Continue if union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// CASES
-//
-// EXACT AMBIGUITY DETECTION
-//
-// If all states Report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set:
-//
-//	|A_i| > 1 and A_i = A_j for all i, j
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three, so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
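A tiny in-package sketch of that continue/stop rule (the BitSet helpers used here are unexported, so this is illustrative rather than client code):

```go
a1 := NewBitSet() // conflicting subset {1,2}
a1.add(1)
a1.add(2)
a2 := NewBitSet() // conflicting subset {1,3}
a2.add(1)
a2.add(3)
a3 := NewBitSet() // conflicting subset {2,3}
a3.add(2)
a3.add(3)

// Both subsets resolve to the minimum alt 1: stop and predict 1.
fmt.Println(PredictionModeresolvesToJustOneViableAlt([]*BitSet{a1, a2})) // 1

// The minimum alts differ (1 vs 2): keep consuming lookahead.
fmt.Println(PredictionModeresolvesToJustOneViableAlt([]*BitSet{a1, a3})) // ATNInvalidAltNumber (0)
```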
-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-//	map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-//	alt and not pred
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst) - - for _, c := range configs.GetItems() { - - alts, ok := configToAlts.Get(c) - if !ok { - alts = NewBitSet() - configToAlts.Put(c, alts) - } - alts.add(c.GetAlt()) - } - - return configToAlts.Values() -} - -// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-//	map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go deleted file mode 100644 index 210699ba234..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. If there is no parent context, then -// naturally the invoking state is not valid. The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with b context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked b. Contrast b with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -// - -type RuleContext interface { - RuleNode - - GetInvokingState() int - SetInvokingState(int) - - GetRuleIndex() int - IsEmpty() bool - - GetAltNumber() int - SetAltNumber(altNumber int) - - String([]string, RuleContext) string -} - -type BaseRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int -} - -func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { - - rn := new(BaseRuleContext) - - // What context invoked b rule? - rn.parentCtx = parent - - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. 
- if parent == nil { - rn.invokingState = -1 - } else { - rn.invokingState = invokingState - } - - return rn -} - -func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { - return b -} - -func (b *BaseRuleContext) SetParent(v Tree) { - if v == nil { - b.parentCtx = nil - } else { - b.parentCtx = v.(RuleContext) - } -} - -func (b *BaseRuleContext) GetInvokingState() int { - return b.invokingState -} - -func (b *BaseRuleContext) SetInvokingState(t int) { - b.invokingState = t -} - -func (b *BaseRuleContext) GetRuleIndex() int { - return b.RuleIndex -} - -func (b *BaseRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (b *BaseRuleContext) SetAltNumber(altNumber int) {} - -// A context is empty if there is no invoking state meaning nobody call -// current context. -func (b *BaseRuleContext) IsEmpty() bool { - return b.invokingState == -1 -} - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//
-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go deleted file mode 100644 index c9bd6751e3a..00000000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go +++ /dev/null @@ -1,235 +0,0 @@ -package antlr - -import "math" - -const ( - _initalCapacity = 16 - _initalBucketCapacity = 8 - _loadFactor = 0.75 -) - -type Set interface { - Add(value interface{}) (added interface{}) - Len() int - Get(value interface{}) (found interface{}) - Contains(value interface{}) bool - Values() []interface{} - Each(f func(interface{}) bool) -} - -type array2DHashSet struct { - buckets [][]Collectable[any] - hashcodeFunction func(interface{}) int - equalsFunction func(Collectable[any], Collectable[any]) bool - - n int // How many elements in set - threshold int // when to expand - - currentPrime int // jump by 4 primes each expand or whatever - initialBucketCapacity int -} - -func (as *array2DHashSet) Each(f func(interface{}) bool) { - if as.Len() < 1 { - return - } - - for _, bucket := range as.buckets { - for _, o := range bucket { - if o == nil { - break - } - if !f(o) { - return - } - } - } -} - -func (as *array2DHashSet) Values() []interface{} { - if as.Len() < 1 { - return nil - } - - values := make([]interface{}, 0, as.Len()) - as.Each(func(i interface{}) bool { - values = append(values, i) - return true - }) - return values -} - -func (as *array2DHashSet) Contains(value Collectable[any]) bool { - return as.Get(value) != nil -} - -func (as *array2DHashSet) Add(value Collectable[any]) interface{} { - if as.n > as.threshold { - as.expand() - } - return as.innerAdd(value) -} - -func (as *array2DHashSet) expand() { - old := as.buckets - - as.currentPrime += 4 - - var ( - newCapacity = len(as.buckets) << 1 - newTable = as.createBuckets(newCapacity) - newBucketLengths = make([]int, len(newTable)) - ) - - as.buckets = newTable - as.threshold = int(float64(newCapacity) * _loadFactor) - - for _, bucket := range old { - if bucket == nil { - continue - } - - for _, o := range bucket { - if o == nil { - break - } - - b := as.getBuckets(o) - bucketLength := newBucketLengths[b] - var newBucket []Collectable[any] - if bucketLength == 0 { - // new bucket - newBucket = as.createBucket(as.initialBucketCapacity) - newTable[b] = newBucket - } else { - newBucket = newTable[b] - if bucketLength == len(newBucket) { - // expand - newBucketCopy := make([]Collectable[any], len(newBucket)<<1) - copy(newBucketCopy[:bucketLength], newBucket) - newBucket = newBucketCopy - newTable[b] = newBucket - } - } - - newBucket[bucketLength] = o - newBucketLengths[b]++ - } - } -} - -func (as *array2DHashSet) Len() int { - return as.n -} - -func (as *array2DHashSet) Get(o Collectable[any]) interface{} { - if o == nil { - return nil - } - - b := as.getBuckets(o) - bucket := as.buckets[b] - if bucket == nil { // no bucket - return nil - } - - for _, e := range bucket { - if e == nil { - return nil // empty slot; not there - } - if as.equalsFunction(e, o) { - return e - } - } - - return nil -} - -func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} { - b := as.getBuckets(o) - - bucket := as.buckets[b] - - // new bucket - if bucket == nil { - bucket = as.createBucket(as.initialBucketCapacity) - 
bucket[0] = o - - as.buckets[b] = bucket - as.n++ - return o - } - - // look for it in bucket - for i := 0; i < len(bucket); i++ { - existing := bucket[i] - if existing == nil { // empty slot; not there, add. - bucket[i] = o - as.n++ - return o - } - - if as.equalsFunction(existing, o) { // found existing, quit - return existing - } - } - - // full bucket, expand and add to end - oldLength := len(bucket) - bucketCopy := make([]Collectable[any], oldLength<<1) - copy(bucketCopy[:oldLength], bucket) - bucket = bucketCopy - as.buckets[b] = bucket - bucket[oldLength] = o - as.n++ - return o -} - -func (as *array2DHashSet) getBuckets(value Collectable[any]) int { - hash := as.hashcodeFunction(value) - return hash & (len(as.buckets) - 1) -} - -func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] { - return make([][]Collectable[any], cap) -} - -func (as *array2DHashSet) createBucket(cap int) []Collectable[any] { - return make([]Collectable[any], cap) -} - -func newArray2DHashSetWithCap( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, - initCap int, - initBucketCap int, -) *array2DHashSet { - if hashcodeFunction == nil { - hashcodeFunction = standardHashFunction - } - - if equalsFunction == nil { - equalsFunction = standardEqualsFunction - } - - ret := &array2DHashSet{ - hashcodeFunction: hashcodeFunction, - equalsFunction: equalsFunction, - - n: 0, - threshold: int(math.Floor(_initalCapacity * _loadFactor)), - - currentPrime: 1, - initialBucketCapacity: initBucketCap, - } - - ret.buckets = ret.createBuckets(initCap) - return ret -} - -func newArray2DHashSet( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, -) *array2DHashSet { - return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity) -} diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore new file mode 100644 index 00000000000..38ea34ff513 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/.gitignore @@ -0,0 +1,18 @@ +### Go template + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + + +# Go workspace file +go.work + +# No Goland stuff in this repo +.idea diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE new file mode 100644 index 00000000000..a22292eb5ae --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md new file mode 100644 index 00000000000..03e5b83eb1b --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/README.md @@ -0,0 +1,54 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/antlr4-go/antlr?style=flat-square)](https://goreportcard.com/report/github.com/antlr4-go/antlr) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/github.com/antlr4-go/antlr)](https://pkg.go.dev/github.com/antlr4-go/antlr) +[![Release](https://img.shields.io/github/v/release/antlr4-go/antlr?sort=semver&style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Release](https://img.shields.io/github/go-mod/go-version/antlr4-go/antlr?style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg?style=flat-square)](https://github.com/antlr4-go/antlr/commit-activity) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![GitHub stars](https://img.shields.io/github/stars/antlr4-go/antlr?style=flat-square&label=Star&maxAge=2592000)](https://GitHub.com/Naereen/StrapDown.js/stargazers/) +# ANTLR4 Go Runtime Module Repo + +IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here. + + - Do not submit PRs or any change requests to this repo + - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR + - This repo contains the Go runtime that your generated projects should import + +## Introduction + +This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained +at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create +the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here. + +The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically. + +### Why? + +The `go get` command is unable to retrieve the Go runtime when it is embedded so +deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime, +does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and +causes confusion. 
+ +For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc +) +``` + +Where you would expect to see: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0 +) +``` + +The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and +from whence users can expect `go get` to behave as expected. + + +# Documentation +Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on +migrating existing projects to use the new module location and for information on how to use the Go runtime in +general. diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go new file mode 100644 index 00000000000..3bb4fd7c4e0 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go @@ -0,0 +1,102 @@ +/* +Package antlr implements the Go version of the ANTLR 4 runtime. + +# The ANTLR Tool + +ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, +or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. +From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface +(or visitor) that makes it easy to respond to the recognition of phrases of interest. + +# Go Runtime + +At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime +source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of +ANTLR4 that it is compatible with (I.E. uses the /v4 path). + +However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root +of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code. +This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not +list the release tag such as @4.12.0 - this was confusing, to say the least. + +As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` +(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information, +which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs]. + +This means that if you are using the source code without modules, you should also use the source code in the [new repo]. +Though we highly recommend that you use go modules, as they are now idiomatic for Go. + +I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good. + +Go runtime author: [Jim Idle] jimi@idle.ws + +# Code Generation + +ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a +runtime library, written specifically to support the generated code in the target language. This library is the +runtime for the Go target. + +To generate code for the go target, it is generally recommended to place the source grammar files in a package of +their own, and use the `.sh` script method of generating code, using the go generate directive. 
In that same directory +it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean +that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other +way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in +your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as +it was at any point in its history. + +Here is a general/recommended template for an ANTLR based recognizer in Go: + + . + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.12.1-complete.jar + │ ├── generate.go + │ └── generate.sh + ├── parsing - generated code goes here + │ └── error_listeners.go + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go + +Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in. + +The generate.go file then looks like this: + + package parser + + //go:generate ./generate.sh + +And the generate.sh file will look similar to this: + + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4 + +depending on whether you want visitors or listeners or any other ANTLR options. Not that another option here +is to generate the code into a + +From the command line at the root of your source package (location of go.mo)d) you can then simply issue the command: + + go generate ./... + +Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code +by importing the parsing package. + +There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like. + +# Copyright Notice + +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. + +[target languages]: https://github.com/antlr/antlr4/tree/master/runtime +[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt +[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md +[new repo]: https://github.com/antlr4-go/antlr +[Jim Idle]: https://github.com/jimidle +[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md +*/ +package antlr diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go similarity index 94% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go rename to vendor/github.com/antlr4-go/antlr/v4/atn.go index 98010d2e6e6..cdeefed2478 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go @@ -20,10 +20,11 @@ var ATNInvalidAltNumber int // [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf // [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we + + // DecisionToState is the decision points for all rules, sub-rules, optional + // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we // can go back later and build DFA predictors for them. 
This includes - // all the rules, subrules, optional blocks, ()+, ()* etc... + // all the rules, sub-rules, optional blocks, ()+, ()* etc... DecisionToState []DecisionState // grammarType is the ATN type and is used for deserializing ATNs from strings. @@ -51,6 +52,8 @@ type ATN struct { // specified, and otherwise is nil. ruleToTokenType []int + // ATNStates is a list of all states in the ATN, ordered by state number. + // states []ATNState mu sync.Mutex diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go new file mode 100644 index 00000000000..a83f25d3492 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -0,0 +1,335 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +const ( + lexerConfig = iota // Indicates that this ATNConfig is for a lexer + parserConfig // Indicates that this ATNConfig is for a parser +) + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive in the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. +type ATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context *PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int + cType int // lexerConfig or parserConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only +func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + return NewATNConfig5(state, alt, context, SemanticContextNone) +} + +// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context +func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? 
+	}
+
+	pac := &ATNConfig{}
+	pac.state = state
+	pac.alt = alt
+	pac.context = context
+	pac.semanticContext = semanticContext
+	pac.cType = parserConfig
+	return pac
+}
+
+// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
+func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+	return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
+func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
+	return NewATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+// NewATNConfig2 creates a new ATNConfig instance given an existing config and a semantic context only
+func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
+	return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
+func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+	return NewATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context. The other 'constructors'
+// are just wrappers around this one.
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+	if semanticContext == nil {
+		panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
+	}
+	b := &ATNConfig{}
+	b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
+	b.cType = parserConfig
+	return b
+}
+
+func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
+
+	a.state = state
+	a.alt = alt
+	a.context = context
+	a.semanticContext = semanticContext
+	a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
+	a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
+}
+
+func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
+	return a.precedenceFilterSuppressed
+}
+
+func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
+	a.precedenceFilterSuppressed = v
+}
+
+// GetState returns the ATN state associated with this configuration
+func (a *ATNConfig) GetState() ATNState {
+	return a.state
+}
+
+// GetAlt returns the alternative associated with this configuration
+func (a *ATNConfig) GetAlt() int {
+	return a.alt
+}
+
+// SetContext sets the rule invocation stack associated with this configuration
+func (a *ATNConfig) SetContext(v *PredictionContext) {
+	a.context = v
+}
+
+// GetContext returns the rule invocation stack associated with this configuration
+func (a *ATNConfig) GetContext() *PredictionContext {
+	return a.context
+}
+
+// GetSemanticContext returns the semantic context associated with this configuration
+func (a *ATNConfig) GetSemanticContext() SemanticContext {
+	return a.semanticContext
+}
+
+// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
+func (a *ATNConfig) GetReachesIntoOuterContext() int {
+	return a.reachesIntoOuterContext
+}
+
+// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
+func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
+	a.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function
for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
+	switch a.cType {
+	case lexerConfig:
+		return a.LEquals(o)
+	case parserConfig:
+		return a.PEquals(o)
+	default:
+		panic("Invalid ATNConfig type")
+	}
+}
+
+// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
+	var other, ok = o.(*ATNConfig)
+
+	if !ok {
+		return false
+	}
+	if a == other {
+		return true
+	} else if other == nil {
+		return false
+	}
+
+	var equal bool
+
+	if a.context == nil {
+		equal = other.context == nil
+	} else {
+		equal = a.context.Equals(other.context)
+	}
+
+	var (
+		nums = a.state.GetStateNumber() == other.state.GetStateNumber()
+		alts = a.alt == other.alt
+		cons = a.semanticContext.Equals(other.semanticContext)
+		sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+	)
+
+	return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for an ATNConfig when no specialist hash function
+// is required for a collection; it delegates to LHash or PHash according to the configuration type.
+func (a *ATNConfig) Hash() int {
+	switch a.cType {
+	case lexerConfig:
+		return a.LHash()
+	case parserConfig:
+		return a.PHash()
+	default:
+		panic("Invalid ATNConfig type")
+	}
+}
+
+// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) PHash() int {
+	var c int
+	if a.context != nil {
+		c = a.context.Hash()
+	}
+
+	h := murmurInit(7)
+	h = murmurUpdate(h, a.state.GetStateNumber())
+	h = murmurUpdate(h, a.alt)
+	h = murmurUpdate(h, c)
+	h = murmurUpdate(h, a.semanticContext.Hash())
+	return murmurFinish(h, 4)
+}
+
+// String returns a string representation of the ATNConfig, usually used for debugging purposes
+func (a *ATNConfig) String() string {
+	var s1, s2, s3 string
+
+	if a.context != nil {
+		s1 = ",[" + fmt.Sprint(a.context) + "]"
+	}
+
+	if a.semanticContext != SemanticContextNone {
+		s2 = "," + fmt.Sprint(a.semanticContext)
+	}
+
+	if a.reachesIntoOuterContext > 0 {
+		s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
+	}
+
+	return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+	lac := &ATNConfig{}
+	lac.state = state
+	lac.alt = alt
+	lac.context = context
+	lac.semanticContext = SemanticContextNone
+	lac.cType = lexerConfig
+	return lac
+}
+
+func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+	lac := &ATNConfig{}
+	lac.lexerActionExecutor = c.lexerActionExecutor
+	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+	lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+	lac.cType = lexerConfig
+	return lac
+}
+
+func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
+	lac := &ATNConfig{}
+	lac.lexerActionExecutor = lexerActionExecutor
+	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+	lac.InitATNConfig(c, state, c.GetAlt(),
c.GetContext(), c.GetSemanticContext())
+	lac.cType = lexerConfig
+	return lac
+}
+
+func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+	lac := &ATNConfig{}
+	lac.lexerActionExecutor = c.lexerActionExecutor
+	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+	lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
+	lac.cType = lexerConfig
+	return lac
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+	lac := &ATNConfig{}
+	lac.state = state
+	lac.alt = alt
+	lac.context = context
+	lac.semanticContext = SemanticContextNone
+	lac.cType = lexerConfig
+	return lac
+}
+
+// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LHash() int {
+	var f int
+	if a.passedThroughNonGreedyDecision {
+		f = 1
+	} else {
+		f = 0
+	}
+	h := murmurInit(7)
+	h = murmurUpdate(h, a.state.GetStateNumber())
+	h = murmurUpdate(h, a.alt)
+	h = murmurUpdate(h, a.context.Hash())
+	h = murmurUpdate(h, a.semanticContext.Hash())
+	h = murmurUpdate(h, f)
+	h = murmurUpdate(h, a.lexerActionExecutor.Hash())
+	h = murmurFinish(h, 6)
+	return h
+}
+
+// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
+	var otherT, ok = other.(*ATNConfig)
+	if !ok {
+		return false
+	} else if a == otherT {
+		return true
+	} else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
+		return false
+	}
+
+	switch {
+	case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
+		return true
+	case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
+		if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
+			return false
+		}
+	default:
+		return false // One, but not both, is nil
+	}
+
+	return a.PEquals(otherT)
+}
+
+func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
+	var ds, ok = target.(DecisionState)
+
+	return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
new file mode 100644
index 00000000000..52dbaf8064f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
@@ -0,0 +1,301 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+)
+
+// ATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type ATNConfigSet struct {
+	cachedHash int
+
+	// configLookup is used to determine whether two ATNConfigSets are equal. We
+	// need all configurations with the same (s, i, _, semctx) to be equal. A key
+	// effectively doubles the number of objects associated with ATNConfigs. All
+	// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+	// read-only because a set becomes a DFA state.
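+	// (A reading note, inferred from the comparator wiring below: aConfCompInst
+	// hashes and compares on (state, alt, semantic context) only, so two configs
+	// that differ solely in prediction context land on the same key and are
+	// merged by Add.)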
+	configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
+
+	// configs is the added elements that did not match an existing key in configLookup
+	configs []*ATNConfig
+
+	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+	// info together because it saves re-computation. Can we track conflicts as they
+	// are added to save scanning configs later?
+	conflictingAlts *BitSet
+
+	// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+	// we hit a pred while computing a closure operation. Do not make a DFA state
+	// from the ATNConfigSet in this case. TODO: How is this used by parsers?
+	dipsIntoOuterContext bool
+
+	// fullCtx is whether it is part of a full context LL prediction. Used to
+	// determine how to merge $. It is a wildcard with SLL, but not for an LL
+	// context merge.
+	fullCtx bool
+
+	// Used in parser and lexer. In lexer, it indicates we hit a pred
+	// while computing a closure operation. Don't make a DFA state from this set.
+	hasSemanticContext bool
+
+	// readOnly is whether it is read-only. Do not
+	// allow any code to manipulate the set if true because DFA states will point at
+	// sets and those must not change. If not, protect other fields; conflictingAlts
+	// in particular, which is assigned after readOnly.
+	readOnly bool
+
+	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+	// info together because it saves re-computation. Can we track conflicts as they
+	// are added to save scanning configs later?
+	uniqueAlt int
+}
+
+// Alts returns the combined set of alts for all the configurations in this set.
+func (b *ATNConfigSet) Alts() *BitSet {
+	alts := NewBitSet()
+	for _, it := range b.configs {
+		alts.add(it.GetAlt())
+	}
+	return alts
+}
+
+// NewATNConfigSet creates a new ATNConfigSet instance.
+func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
+	return &ATNConfigSet{
+		cachedHash:   -1,
+		configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
+		fullCtx:      fullCtx,
+	}
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _),
+// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
+// 'pi' is the [ATNConfig].semanticContext.
+//
+// We use (s,i,pi) as the key.
+// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
+	if b.readOnly {
+		panic("set is read-only")
+	}
+
+	if config.GetSemanticContext() != SemanticContextNone {
+		b.hasSemanticContext = true
+	}
+
+	if config.GetReachesIntoOuterContext() > 0 {
+		b.dipsIntoOuterContext = true
+	}
+
+	existing, present := b.configLookup.Put(config)
+
+	// The config was not already in the set
+	//
+	if !present {
+		b.cachedHash = -1
+		b.configs = append(b.configs, config) // Track order here
+		return true
+	}
+
+	// Merge a previous (s, i, pi, _) with it and save the result
+	rootIsWildcard := !b.fullCtx
+	merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+	// No need to check for existing.context because config.context is in the cache,
+	// since the only way to create new graphs is the "call rule" and here. We cache
+	// at both places.
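+
+	// An existing (s, i, pi) entry was found: keep the larger of the two
+	// outer-context reach counts on the surviving config.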
+	existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+	// Preserve the precedence filter suppression during the merge
+	if config.getPrecedenceFilterSuppressed() {
+		existing.setPrecedenceFilterSuppressed(true)
+	}
+
+	// Replace the context because there is no need to do alt mapping
+	existing.SetContext(merged)
+
+	return true
+}
+
+// GetStates returns the set of states represented by all configurations in this config set
+func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+	// states uses the standard comparator and Hash() provided by the ATNState instance
+	//
+	states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
+
+	for i := 0; i < len(b.configs); i++ {
+		states.Put(b.configs[i].GetState())
+	}
+
+	return states
+}
+
+func (b *ATNConfigSet) GetPredicates() []SemanticContext {
+	predicates := make([]SemanticContext, 0)
+
+	for i := 0; i < len(b.configs); i++ {
+		c := b.configs[i].GetSemanticContext()
+
+		if c != SemanticContextNone {
+			predicates = append(predicates, c)
+		}
+	}
+
+	return predicates
+}
+
+func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+	if b.readOnly {
+		panic("set is read-only")
+	}
+
+	// Empty indicates no optimization is possible
+	if b.configLookup == nil || b.configLookup.Len() == 0 {
+		return
+	}
+
+	for i := 0; i < len(b.configs); i++ {
+		config := b.configs[i]
+		config.SetContext(interpreter.getCachedContext(config.GetContext()))
+	}
+}
+
+func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
+	for i := 0; i < len(coll); i++ {
+		b.Add(coll[i], nil)
+	}
+
+	return false
+}
+
+// Compare returns true only if the configs in both sets appear in the same order and their Equals
+// functions return true. Java uses ArrayList.equals(), which requires the same order.
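+//
+// An illustrative sketch (hypothetical sets, not code from this runtime):
+//
+//	a := NewATNConfigSet(false)
+//	b := NewATNConfigSet(false)
+//	same := a.Compare(b) // true here: both sets are empty, so the ordered walk matches trivially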
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + for i := 0; i < len(b.configs); i++ { + if !b.configs[i].Equals(bs.configs[i]) { + return false + } + } + + return true +} + +func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool { + if b == other { + return true + } else if _, ok := other.(*ATNConfigSet); !ok { + return false + } + + other2 := other.(*ATNConfigSet) + var eca bool + switch { + case b.conflictingAlts == nil && other2.conflictingAlts == nil: + eca = true + case b.conflictingAlts != nil && other2.conflictingAlts != nil: + eca = b.conflictingAlts.equals(other2.conflictingAlts) + } + return b.configs != nil && + b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + eca && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) +} + +func (b *ATNConfigSet) Hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *ATNConfigSet) hashCodeConfigs() int { + h := 1 + for _, config := range b.configs { + h = 31*h + config.Hash() + } + return h +} + +func (b *ATNConfigSet) Contains(item *ATNConfig) bool { + if b.readOnly { + panic("not implemented for read-only sets") + } + if b.configLookup == nil { + return false + } + return b.configLookup.Contains(item) +} + +func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool { + return b.Contains(item) +} + +func (b *ATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + b.configs = make([]*ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()") +} + +func (b *ATNConfigSet) String() string { + + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair +// for use in lexers. 
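+//
+// A usage sketch (cfg stands for an assumed pre-built *ATNConfig; the nil merge
+// cache mirrors what AddAll passes internally):
+//
+//	ocs := NewOrderedATNConfigSet()
+//	ocs.Add(cfg, nil)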
+func NewOrderedATNConfigSet() *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + // This set uses the standard Hash() and Equals() from ATNConfig + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"), + fullCtx: false, + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go similarity index 86% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go index 3c975ec7bfd..bdb30b36229 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go @@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool { func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.readOnly = readOnly } @@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool { func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.verifyATN = verifyATN } @@ -42,11 +42,12 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.generateRuleBypassTransitions = generateRuleBypassTransitions } +//goland:noinspection GoUnusedExportedFunction func DefaultATNDeserializationOptions() *ATNDeserializationOptions { return NewATNDeserializationOptions(&defaultATNDeserializationOptions) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go similarity index 97% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go index 3888856b4b6..2dcb9ae11b6 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go @@ -35,6 +35,7 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { return &ATNDeserializer{options: options} } +//goland:noinspection GoUnusedFunction func stringInSlice(a string, list []string) int { for i, b := range list { if b == a { @@ -193,7 +194,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) { } } -func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet { +func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet { m := a.readInt() // Preallocate the needed capacity. 
@@ -350,7 +351,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { bypassStart.endState = bypassStop - atn.defineDecisionState(bypassStart.BaseDecisionState) + atn.defineDecisionState(&bypassStart.BaseDecisionState) bypassStop.startState = bypassStart @@ -450,7 +451,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { continue } - // We analyze the ATN to determine if a ATN decision state is the + // We analyze the [ATN] to determine if an ATN decision state is the // decision for the closure block that determines whether a // precedence rule should continue or complete. if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { @@ -553,7 +554,7 @@ func (a *ATNDeserializer) readInt() int { return int(v) // data is 32 bits but int is at least that big } -func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { target := atn.states[trg] switch typeIndex { diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go similarity index 66% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go index 41529115fa6..afe6c9f809a 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go @@ -4,7 +4,7 @@ package antlr -var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false)) type IATNSimulator interface { SharedContextCache() *PredictionContextCache @@ -18,22 +18,13 @@ type BaseATNSimulator struct { decisionToDFA []*DFA } -func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { - b := new(BaseATNSimulator) - - b.atn = atn - b.sharedContextCache = sharedContextCache - - return b -} - -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { +func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext { if b.sharedContextCache == nil { return context } - visited := make(map[PredictionContext]PredictionContext) - + //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()") + visited := NewVisitRecord() return getCachedBasePredictionContext(context, b.sharedContextCache, visited) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go similarity index 65% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_state.go index 1f2a56bc311..2ae5807cdb8 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go @@ -4,7 +4,11 @@ package antlr -import "strconv" +import ( + "fmt" + "os" + "strconv" +) // Constants for serialization. 
const (
@@ -25,6 +29,7 @@ const (
 	ATNStateInvalidStateNumber = -1
 )
 
+//goland:noinspection GoUnusedGlobalVariable
 var ATNStateInitialNumTransitions = 4
 
 type ATNState interface {
@@ -73,7 +78,7 @@ type BaseATNState struct {
 	transitions []Transition
 }
 
-func NewBaseATNState() *BaseATNState {
+func NewATNState() *BaseATNState {
 	return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
 }
 
@@ -148,27 +153,46 @@ func (as *BaseATNState) AddTransition(trans Transition, index int) {
 	if len(as.transitions) == 0 {
 		as.epsilonOnlyTransitions = trans.getIsEpsilon()
 	} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+		_, _ = fmt.Fprintf(os.Stderr, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
 		as.epsilonOnlyTransitions = false
 	}
 
+	// TODO: Check code for already present compared to the Java equivalent
+	//alreadyPresent := false
+	//for _, t := range as.transitions {
+	//	if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
+	//		if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
+	//			alreadyPresent = true
+	//			break
+	//		}
+	//	} else if t.getIsEpsilon() && trans.getIsEpsilon() {
+	//		alreadyPresent = true
+	//		break
+	//	}
+	//}
+	//if !alreadyPresent {
 	if index == -1 {
 		as.transitions = append(as.transitions, trans)
 	} else {
 		as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) // TODO: as.transitions.splice(index, 1, trans)
 	}
+	//} else {
+	//	_, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
+	//}
 }
 
 type BasicState struct {
-	*BaseATNState
+	BaseATNState
 }
 
 func NewBasicState() *BasicState {
-	b := NewBaseATNState()
-
-	b.stateType = ATNStateBasic
-
-	return &BasicState{BaseATNState: b}
+	return &BasicState{
+		BaseATNState: BaseATNState{
+			stateNumber: ATNStateInvalidStateNumber,
+			stateType:   ATNStateBasic,
+		},
+	}
 }
 
 type DecisionState interface {
@@ -182,13 +206,19 @@ type DecisionState interface {
 }
 
 type BaseDecisionState struct {
-	*BaseATNState
+	BaseATNState
 	decision  int
 	nonGreedy bool
 }
 
 func NewBaseDecisionState() *BaseDecisionState {
-	return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
+	return &BaseDecisionState{
+		BaseATNState: BaseATNState{
+			stateNumber: ATNStateInvalidStateNumber,
+			stateType:   ATNStateBasic,
+		},
+		decision: -1,
+	}
 }
 
 func (s *BaseDecisionState) getDecision() int {
@@ -216,12 +246,20 @@ type BlockStartState interface {
 }
 
 // BaseBlockStartState is the start of a regular (...) block.
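+// Note that in this version of the runtime the concrete state types embed their
+// base structs by value rather than by pointer (an assumption on intent: this
+// avoids one allocation and one pointer indirection per state).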
type BaseBlockStartState struct { - *BaseDecisionState + BaseDecisionState endState *BlockEndState } func NewBlockStartState() *BaseBlockStartState { - return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} + return &BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + }, + } } func (s *BaseBlockStartState) getEndState() *BlockEndState { @@ -233,31 +271,38 @@ func (s *BaseBlockStartState) setEndState(b *BlockEndState) { } type BasicBlockStartState struct { - *BaseBlockStartState + BaseBlockStartState } func NewBasicBlockStartState() *BasicBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateBlockStart - - return &BasicBlockStartState{BaseBlockStartState: b} + return &BasicBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockStart, + }, + }, + }, + } } var _ BlockStartState = &BasicBlockStartState{} // BlockEndState is a terminal node of a simple (a|b|c) block. type BlockEndState struct { - *BaseATNState + BaseATNState startState ATNState } func NewBlockEndState() *BlockEndState { - b := NewBaseATNState() - - b.stateType = ATNStateBlockEnd - - return &BlockEndState{BaseATNState: b} + return &BlockEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockEnd, + }, + startState: nil, + } } // RuleStopState is the last node in the ATN for a rule, unless that rule is the @@ -265,43 +310,48 @@ func NewBlockEndState() *BlockEndState { // encode references to all calls to this rule to compute FOLLOW sets for error // handling. type RuleStopState struct { - *BaseATNState + BaseATNState } func NewRuleStopState() *RuleStopState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStop - - return &RuleStopState{BaseATNState: b} + return &RuleStopState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStop, + }, + } } type RuleStartState struct { - *BaseATNState + BaseATNState stopState ATNState isPrecedenceRule bool } func NewRuleStartState() *RuleStartState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStart - - return &RuleStartState{BaseATNState: b} + return &RuleStartState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStart, + }, + } } // PlusLoopbackState is a decision state for A+ and (A|B)+. It has two // transitions: one to the loop back to start of the block, and one to exit. type PlusLoopbackState struct { - *BaseDecisionState + BaseDecisionState } func NewPlusLoopbackState() *PlusLoopbackState { - b := NewBaseDecisionState() - - b.stateType = ATNStatePlusLoopBack - - return &PlusLoopbackState{BaseDecisionState: b} + return &PlusLoopbackState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusLoopBack, + }, + }, + } } // PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a @@ -309,85 +359,103 @@ func NewPlusLoopbackState() *PlusLoopbackState { // it is included for completeness. In reality, PlusLoopbackState is the real // decision-making node for A+. 
type PlusBlockStartState struct { - *BaseBlockStartState + BaseBlockStartState loopBackState ATNState } func NewPlusBlockStartState() *PlusBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStatePlusBlockStart - - return &PlusBlockStartState{BaseBlockStartState: b} + return &PlusBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusBlockStart, + }, + }, + }, + } } var _ BlockStartState = &PlusBlockStartState{} // StarBlockStartState is the block that begins a closure loop. type StarBlockStartState struct { - *BaseBlockStartState + BaseBlockStartState } func NewStarBlockStartState() *StarBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateStarBlockStart - - return &StarBlockStartState{BaseBlockStartState: b} + return &StarBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarBlockStart, + }, + }, + }, + } } var _ BlockStartState = &StarBlockStartState{} type StarLoopbackState struct { - *BaseATNState + BaseATNState } func NewStarLoopbackState() *StarLoopbackState { - b := NewBaseATNState() - - b.stateType = ATNStateStarLoopBack - - return &StarLoopbackState{BaseATNState: b} + return &StarLoopbackState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopBack, + }, + } } type StarLoopEntryState struct { - *BaseDecisionState + BaseDecisionState loopBackState ATNState precedenceRuleDecision bool } func NewStarLoopEntryState() *StarLoopEntryState { - b := NewBaseDecisionState() - - b.stateType = ATNStateStarLoopEntry - // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. - return &StarLoopEntryState{BaseDecisionState: b} + return &StarLoopEntryState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopEntry, + }, + }, + } } // LoopEndState marks the end of a * or + loop. type LoopEndState struct { - *BaseATNState + BaseATNState loopBackState ATNState } func NewLoopEndState() *LoopEndState { - b := NewBaseATNState() - - b.stateType = ATNStateLoopEnd - - return &LoopEndState{BaseATNState: b} + return &LoopEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateLoopEnd, + }, + } } // TokensStartState is the Tokens rule start state linking to each lexer rule start state. 
type TokensStartState struct {
-	*BaseDecisionState
+	BaseDecisionState
 }
 
 func NewTokensStartState() *TokensStartState {
-	b := NewBaseDecisionState()
-
-	b.stateType = ATNStateTokenStart
-
-	return &TokensStartState{BaseDecisionState: b}
+	return &TokensStartState{
+		BaseDecisionState: BaseDecisionState{
+			BaseATNState: BaseATNState{
+				stateNumber: ATNStateInvalidStateNumber,
+				stateType:   ATNStateTokenStart,
+			},
+		},
+	}
 }
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn_type.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
similarity index 89%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/char_stream.go
index c33f0adb5e1..bd8127b6b54 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
@@ -8,5 +8,5 @@ type CharStream interface {
 	IntStream
 	GetText(int, int) string
 	GetTextFromTokens(start, end Token) string
-	GetTextFromInterval(*Interval) string
+	GetTextFromInterval(Interval) string
 }
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
rename to vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
similarity index 88%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
index c6c9485a20a..b75da9df08e 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
@@ -28,22 +28,24 @@ type CommonTokenStream struct {
 	// trivial with bt field.
 	fetchedEOF bool
 
-	// index indexs into tokens of the current token (next token to consume).
+	// index into [tokens] of the current token (next token to consume).
 	// tokens[p] should be LT(1). It is set to -1 when the stream is first
 	// constructed or when SetTokenSource is called, indicating that the first token
 	// has not yet been fetched from the token source. For additional information,
-	// see the documentation of IntStream for a description of initializing methods.
+	// see the documentation of [IntStream] for a description of initializing methods.
 	index int
 
-	// tokenSource is the TokenSource from which tokens for the bt stream are
+	// tokenSource is the [TokenSource] from which tokens for the bt stream are
 	// fetched.
 	tokenSource TokenSource
 
-	// tokens is all tokens fetched from the token source. The list is considered a
+	// tokens contains all tokens fetched from the token source. The list is considered a
 	// complete view of the input once fetchedEOF is set to true.
 	tokens []Token
 }
 
+// NewCommonTokenStream creates a new CommonTokenStream that uses the supplied lexer to produce
+// tokens and pulls tokens from the given lexer channel.
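+//
+// A typical wiring sketch (MyLexer stands in for a generated lexer and is not
+// part of this runtime):
+//
+//	input := NewInputStream("text to tokenize")
+//	lexer := NewMyLexer(input)
+//	tokens := NewCommonTokenStream(lexer, TokenDefaultChannel)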
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { return &CommonTokenStream{ channel: channel, @@ -53,6 +55,7 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { } } +// GetAllTokens returns all tokens currently pulled from the token source. func (c *CommonTokenStream) GetAllTokens() []Token { return c.tokens } @@ -61,9 +64,11 @@ func (c *CommonTokenStream) Mark() int { return 0 } -func (c *CommonTokenStream) Release(marker int) {} +func (c *CommonTokenStream) Release(_ int) {} -func (c *CommonTokenStream) reset() { +func (c *CommonTokenStream) Reset() { + c.fetchedEOF = false + c.tokens = make([]Token, 0) c.Seek(0) } @@ -107,7 +112,7 @@ func (c *CommonTokenStream) Consume() { // Sync makes sure index i in tokens has a token and returns true if a token is // located at index i and otherwise false. func (c *CommonTokenStream) Sync(i int) bool { - n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? + n := i - len(c.tokens) + 1 // How many more elements do we need? if n > 0 { fetched := c.fetch(n) @@ -193,12 +198,13 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { c.tokenSource = tokenSource c.tokens = make([]Token, 0) c.index = -1 + c.fetchedEOF = false } // NextTokenOnChannel returns the index of the next token on channel given a // starting index. Returns i if tokens[i] is on channel. Returns -1 if there are -// no tokens on channel between i and EOF. -func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { +// no tokens on channel between 'i' and [TokenEOF]. +func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int { c.Sync(i) if i >= len(c.tokens) { @@ -244,7 +250,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) from := tokenIndex + 1 - // If no onchannel to the right, then nextOnChannel == -1, so set to to last token + // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token var to int if nextOnChannel == -1 { @@ -314,7 +320,8 @@ func (c *CommonTokenStream) Index() int { } func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) + c.Fill() + return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1)) } func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { @@ -329,15 +336,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string return c.GetTextFromInterval(interval.GetSourceInterval()) } -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { +func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string { c.lazyInit() - - if interval == nil { - c.Fill() - interval = NewInterval(0, len(c.tokens)-1) - } else { - c.Sync(interval.Stop) - } + c.Sync(interval.Stop) start := interval.Start stop := interval.Stop diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/vendor/github.com/antlr4-go/antlr/v4/comparators.go similarity index 82% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go rename to vendor/github.com/antlr4-go/antlr/v4/comparators.go index 9ea3200536a..7467e9b43db 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go +++ b/vendor/github.com/antlr4-go/antlr/v4/comparators.go @@ -18,17 +18,20 @@ package antlr // type safety and avoid having to implement this for every type that we want to perform comparison on. 
//
 // This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
-// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
 type ObjEqComparator[T Collectable[T]] struct{}
 
 var (
-	aStateEqInst   = &ObjEqComparator[ATNState]{}
-	aConfEqInst    = &ObjEqComparator[ATNConfig]{}
-	aConfCompInst  = &ATNConfigComparator[ATNConfig]{}
-	atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
+	aStateEqInst = &ObjEqComparator[ATNState]{}
+	aConfEqInst  = &ObjEqComparator[*ATNConfig]{}
+
+	// aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
+	aConfCompInst   = &ATNConfigComparator[*ATNConfig]{}
+	atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
 	dfaStateEqInst = &ObjEqComparator[*DFAState]{}
 	semctxEqInst   = &ObjEqComparator[SemanticContext]{}
-	atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
+	atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
+	pContextEqInst  = &ObjEqComparator[*PredictionContext]{}
 )
 
 // Equals2 delegates to the Equals() method of type T
@@ -44,14 +47,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int {
 
 type SemCComparator[T Collectable[T]] struct{}
 
-// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
 // and has a custom Equals() and Hash() implementation, because equality is not based on the
 // standard Hash() and Equals() methods of the ATNConfig type.
 type ATNConfigComparator[T Collectable[T]] struct {
 }
 
 // Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
 
 	// Same pointer, must be equal, even if both nil
 	//
@@ -72,7 +75,8 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
 }
 
 // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
+func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+
 	hash := 7
 	hash = 31*hash + o.GetState().GetStateNumber()
 	hash = 31*hash + o.GetAlt()
@@ -85,7 +89,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct {
 }
 
 // Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
 
 	// Same pointer, must be equal, even if both nil
 	//
@@ -105,21 +109,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
 }
 
 // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
+func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
 	h := murmurInit(7)
 	h = murmurUpdate(h, o.GetState().GetStateNumber())
 	h = murmurUpdate(h, o.GetContext().Hash())
 	return murmurFinish(h, 2)
 }
 
-// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
+// BaseATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
 // and has a custom Equals() and Hash() implementation, because equality is not based on the
 // standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
 }
 
 // Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
-func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
 
 	// Same pointer, must be equal, even if both nil
 	//
@@ -141,7 +145,6 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
 
 // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
 // delegates to the standard Hash() method of the ATNConfig type.
-func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
-
+func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
 	return o.Hash()
 }
diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
new file mode 100644
index 00000000000..c2b724514d4
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
@@ -0,0 +1,214 @@
+package antlr
+
+type runtimeConfiguration struct {
+	statsTraceStacks              bool
+	lexerATNSimulatorDebug        bool
+	lexerATNSimulatorDFADebug     bool
+	parserATNSimulatorDebug       bool
+	parserATNSimulatorTraceATNSim bool
+	parserATNSimulatorDFADebug    bool
+	parserATNSimulatorRetryDebug  bool
+	lRLoopEntryBranchOpt          bool
+	memoryManager                 bool
+}
+
+// Global runtime configuration
+var runtimeConfig = runtimeConfiguration{
+	lRLoopEntryBranchOpt: true,
+}
+
+type runtimeOption func(*runtimeConfiguration) error
+
+// ConfigureRuntime allows the runtime to be configured globally, setting things like trace and statistics options.
+// It uses the functional options pattern for Go. This is a package global function as it operates on the runtime
+// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
+// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
+// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
+// memory allocation type used by the runtime such as sync.Pool or not.
+//
+// The options are applied in the order they are passed in, so the last option will override any previous options.
+//
+// For example, if you want to turn on the collection create-point stack flag, you can do:
+//
+//	antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// If you want to turn it off, you can do:
+//
+//	antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func ConfigureRuntime(options ...runtimeOption) error {
+	for _, option := range options {
+		err := option(&runtimeConfig)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
+// certain structs, such as collections, or the use point of certain methods such as Put().
+// Because this can be expensive, it is turned off by default. However, it
+// can be useful to track down exactly where memory is being created and used.
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func WithStatsTraceStacks(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.statsTraceStacks = trace + return nil + } +} + +// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false)) +func WithLexerATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDebug = debug + return nil + } +} + +// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false)) +func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false)) +func WithParserATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDebug = debug + return nil + } +} + +// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator +// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false)) +func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorTraceATNSim = trace + return nil + } +} + +// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. 
+//
+// Use:
+//
+//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
+func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
+	return func(config *runtimeConfiguration) error {
+		config.parserATNSimulatorDFADebug = debug
+		return nil
+	}
+}
+
+// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Only useful to the runtime maintainers.
+//
+// Use:
+//
+//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
+//
+// You can turn it off at any time using:
+//
+//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
+func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
+	return func(config *runtimeConfiguration) error {
+		config.parserATNSimulatorRetryDebug = debug
+		return nil
+	}
+}
+
+// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
+// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
+//
+// Note that the default is to use this optimization.
+//
+// Use:
+//
+//	antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
+//
+// You can turn it off at any time using:
+//
+//	antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
+	return func(config *runtimeConfiguration) error {
+		config.lRLoopEntryBranchOpt = off
+		return nil
+	}
+}
+
+// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
+// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
+// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
+// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
+// grammar, this may help make it more practical.
+//
+// Note that the default is to use normal Go memory allocation and not pool memory.
+//
+// Use:
+//
+//	antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
+//
+// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
+// and should remember to nil out any references to the parser or lexer when you are done with them.
+func WithMemoryManager(use bool) runtimeOption {
+	return func(config *runtimeConfiguration) error {
+		config.memoryManager = use
+		return nil
+	}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
similarity index 76%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
rename to vendor/github.com/antlr4-go/antlr/v4/dfa.go
index bfd43e1f731..6b63eb1589b 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
@@ -4,6 +4,8 @@
 
 package antlr
 
+// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
+// reach and the transitions between them.
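+//
+// One DFA is built per decision point. A construction sketch (startState is an
+// assumed DecisionState taken from a deserialized ATN):
+//
+//	dfa := NewDFA(startState, 0) // 0 is an illustrative decision number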
type DFA struct {
 	// atnStartState is the ATN state in which this was created
 	atnStartState DecisionState
@@ -12,10 +14,9 @@ type DFA struct {
 
 	// states is all the DFA states. Use Map to get the old state back; Set can only
 	// indicate whether it is there. Go maps implement key hash collisions and so on and are very
-	// good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
+	// good, but the DFAState is an object and can't be used directly as the key as it can in say Java
 	// amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
-	// to see if they really are the same object.
-	//
+	// to see if they really are the same object. Hence, we have our own map storage.
 	//
 	states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
 
@@ -32,11 +33,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
 	dfa := &DFA{
 		atnStartState: atnStartState,
 		decision:      decision,
-		states:        NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
+		states:        nil, // Lazy initialize
 	}
 	if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
 		dfa.precedenceDfa = true
-		dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
+		dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
 		dfa.s0.isAcceptState = false
 		dfa.s0.requiresFullContext = false
 	}
@@ -95,12 +96,11 @@ func (d *DFA) getPrecedenceDfa() bool {
 // true or nil otherwise, and d.precedenceDfa is updated.
 func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
 	if d.getPrecedenceDfa() != precedenceDfa {
-		d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
+		d.states = nil // Lazy initialize
 		d.numstates = 0
 
 		if precedenceDfa {
-			precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
-
+			precedenceState := NewDFAState(-1, NewATNConfigSet(false))
 			precedenceState.setEdges(make([]*DFAState, 0))
 			precedenceState.isAcceptState = false
 			precedenceState.requiresFullContext = false
@@ -113,6 +113,31 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
 	}
 }
 
+// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
+// instantiation of the states JStore.
+func (d *DFA) Len() int {
+	if d.states == nil {
+		return 0
+	}
+	return d.states.Len()
+}
+
+// Get returns a state that matches s if it is present in the DFA state set. We defer to this
+// function instead of accessing states directly so that we can implement lazy instantiation of the states JStore.
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
+	if d.states == nil {
+		return nil, false
+	}
+	return d.states.Get(s)
+}
+
+func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
+	if d.states == nil {
+		d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
+	}
+	return d.states.Put(s)
+}
+
 func (d *DFA) getS0() *DFAState {
 	return d.s0
 }
@@ -121,9 +146,11 @@ func (d *DFA) setS0(s *DFAState) {
 	d.s0 = s
 }
 
-// sortedStates returns the states in d sorted by their state number.
+// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
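+// (The backing store is created lazily, so a freshly constructed DFA reports
+// Len() == 0 until the first Put; see Len, Get and Put above.)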
func (d *DFA) sortedStates() []*DFAState { - + if d.states == nil { + return []*DFAState{} + } vs := d.states.SortedSlice(func(i, j *DFAState) bool { return i.stateNumber < j.stateNumber }) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go similarity index 97% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go rename to vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go index 84d0a31e536..0e11009899c 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go @@ -10,7 +10,7 @@ import ( "strings" ) -// DFASerializer is a DFA walker that knows how to dump them to serialized +// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized // strings. type DFASerializer struct { dfa *DFA diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go similarity index 81% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go rename to vendor/github.com/antlr4-go/antlr/v4/dfa_state.go index c90dec55c86..6541430745f 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go @@ -22,30 +22,31 @@ func (p *PredPrediction) String() string { return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" } -// DFAState represents a set of possible ATN configurations. As Aho, Sethi, +// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi, // Ullman p. 117 says: "The DFA uses its state to keep track of all possible // states the ATN can be in after reading each input symbol. That is to say, -// after reading input a1a2..an, the DFA is in a state that represents the +// after reading input a1, a2,..an, the DFA is in a state that represents the // subset T of the states of the ATN that are reachable from the ATN's start -// state along some path labeled a1a2..an." In conventional NFA-to-DFA -// conversion, therefore, the subset T would be a bitset representing the set of -// states the ATN could be in. We need to track the alt predicted by each state +// state along some path labeled a1a2..an." +// +// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of +// states the [ATN] could be in. We need to track the alt predicted by each state // as well, however. More importantly, we need to maintain a stack of states, // tracking the closure operations as they jump from rule to rule, emulating // rule invocations (method calls). I have to add a stack to simulate the proper // lookahead sequences for the underlying LL grammar from which the ATN was // derived. // -// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a -// state (ala normal conversion) and a RuleContext describing the chain of rules +// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a +// state (ala normal conversion) and a [RuleContext] describing the chain of rules // (if any) followed to arrive at that state. 
// -// A DFAState may have multiple references to a particular state, but with -// different ATN contexts (with same or different alts) meaning that state was +// A [DFAState] may have multiple references to a particular state, but with +// different [ATN] contexts (with same or different alts) meaning that state was // reached via a different set of rule invocations. type DFAState struct { stateNumber int - configs ATNConfigSet + configs *ATNConfigSet // edges elements point to the target of the symbol. Shift up by 1 so (-1) // Token.EOF maps to the first element. @@ -53,7 +54,7 @@ type DFAState struct { isAcceptState bool - // prediction is the ttype we match or alt we predict if the state is accept. + // prediction is the 'ttype' we match or alt we predict if the state is 'accept'. // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or // requiresFullContext. prediction int @@ -81,9 +82,9 @@ type DFAState struct { predicates []*PredPrediction } -func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { +func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState { if configs == nil { - configs = NewBaseATNConfigSet(false) + configs = NewATNConfigSet(false) } return &DFAState{configs: configs, stateNumber: stateNumber} @@ -94,7 +95,7 @@ func (d *DFAState) GetAltSet() []int { var alts []int if d.configs != nil { - for _, c := range d.configs.GetItems() { + for _, c := range d.configs.configs { alts = append(alts, c.GetAlt()) } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go similarity index 92% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go rename to vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go index c55bcc19b2c..bd2cd8bc3a8 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go +++ b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go @@ -33,6 +33,7 @@ type DiagnosticErrorListener struct { exactOnly bool } +//goland:noinspection GoUnusedExportedFunction func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { n := new(DiagnosticErrorListener) @@ -42,7 +43,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { return n } -func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { if d.exactOnly && !exact { return } @@ -55,7 +56,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s recognizer.NotifyErrorListeners(msg, nil, nil) } -func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) { msg := "reportAttemptingFullContext d=" + d.getDecisionDescription(recognizer, dfa) + @@ -64,7 +65,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, recognizer.NotifyErrorListeners(msg, nil, nil) } -func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs 
ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) { msg := "reportContextSensitivity d=" + d.getDecisionDescription(recognizer, dfa) + ", input='" + @@ -96,12 +97,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa // @param configs The conflicting or ambiguous configuration set. // @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise // returns the set of alternatives represented in {@code configs}. -func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet { if ReportedAlts != nil { return ReportedAlts } result := NewBitSet() - for _, c := range set.GetItems() { + for _, c := range set.configs { result.add(c.GetAlt()) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go similarity index 62% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go rename to vendor/github.com/antlr4-go/antlr/v4/error_listener.go index f679f0dcd5e..21a0216434e 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go +++ b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go @@ -16,28 +16,29 @@ import ( type ErrorListener interface { SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) - ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) - ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) - ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) } type DefaultErrorListener struct { } +//goland:noinspection GoUnusedExportedFunction func NewDefaultErrorListener() *DefaultErrorListener { return new(DefaultErrorListener) } -func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { +func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) { } -func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) { } -func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) { } -func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +func (d *DefaultErrorListener) 
ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) { } type ConsoleErrorListener struct { @@ -48,21 +49,16 @@ func NewConsoleErrorListener() *ConsoleErrorListener { return new(ConsoleErrorListener) } -// Provides a default instance of {@link ConsoleErrorListener}. +// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}. var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() -// {@inheritDoc} +// SyntaxError prints messages to System.err containing the +// values of line, charPositionInLine, and msg using +// the following format: // -//

-// <p>
-// This implementation prints messages to {@link System//err} containing the
-// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
-// the following format.</p>
-//
-// <pre>
-// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
-// </pre>
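A consumer overrides this default output by embedding DefaultErrorListener and registering its own listener; a sketch against the listener API in this file (the generated parser p and the stdout format choice are assumptions):

	// verboseListener mirrors the default format but writes to stdout.
	type verboseListener struct {
		*antlr.DefaultErrorListener
	}

	func (v *verboseListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, _ antlr.RecognitionException) {
		fmt.Printf("line %d:%d %s\n", line, column, msg)
	}

	// Wiring, given a generated parser p:
	//	p.RemoveErrorListeners()
	//	p.AddErrorListener(&verboseListener{DefaultErrorListener: antlr.NewDefaultErrorListener()})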
-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +// line : +func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) { + _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) } type ProxyErrorListener struct { @@ -85,19 +81,19 @@ func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol } } -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { for _, d := range p.delegates { d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) } } -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) { for _, d := range p.delegates { d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) } } -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) { for _, d := range p.delegates { d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go similarity index 58% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go rename to vendor/github.com/antlr4-go/antlr/v4/error_strategy.go index 5c0a637ba4a..9db2be1c748 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go +++ b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go @@ -21,8 +21,8 @@ type ErrorStrategy interface { ReportMatch(Parser) } -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. +// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for +// error reporting and recovery in ANTLR parsers. type DefaultErrorStrategy struct { errorRecoveryMode bool lastErrorIndex int @@ -46,7 +46,7 @@ func NewDefaultErrorStrategy() *DefaultErrorStrategy { // The index into the input stream where the last error occurred. // This is used to prevent infinite loops where an error is found // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least + // ad nauseam. This is a failsafe mechanism to guarantee that at least // one token/tree node is consumed for two errors. // d.lastErrorIndex = -1 @@ -62,50 +62,37 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) { // This method is called to enter error recovery mode when a recognition // exception is Reported. 
-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { +func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) { d.errorRecoveryMode = true } -func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { +func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool { return d.errorRecoveryMode } // This method is called to leave error recovery mode after recovering from // a recognition exception. -// -// @param recognizer -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { +func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) { d.errorRecoveryMode = false d.lastErrorStates = nil d.lastErrorIndex = -1 } -// {@inheritDoc} -// -//

-// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
+// ReportMatch is the default implementation of error matching and simply calls endErrorCondition. func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { d.endErrorCondition(recognizer) } -// {@inheritDoc} -// -//

-// <p>The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.</p>
-//
-// <ul>
-// <li>{@link NoViableAltException}: Dispatches the call to
-// {@link //ReportNoViableAlternative}</li>
-// <li>{@link InputMisMatchException}: Dispatches the call to
-// {@link //ReportInputMisMatch}</li>
-// <li>{@link FailedPredicateException}: Dispatches the call to
-// {@link //ReportFailedPredicate}</li>
-// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
-// the exception</li>
-// </ul>
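That dispatch table corresponds to a type switch of roughly this shape; an illustrative sketch only, since the actual ReportError body is elided by this hunk:

	switch t := e.(type) {
	case *NoViableAltException:
		d.ReportNoViableAlternative(recognizer, t)
	case *InputMisMatchException:
		d.ReportInputMisMatch(recognizer, t)
	case *FailedPredicateException:
		d.ReportFailedPredicate(recognizer, t)
	default:
		recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
	}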
+// ReportError is the default implementation of error reporting. +// It returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls [beginErrorCondition] +// and dispatches the Reporting task based on the runtime type of e +// according to the following table. +// +// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative] +// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch] +// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate] +// All other types : Calls [NotifyErrorListeners] to Report the exception func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { // if we've already Reported an error and have not Matched a token // yet successfully, don't Report any errors. @@ -128,12 +115,10 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep } } -// {@inheritDoc} -// -//

-// <p>The default implementation reSynchronizes the parser by consuming tokens
-// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.</p>
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+// Recover is the default recovery implementation.
+// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
+// loosely the set of tokens that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
 	if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
 		d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
@@ -148,54 +133,58 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException
 		d.lastErrorStates = NewIntervalSet()
 	}
 	d.lastErrorStates.addOne(recognizer.GetState())
-	followSet := d.getErrorRecoverySet(recognizer)
+	followSet := d.GetErrorRecoverySet(recognizer)
 	d.consumeUntil(recognizer, followSet)
 }
 
-// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
-// that the current lookahead symbol is consistent with what were expecting
-// at d point in the ATN. You can call d anytime but ANTLR only
-// generates code to check before subrules/loops and each iteration.
+// Sync is the default implementation of error strategy synchronization.
+//
+// This Sync makes sure that the current lookahead symbol is consistent with what we were expecting
+// at this point in the [ATN]. You can call this anytime but ANTLR only
+// generates code to check before sub-rules/loops and each iteration.
 //
-//

-// <p>Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.,</p>
+// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
+// sub-rules. E.g.:
 //
-// <pre>
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-// </pre>
+// a : Sync ( stuff Sync )* +// Sync : {consume to what can follow Sync} // -// At the start of a sub rule upon error, {@link //Sync} performs single +// At the start of a sub-rule upon error, Sync performs single // token deletion, if possible. If it can't do that, it bails on the current // rule and uses the default error recovery, which consumes until the // reSynchronization set of the current rule. // -//

-// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.</p>
+// If the sub-rule is optional +// +// ({@code (...)?}, {@code (...)*}, // -//

-// <p>During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.</p>
+// or a block with an empty alternative), then the expected set includes what follows +// the sub-rule. // -//

-// <p>ORIGINS</p>
+// During loop iteration, it consumes until it sees a token that can start a +// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to +// stay in the loop as long as possible. // -//

-// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
+// # Origins
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
 // A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule
+// out of the entire rules surrounding the loop. So, for rule:
 //
-// <pre>
-// classfunc : 'class' ID '{' member* '}'
-// </pre>
+// classfunc : 'class' ID '{' member* '}' // // input with an extra token between members would force the parser to // consume until it found the next class definition rather than the next // member definition of the current class. // -//

-// <p>This functionality cost a little bit of effort because the parser has to
-// compare token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off d
-// functionality by simply overriding d method as a blank { }.</p>
+// This functionality cost a bit of effort because the parser has to +// compare the token set at the start of the loop and at each iteration. If for +// some reason speed is suffering for you, you can turn off this +// functionality by simply overriding this method as empty: +// +// { } +// +// [Jim Idle]: https://github.com/jimidle func (d *DefaultErrorStrategy) Sync(recognizer Parser) { // If already recovering, don't try to Sync if d.InErrorRecoveryMode(recognizer) { @@ -217,25 +206,21 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) { if d.SingleTokenDeletion(recognizer) != nil { return } - panic(NewInputMisMatchException(recognizer)) + recognizer.SetError(NewInputMisMatchException(recognizer)) case ATNStatePlusLoopBack, ATNStateStarLoopBack: d.ReportUnwantedToken(recognizer) expecting := NewIntervalSet() expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) + whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer)) d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) default: // do nothing if we can't identify the exact kind of ATN state } } -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError +// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException]. // -// @param recognizer the parser instance -// @param e the recognition exception +// See also [ReportError] func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { tokens := recognizer.GetTokenStream() var input string @@ -252,48 +237,38 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// This is called by {@link //ReportError} when the exception is an -// {@link InputMisMatchException}. +// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException] // -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { - msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + +// See also: [ReportError] +func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { + msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// This is called by {@link //ReportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //ReportError +// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException]. 
// -// @param recognizer the parser instance -// @param e the recognition exception +// See also: [ReportError] func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] msg := "rule " + ruleName + " " + e.message recognizer.NotifyErrorListeners(msg, e.offendingToken, e) } -// This method is called to Report a syntax error which requires the removal +// ReportUnwantedToken is called to report a syntax error that requires the removal // of a token from the input stream. At the time d method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When d method returns, -// {@code recognizer} is in error recovery mode. +// erroneous symbol is the current LT(1) symbol and has not yet been +// removed from the input stream. When this method returns, +// recognizer is in error recovery mode. // -//

-// <p>This method is called when {@link //singleTokenDeletion} identifies
+// This method is called when singleTokenDeletion identifies
 // single-token deletion as a viable recovery strategy for a mismatched
-// input error.</p>
+// input error. // -//

-// <p>The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
 // enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.</p>
-// -// @param recognizer the parser instance +// [NotifyErrorListeners] func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { if d.InErrorRecoveryMode(recognizer) { return @@ -307,21 +282,18 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { recognizer.NotifyErrorListeners(msg, t, nil) } -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. +// ReportMissingToken is called to report a syntax error which requires the +// insertion of a missing token into the input stream. At the time this +// method is called, the missing token has not yet been inserted. When this +// method returns, recognizer is in error recovery mode. // -//

-// <p>This method is called when {@link //singleTokenInsertion} identifies
+// This method is called when singleTokenInsertion identifies
 // single-token insertion as a viable recovery strategy for a mismatched
-// input error.</p>
+// input error. // -//

-// <p>The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.</p>
-// -// @param recognizer the parser instance +// The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls beginErrorCondition to +// enter error recovery mode, followed by calling [NotifyErrorListeners] func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { if d.InErrorRecoveryMode(recognizer) { return @@ -334,54 +306,48 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { recognizer.NotifyErrorListeners(msg, t, nil) } -//
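All of these Report*/Recover hooks belong to whichever ErrorStrategy is installed on the parser, so behavior can be swapped wholesale; a sketch, with the parser value p assumed:

	// Keep the recovering default:
	p.SetErrorHandler(antlr.NewDefaultErrorStrategy())

	// Or bail on the first syntax error, e.g. as stage one of two-stage parsing:
	p.SetErrorHandler(antlr.NewBailErrorStrategy())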

-// <p>The default implementation attempts to recover from the mismatched input
+// The RecoverInline default implementation attempts to recover from the mismatched input
 // by using single token insertion and deletion as described below. If the
-// recovery attempt fails, d method panics an
-// {@link InputMisMatchException}.</p>
+// recovery attempt fails, this method panics with [InputMisMatchException].
+// TODO: Not sure that panic() is the right thing to do here - JI
 //
-//

-// <p>EXTRA TOKEN (single token deletion)</p>
+// # EXTRA TOKEN (single token deletion) // -//

-// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
 // token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.</p>
+// the LA(2) token) as the successful result of the Match operation. // -//

-// <p>This recovery strategy is implemented by {@link
-// //singleTokenDeletion}.</p>
+// # This recovery strategy is implemented by singleTokenDeletion // -//

-// <p>MISSING TOKEN (single token insertion)</p>
+// # MISSING TOKEN (single token insertion) // -//

-// <p>If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.</p>
+// If current token -at LA(1) - is consistent with what could come +// after the expected LA(1) token, then assume the token is missing +// and use the parser's [TokenFactory] to create it on the fly. The +// “insertion” is performed by returning the created token as the successful +// result of the Match operation. // -//

-// <p>This recovery strategy is implemented by {@link
-// //singleTokenInsertion}.</p>
+// This recovery strategy is implemented by [SingleTokenInsertion]. // -//

-// <p>EXAMPLE</p>
+// # Example // -//

-// <p>For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// call chain:</p>
+// For example, Input i=(3 is clearly missing the ')'. When
+// the parser returns from the nested call to expr, it will have
+// the call chain:
 //
-// <pre>
-// stat &rarr expr &rarr atom
-// </pre>
+// stat → expr → atom // -// and it will be trying to Match the {@code ')'} at d point in the +// and it will be trying to Match the ')' at this point in the // derivation: // -//
-// <pre>
-// => ID '=' '(' INT ')' ('+' atom)* ”
-// ^
-// </pre>
+// : ID '=' '(' INT ')' ('+' atom)* ';' +// ^ // -// The attempt to Match {@code ')'} will fail when it sees {@code ”} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. +// The attempt to [Match] ')' will fail when it sees ';' and +// call [RecoverInline]. To recover, it sees that LA(1)==';' +// is in the set of tokens that can follow the ')' token reference +// in rule atom. It can assume that you forgot the ')'. func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { // SINGLE TOKEN DELETION MatchedSymbol := d.SingleTokenDeletion(recognizer) @@ -396,24 +362,24 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { return d.GetMissingSymbol(recognizer) } // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) + recognizer.SetError(NewInputMisMatchException(recognizer)) + return nil } -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token +// SingleTokenInsertion implements the single-token insertion inline error recovery +// strategy. It is called by [RecoverInline] if the single-token // deletion strategy fails to recover from the mismatched input. If this // method returns {@code true}, {@code recognizer} will be in error recovery // mode. // -//

-// <p>This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If d method returns
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns
 // {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce d behavior.</p>
+// token with the correct type to produce this behavior.
// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} +// This func returns true if single-token insertion is a viable recovery +// strategy for the current mismatched input. func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { currentSymbolType := recognizer.GetTokenStream().LA(1) // if current token is consistent with what could come after current @@ -431,23 +397,21 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { return false } -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover +// SingleTokenDeletion implements the single-token deletion inline error recovery +// strategy. It is called by [RecoverInline] to attempt to recover // from mismatched input. If this method returns nil, the parser and error // handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the +// recognizer will not be in error recovery mode since the // returned token was a successful Match. // -//

-// <p>If the single-token deletion is successful, d method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning {@link //ReportMatch} is called to signal a successful
-// Match.</p>
+// If the single-token deletion is successful, this method calls +// [ReportUnwantedToken] to Report the error, followed by +// [Consume] to actually “delete” the extraneous token. Then, +// before returning, [ReportMatch] is called to signal a successful +// Match. // -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} +// The func returns the successfully Matched [Token] instance if single-token +// deletion successfully recovers from the mismatched input, otherwise nil. func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { NextTokenType := recognizer.GetTokenStream().LA(2) expecting := d.GetExpectedTokens(recognizer) @@ -467,24 +431,28 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { return nil } -// Conjure up a missing token during error recovery. +// GetMissingSymbol conjures up a missing token during error recovery. // // The recognizer attempts to recover from single missing // symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes +// For example: +// +// x=ID {f($x)}. +// +// The action clearly assumes // that there has been an identifier Matched previously and that // $x points at that token. If that token is missing, but // the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we +// this token is missing, and we keep going. Because we // have to return some token to replace the missing token, // we have to conjure one up. This method gives the user control // over the tokens returned for missing tokens. Mostly, // you will want to create something special for identifier // tokens. For literals such as '{' and ',', the default // action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. +// a [CommonToken] of the appropriate type. The text will be the token name. +// If you need to change which tokens must be created by the lexer, +// override this method to create the appropriate tokens. func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { currentSymbol := recognizer.GetCurrentToken() expecting := d.GetExpectedTokens(recognizer) @@ -498,7 +466,7 @@ func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { if expectedTokenType > 0 && expectedTokenType < len(ln) { tokenText = "" } else { - tokenText = "" // TODO matches the JS impl + tokenText = "" // TODO: matches the JS impl } } current := currentSymbol @@ -516,13 +484,13 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet return recognizer.GetExpectedTokens() } -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about +// GetTokenErrorDisplay determines how a token should be displayed in an error message. +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. 
Override this func in that case +// to use t.String() (which, for [CommonToken], dumps everything about // the token). This is better than forcing you to override a method in // your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. +// so that it creates a new type. func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { if t == nil { return "" @@ -545,52 +513,57 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { return "'" + s + "'" } -// Compute the error recovery set for the current rule. During +// GetErrorRecoverySet computes the error recovery set for the current rule. During // rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack d amounts to +// follow that rule reference on the stack. This amounts to // computing FIRST of what follows the rule reference in the // enclosing rule. See LinearApproximator.FIRST(). +// // This local follow set only includes tokens // from within the rule i.e., the FIRST computation done by // ANTLR stops at the end of a rule. // -// # EXAMPLE +// # Example // // When you find a "no viable alt exception", the input is not // consistent with any of the alternatives for rule r. The best // thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. +// can legally follow a call to r or any rule that called r. // You don't want the exact set of viable next tokens because the // input might just be missing a token--you might consume the // rest of the input looking for one of the missing tokens. // -// Consider grammar: +// Consider the grammar: +// +// a : '[' b ']' +// | '(' b ')' +// ; // -// a : '[' b ']' -// | '(' b ')' +// b : c '^' INT +// ; // -// b : c '^' INT -// c : ID -// | INT +// c : ID +// | INT +// ; // // At each rule invocation, the set of tokens that could follow // that rule is pushed on a stack. Here are the various // context-sensitive follow sets: // -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' +// FOLLOW(b1_in_a) = FIRST(']') = ']' +// FOLLOW(b2_in_a) = FIRST(')') = ')' +// FOLLOW(c_in_b) = FIRST('^') = '^' // -// Upon erroneous input "[]", the call chain is +// Upon erroneous input “[]”, the call chain is // -// a -> b -> c +// a → b → c // // and, hence, the follow context stack is: // -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c +// Depth Follow set Start of rule execution +// 0 a (from main()) +// 1 ']' b +// 2 '^' c // // Notice that ')' is not included, because b would have to have // been called from a different context in rule a for ')' to be @@ -598,11 +571,14 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { // // For error recovery, we cannot consider FOLLOW(c) // (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that +// all context-sensitive FOLLOW sets - the set of all tokens that // could follow any reference in the call chain. We need to // reSync to one of those tokens. Note that FOLLOW(c)='^' and if // we reSync'd to that token, we'd consume until EOF. We need to -// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. +// Sync to context-sensitive FOLLOWs for a, b, and c: +// +// {']','^'} +// // In this case, for input "[]", LA(1) is ']' and in the set, so we would // not consume anything. 
After printing an error, rule c would // return normally. Rule b would not find the required '^' though. @@ -620,22 +596,19 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { // // ANTLR's error recovery mechanism is based upon original ideas: // -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 +// [Algorithms + Data Structures = Programs] by Niklaus Wirth and +// [A note on error recovery in recursive descent parsers]. // -// Later, Josef Grosch had some good ideas: +// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers] // -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead +// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs // -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. -func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { +// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905 +// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE +// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet { atn := recognizer.GetInterpreter().atn ctx := recognizer.GetParserRuleContext() recoverSet := NewIntervalSet() @@ -660,40 +633,36 @@ func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) } } -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors +// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors // by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes +// [ParseCancellationException]. The implementation ensures that the +// [ParserRuleContext//exception] field is set for all parse tree nodes // that were not completed prior to encountering the error. // -//

-// This error strategy is useful in the following scenarios.

+// This error strategy is useful in the following scenarios. // -//
-// <ul>
-// <li>Two-stage parsing: This error strategy allows the first
-// stage of two-stage parsing to immediately terminate if an error is
-// encountered, and immediately fall back to the second stage. In addition to
-// avoiding wasted work by attempting to recover from errors here, the empty
-// implementation of {@link BailErrorStrategy//Sync} improves the performance of
-// the first stage.</li>
-// <li>Silent validation: When syntax errors are not being
-// Reported or logged, and the parse result is simply ignored if errors occur,
-// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
-// when the result will be ignored either way.</li>
-// </ul>
+// - Two-stage parsing: This error strategy allows the first +// stage of two-stage parsing to immediately terminate if an error is +// encountered, and immediately fall back to the second stage. In addition to +// avoiding wasted work by attempting to recover from errors here, the empty +// implementation of [BailErrorStrategy.Sync] improves the performance of +// the first stage. // -//

-// <pre>
-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
-// </pre>
+// - Silent validation: When syntax errors are not being +// Reported or logged, and the parse result is simply ignored if errors occur, +// the [BailErrorStrategy] avoids wasting work on recovering from errors +// when the result will be ignored either way. // -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - +// myparser.SetErrorHandler(NewBailErrorStrategy()) +// +// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)] type BailErrorStrategy struct { *DefaultErrorStrategy } var _ ErrorStrategy = &BailErrorStrategy{} +//goland:noinspection GoUnusedExportedFunction func NewBailErrorStrategy() *BailErrorStrategy { b := new(BailErrorStrategy) @@ -703,10 +672,10 @@ func NewBailErrorStrategy() *BailErrorStrategy { return b } -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. +// Recover Instead of recovering from exception e, re-panic it wrapped +// in a [ParseCancellationException] so it is not caught by the +// rule func catches. Use Exception.GetCause() to get the +// original [RecognitionException]. func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { context := recognizer.GetParserRuleContext() for context != nil { @@ -717,10 +686,10 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { context = nil } } - panic(NewParseCancellationException()) // TODO we don't emit e properly + recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly } -// Make sure we don't attempt to recover inline if the parser +// RecoverInline makes sure we don't attempt to recover inline if the parser // successfully recovers, it won't panic an exception. func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { b.Recover(recognizer, NewInputMisMatchException(recognizer)) @@ -728,7 +697,6 @@ func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { return nil } -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass +// Sync makes sure we don't attempt to recover from problems in sub-rules. +func (b *BailErrorStrategy) Sync(_ Parser) { } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/vendor/github.com/antlr4-go/antlr/v4/errors.go similarity index 73% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go rename to vendor/github.com/antlr4-go/antlr/v4/errors.go index 3954c137829..8f0f2f601fd 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go +++ b/vendor/github.com/antlr4-go/antlr/v4/errors.go @@ -35,7 +35,7 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In // } else { // stack := NewError().stack // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int + // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int t := new(BaseRecognitionException) @@ -43,15 +43,17 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In t.recognizer = recognizer t.input = input t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams + + // The current Token when an error occurred. Since not all streams // support accessing symbols by index, we have to track the {@link Token} // instance itself. 
+ // t.offendingToken = nil + // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. + // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the + // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match. + // t.offendingState = -1 if t.recognizer != nil { t.offendingState = t.recognizer.GetState() @@ -74,15 +76,15 @@ func (b *BaseRecognitionException) GetInputStream() IntStream { //

-// <p>If the state number is not known, b method returns -1.</p>
-// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. +// getExpectedTokens gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time this exception was raised. // -//

-// <p>If the set of expected tokens is not known and could not be computed,
-// b method returns {@code nil}.</p>
+// If the set of expected tokens is not known and could not be computed, +// this method returns nil. // -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / +// The func returns the set of token types that could potentially follow the current +// state in the {ATN}, or nil if the information is not available. + func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { if b.recognizer != nil { return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) @@ -99,10 +101,10 @@ type LexerNoViableAltException struct { *BaseRecognitionException startIndex int - deadEndConfigs ATNConfigSet + deadEndConfigs *ATNConfigSet } -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException { l := new(LexerNoViableAltException) @@ -128,14 +130,16 @@ type NoViableAltException struct { startToken Token offendingToken Token ctx ParserRuleContext - deadEndConfigs ATNConfigSet + deadEndConfigs *ATNConfigSet } -// Indicates that the parser could not decide which of two or more paths +// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths // to take based upon the remaining input. It tracks the starting token // of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { +// in the various paths when the error. +// +// Reported by [ReportNoViableAlternative] +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { if ctx == nil { ctx = recognizer.GetParserRuleContext() @@ -157,12 +161,14 @@ func NewNoViableAltException(recognizer Parser, input TokenStream, startToken To n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// + // input.LT(1) n.deadEndConfigs = deadEndConfigs + // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) + // not be buffering tokens so get a reference to it. + // + // At the time the error occurred, of course the stream needs to keep a + // buffer of all the tokens, but later we might not have access to those. n.startToken = startToken n.offendingToken = offendingToken @@ -173,7 +179,7 @@ type InputMisMatchException struct { *BaseRecognitionException } -// This signifies any kind of mismatched input exceptions such as +// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as // when the current input does not Match the expected token. 
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { @@ -186,11 +192,10 @@ func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { } -// A semantic predicate failed during validation. Validation of predicates +// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates // occurs when normally parsing the alternative just like Matching a token. // Disambiguating predicate evaluation occurs when we test a predicate during // prediction. - type FailedPredicateException struct { *BaseRecognitionException @@ -199,6 +204,7 @@ type FailedPredicateException struct { predicate string } +//goland:noinspection GoUnusedExportedFunction func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { f := new(FailedPredicateException) @@ -231,6 +237,21 @@ func (f *FailedPredicateException) formatMessage(predicate, message string) stri type ParseCancellationException struct { } +func (p ParseCancellationException) GetOffendingToken() Token { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetMessage() string { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetInputStream() IntStream { + //TODO implement me + panic("implement me") +} + func NewParseCancellationException() *ParseCancellationException { // Error.call(this) // Error.captureStackTrace(this, ParseCancellationException) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go similarity index 52% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/file_stream.go index bd6ad5efe3d..5f65f809be5 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go @@ -5,8 +5,7 @@ package antlr import ( - "bytes" - "io" + "bufio" "os" ) @@ -14,34 +13,53 @@ import ( // when you construct the object. type FileStream struct { - *InputStream - + InputStream filename string } +//goland:noinspection GoUnusedExportedFunction func NewFileStream(fileName string) (*FileStream, error) { - buf := bytes.NewBuffer(nil) - f, err := os.Open(fileName) if err != nil { return nil, err } - defer f.Close() - _, err = io.Copy(buf, f) + + defer func(f *os.File) { + errF := f.Close() + if errF != nil { + } + }(f) + + reader := bufio.NewReader(f) + fInfo, err := f.Stat() if err != nil { return nil, err } - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) + fs := &FileStream{ + InputStream: InputStream{ + index: 0, + name: fileName, + }, + filename: fileName, + } - fs.InputStream = NewInputStream(s) + // Pre-build the buffer and read runes efficiently + // + fs.data = make([]rune, 0, fInfo.Size()) + for { + r, _, err := reader.ReadRune() + if err != nil { + break + } + fs.data = append(fs.data, r) + } + fs.size = len(fs.data) // Size in runes + // All done. + // return fs, nil - } func (f *FileStream) GetSourceName() string { diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go new file mode 100644 index 00000000000..b737fe85fbd --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -0,0 +1,157 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "io" +) + +type InputStream struct { + name string + index int + data []rune + size int +} + +// NewIoStream creates a new input stream from the given io.Reader reader. +// Note that the reader is read completely into memory and so it must actually +// have a stopping point - you cannot pass in a reader on an open-ended source such +// as a socket for instance. +func NewIoStream(reader io.Reader) *InputStream { + + rReader := bufio.NewReader(reader) + + is := &InputStream{ + name: "", + index: 0, + } + + // Pre-build the buffer and read runes reasonably efficiently given that + // we don't exactly know how big the input is. + // + is.data = make([]rune, 0, 512) + for { + r, _, err := rReader.ReadRune() + if err != nil { + break + } + is.data = append(is.data, r) + } + is.size = len(is.data) // number of runes + return is +} + +// NewInputStream creates a new input stream from the given string +func NewInputStream(data string) *InputStream { + + is := &InputStream{ + name: "", + index: 0, + data: []rune(data), // This is actually the most efficient way + } + is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too + return is +} + +func (is *InputStream) reset() { + is.index = 0 +} + +// Consume moves the input pointer to the next character in the input stream +func (is *InputStream) Consume() { + if is.index >= is.size { + // assert is.LA(1) == TokenEOF + panic("cannot consume EOF") + } + is.index++ +} + +// LA returns the character at the given offset from the start of the input stream +func (is *InputStream) LA(offset int) int { + + if offset == 0 { + return 0 // nil + } + if offset < 0 { + offset++ // e.g., translate LA(-1) to use offset=0 + } + pos := is.index + offset - 1 + + if pos < 0 || pos >= is.size { // invalid + return TokenEOF + } + + return int(is.data[pos]) +} + +// LT returns the character at the given offset from the start of the input stream +func (is *InputStream) LT(offset int) int { + return is.LA(offset) +} + +// Index returns the current offset in to the input stream +func (is *InputStream) Index() int { + return is.index +} + +// Size returns the total number of characters in the input stream +func (is *InputStream) Size() int { + return is.size +} + +// Mark does nothing here as we have entire buffer +func (is *InputStream) Mark() int { + return -1 +} + +// Release does nothing here as we have entire buffer +func (is *InputStream) Release(_ int) { +} + +// Seek the input point to the provided index offset +func (is *InputStream) Seek(index int) { + if index <= is.index { + is.index = index // just jump don't update stream state (line,...) 
+ return + } + // seek forward + is.index = intMin(index, is.size) +} + +// GetText returns the text from the input stream from the start to the stop index +func (is *InputStream) GetText(start int, stop int) string { + if stop >= is.size { + stop = is.size - 1 + } + if start >= is.size { + return "" + } + + return string(is.data[start : stop+1]) +} + +// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last +// character of the stop token +func (is *InputStream) GetTextFromTokens(start, stop Token) string { + if start != nil && stop != nil { + return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) + } + + return "" +} + +func (is *InputStream) GetTextFromInterval(i Interval) string { + return is.GetText(i.Start, i.Stop) +} + +func (*InputStream) GetSourceName() string { + return "" +} + +// String returns the entire input stream as a string +func (is *InputStream) String() string { + return string(is.data) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/int_stream.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go similarity index 82% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go rename to vendor/github.com/antlr4-go/antlr/v4/interval_set.go index c1e155e8180..cc5066067a2 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go +++ b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go @@ -14,20 +14,21 @@ type Interval struct { Stop int } -/* stop is not included! */ -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i +// NewInterval creates a new interval with the given start and stop values. +func NewInterval(start, stop int) Interval { + return Interval{ + Start: start, + Stop: stop, + } } -func (i *Interval) Contains(item int) bool { +// Contains returns true if the given item is contained within the interval. +func (i Interval) Contains(item int) bool { return item >= i.Start && item < i.Stop } -func (i *Interval) String() string { +// String generates a string representation of the interval. +func (i Interval) String() string { if i.Start == i.Stop-1 { return strconv.Itoa(i.Start) } @@ -35,15 +36,18 @@ func (i *Interval) String() string { return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) } -func (i *Interval) length() int { +// Length returns the length of the interval. +func (i Interval) Length() int { return i.Stop - i.Start } +// IntervalSet represents a collection of [Intervals], which may be read-only. type IntervalSet struct { - intervals []*Interval + intervals []Interval readOnly bool } +// NewIntervalSet creates a new empty, writable, interval set. 
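The Interval type above is easy to misread: Contains treats Stop as exclusive, while String prints the range inclusively. A minimal editorial sketch against the API shown in this patch (the values are illustrative; the import path is the module this patch vendors):

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	iv := antlr.NewInterval(3, 7) // Start=3, Stop=7; Stop is exclusive for Contains
	fmt.Println(iv.Contains(6))   // true:  6 >= Start && 6 < Stop
	fmt.Println(iv.Contains(7))   // false: Stop is not included
	fmt.Println(iv.String())      // "3..6" — String prints the range inclusively
	fmt.Println(iv.Length())      // 4
}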
func NewIntervalSet() *IntervalSet { i := new(IntervalSet) @@ -54,6 +58,20 @@ func NewIntervalSet() *IntervalSet { return i } +func (i *IntervalSet) Equals(other *IntervalSet) bool { + if len(i.intervals) != len(other.intervals) { + return false + } + + for k, v := range i.intervals { + if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop { + return false + } + } + + return true +} + func (i *IntervalSet) first() int { if len(i.intervals) == 0 { return TokenInvalidType @@ -70,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) { i.addInterval(NewInterval(l, h+1)) } -func (i *IntervalSet) addInterval(v *Interval) { +func (i *IntervalSet) addInterval(v Interval) { if i.intervals == nil { - i.intervals = make([]*Interval, 0) + i.intervals = make([]Interval, 0) i.intervals = append(i.intervals, v) } else { // find insert pos for k, interval := range i.intervals { // distinct range -> insert if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...) return } else if v.Stop == interval.Start { i.intervals[k].Start = v.Start @@ -139,16 +157,16 @@ func (i *IntervalSet) contains(item int) bool { } func (i *IntervalSet) length() int { - len := 0 + iLen := 0 for _, v := range i.intervals { - len += v.length() + iLen += v.Length() } - return len + return iLen } -func (i *IntervalSet) removeRange(v *Interval) { +func (i *IntervalSet) removeRange(v Interval) { if v.Start == v.Stop-1 { i.removeOne(v.Start) } else if i.intervals != nil { @@ -162,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) { i.intervals[k] = NewInterval(ni.Start, v.Start) x := NewInterval(v.Stop, ni.Stop) // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } else if v.Start <= ni.Start && v.Stop >= ni.Stop { // i.intervals.splice(k, 1) @@ -199,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) { x := NewInterval(ki.Start, v) ki.Start = v + 1 // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } } @@ -223,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin return i.toIndexString() } -func (i *IntervalSet) GetIntervals() []*Interval { +func (i *IntervalSet) GetIntervals() []Interval { return i.intervals } diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go new file mode 100644 index 00000000000..ceccd96d258 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go @@ -0,0 +1,685 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +import ( + "container/list" + "runtime/debug" + "sort" + "sync" +) + +// Collectable is an interface that a struct should implement if it is to be +// usable as a key in these collections. 
+type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +type CollectionSource int +type CollectionDescriptor struct { + SybolicName string + Description string +} + +const ( + UnknownCollection CollectionSource = iota + ATNConfigLookupCollection + ATNStateCollection + DFAStateCollection + ATNConfigCollection + PredictionContextCollection + SemanticContextCollection + ClosureBusyCollection + PredictionVisitedCollection + MergeCacheCollection + PredictionContextCacheCollection + AltSetCollection + ReachSetCollection +) + +var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{ + UnknownCollection: { + SybolicName: "UnknownCollection", + Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.", + }, + ATNConfigCollection: { + SybolicName: "ATNConfigCollection", + Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." + + "For instance, it is used to store the results of the closure() operation in the ATN.", + }, + ATNConfigLookupCollection: { + SybolicName: "ATNConfigLookupCollection", + Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." + + "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.", + }, + ATNStateCollection: { + SybolicName: "ATNStateCollection", + Description: "ATNState collection. This is used to store the states of the ATN.", + }, + DFAStateCollection: { + SybolicName: "DFAStateCollection", + Description: "DFAState collection. This is used to store the states of the DFA.", + }, + PredictionContextCollection: { + SybolicName: "PredictionContextCollection", + Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.", + }, + SemanticContextCollection: { + SybolicName: "SemanticContextCollection", + Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.", + }, + ClosureBusyCollection: { + SybolicName: "ClosureBusyCollection", + Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." + + "It stores ATNConfigs that are currently being processed in the closure() operation.", + }, + PredictionVisitedCollection: { + SybolicName: "PredictionVisitedCollection", + Description: "A map that records whether we have visited a particular context when searching through cached entries.", + }, + MergeCacheCollection: { + SybolicName: "MergeCacheCollection", + Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.", + }, + PredictionContextCacheCollection: { + SybolicName: "PredictionContextCacheCollection", + Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.", + }, + AltSetCollection: { + SybolicName: "AltSetCollection", + Description: "Used to eliminate duplicate alternatives in an ATN config set.", + }, + ReachSetCollection: { + SybolicName: "ReachSetCollection", + Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.", + }, +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. 
This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] + stats *JStatRec +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + if collectStats { + s.stats = &JStatRec{ + Source: cType, + Description: desc, + } + + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + s.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(s.stats) + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { + + if collectStats { + s.stats.Puts++ + } + kh := s.comparator.Hash1(value) + + var hClash bool + for _, v1 := range s.store[kh] { + hClash = true + if s.comparator.Equals2(value, v1) { + if collectStats { + s.stats.PutHits++ + s.stats.PutHashConflicts++ + } + return v1, true + } + if collectStats { + s.stats.PutMisses++ + } + } + if collectStats && hClash { + s.stats.PutHashConflicts++ + } + s.store[kh] = append(s.store[kh], value) + + if collectStats { + if len(s.store[kh]) > s.stats.MaxSlotSize { + s.stats.MaxSlotSize = len(s.store[kh]) + } + } + s.len++ + if collectStats { + s.stats.CurSize = s.len + if s.len > s.stats.MaxSize { + s.stats.MaxSize = s.len + } + } + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. 
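A small sketch of the Put/Get contract described above, assuming only the JStore API shown in this patch; the point type and pointComparator are hypothetical, invented here for illustration:

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// point and pointComparator are illustrative only; any type works as long as
// the comparator supplies Hash1 and Equals2.
type point struct{ x, y int }

type pointComparator struct{}

func (pointComparator) Hash1(p point) int       { return p.x*31 + p.y }
func (pointComparator) Equals2(a, b point) bool { return a == b }

func main() {
	s := antlr.NewJStore[point, pointComparator](pointComparator{}, antlr.UnknownCollection, "sketch")

	v, existed := s.Put(point{1, 2})
	fmt.Println(v, existed) // {1 2} false — first insertion stores the value

	v, existed = s.Put(point{1, 2})
	fmt.Println(v, existed) // {1 2} true — an equal value was already stored, so it is returned

	got, ok := s.Get(point{1, 2})
	fmt.Println(got, ok) // {1 2} true — lookup uses Hash1/Equals2, not pointer identity
}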
+func (s *JStore[T, C]) Get(key T) (T, bool) { + if collectStats { + s.stats.Gets++ + } + kh := s.comparator.Hash1(key) + var hClash bool + for _, v := range s.store[kh] { + hClash = true + if s.comparator.Equals2(key, v) { + if collectStats { + s.stats.GetHits++ + s.stats.GetHashConflicts++ + } + return v, true + } + if collectStats { + s.stats.GetMisses++ + } + } + if collectStats { + if hClash { + s.stats.GetHashConflicts++ + } + s.stats.GetNoEnt++ + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) + } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + vs = append(vs, e...) + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] + stats *JStatRec +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] { + m := &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) { + if collectStats { + m.stats.Puts++ + } + kh := m.comparator.Hash1(key) + + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.PutHits++ + m.stats.PutHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.PutMisses++ + } + } + if collectStats { + if hClash { + m.stats.PutHashConflicts++ + } + } + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + if collectStats { + if len(m.store[kh]) > m.stats.MaxSlotSize { + m.stats.MaxSlotSize = len(m.store[kh]) + } + } + m.len++ + if collectStats { + m.stats.CurSize = m.len + if m.len > m.stats.MaxSize { + m.stats.MaxSize = m.len + } + } + return val, false +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + if collectStats { + m.stats.Gets++ + } + var none V + kh := m.comparator.Hash1(key) + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.GetHits++ + m.stats.GetHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.GetMisses++ + } + } + if collectStats { + if hClash { + m.stats.GetHashConflicts++ + } + m.stats.GetNoEnt++ + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return m.len +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if 
m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) + m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} + +type JPCMap struct { + store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]] + size int + stats *JStatRec +} + +func NewJPCMap(cType CollectionSource, desc string) *JPCMap { + m := &JPCMap{ + store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + // Do we have a map stored by k1? + // + m2, present := pcm.store.Get(k1) + if present { + if collectStats { + pcm.stats.GetHits++ + } + // We found a map of values corresponding to k1, so now we need to look up k2 in that map + // + return m2.Get(k2) + } + if collectStats { + pcm.stats.GetMisses++ + } + return nil, false +} + +func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) { + + if collectStats { + pcm.stats.Puts++ + } + // First does a map already exist for k1? + // + if m2, present := pcm.store.Get(k1); present { + if collectStats { + pcm.stats.PutHits++ + } + _, present = m2.Put(k2, v) + if !present { + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + } + } else { + // No map found for k1, so we create it, add in our value, then store is + // + if collectStats { + pcm.stats.PutMisses++ + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry") + } else { + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry") + } + + m2.Put(k2, v) + pcm.store.Put(k1, m2) + pcm.size++ + } +} + +type JPCMap2 struct { + store map[int][]JPCEntry + size int + stats *JStatRec +} + +type JPCEntry struct { + k1, k2, v *PredictionContext +} + +func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 { + m := &JPCMap2{ + store: make(map[int][]JPCEntry, 1000), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func dHash(k1, k2 *PredictionContext) int { + return k1.cachedHash*31 + k2.cachedHash +} + +func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.GetHits++ + pcm.stats.GetHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.GetMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.GetHashConflicts++ + } + pcm.stats.GetNoEnt++ + 
} + return nil, false +} + +func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Puts++ + } + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.PutHits++ + pcm.stats.PutHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.PutMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.PutHashConflicts++ + } + } + pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v}) + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + return nil, false +} + +type VisitEntry struct { + k *PredictionContext + v *PredictionContext +} +type VisitRecord struct { + store map[*PredictionContext]*PredictionContext + len int + stats *JStatRec +} + +type VisitList struct { + cache *list.List + lock sync.RWMutex +} + +var visitListPool = VisitList{ + cache: list.New(), + lock: sync.RWMutex{}, +} + +// NewVisitRecord returns a new VisitRecord instance from the pool if available. +// Note that this "map" uses a pointer as a key because we are emulating the behavior of +// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal, +// which means is the key the same reference to an object rather than is it .equals() to another +// object. +func NewVisitRecord() *VisitRecord { + visitListPool.lock.Lock() + el := visitListPool.cache.Front() + defer visitListPool.lock.Unlock() + var vr *VisitRecord + if el == nil { + vr = &VisitRecord{ + store: make(map[*PredictionContext]*PredictionContext), + } + if collectStats { + vr.stats = &JStatRec{ + Source: PredictionContextCacheCollection, + Description: "VisitRecord", + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + vr.stats.CreateStack = debug.Stack() + } + } + } else { + vr = el.Value.(*VisitRecord) + visitListPool.cache.Remove(el) + vr.store = make(map[*PredictionContext]*PredictionContext) + } + if collectStats { + Statistics.AddJStatRec(vr.stats) + } + return vr +} + +func (vr *VisitRecord) Release() { + vr.len = 0 + vr.store = nil + if collectStats { + vr.stats.MaxSize = 0 + vr.stats.CurSize = 0 + vr.stats.Gets = 0 + vr.stats.GetHits = 0 + vr.stats.GetMisses = 0 + vr.stats.GetHashConflicts = 0 + vr.stats.GetNoEnt = 0 + vr.stats.Puts = 0 + vr.stats.PutHits = 0 + vr.stats.PutMisses = 0 + vr.stats.PutHashConflicts = 0 + vr.stats.MaxSlotSize = 0 + } + visitListPool.lock.Lock() + visitListPool.cache.PushBack(vr) + visitListPool.lock.Unlock() +} + +func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Gets++ + } + v := vr.store[k] + if v != nil { + if collectStats { + vr.stats.GetHits++ + } + return v, true + } + if collectStats { + vr.stats.GetNoEnt++ + } + return nil, false +} + +func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Puts++ + } + vr.store[k] = v + vr.len++ + if collectStats { + vr.stats.CurSize = vr.len + if vr.len > vr.stats.MaxSize { + vr.stats.MaxSize = vr.len + } + } + return v, false +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go similarity index 78% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer.go index 
6533f051645..3c7896a9183 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go @@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer { // create a single token. NextToken will return l object after // Matching lexer rule(s). If you subclass to allow multiple token // emissions, then set l to the last token to be Matched or - // something nonnil so that the auto token emit mechanism will not + // something non nil so that the auto token emit mechanism will not // emit another token. lexer.token = nil @@ -111,6 +111,7 @@ const ( LexerSkip = -3 ) +//goland:noinspection GoUnusedConst const ( LexerDefaultTokenChannel = TokenDefaultChannel LexerHidden = TokenHiddenChannel @@ -118,7 +119,7 @@ const ( LexerMaxCharValue = 0x10FFFF ) -func (b *BaseLexer) reset() { +func (b *BaseLexer) Reset() { // wack Lexer state variables if b.input != nil { b.input.Seek(0) // rewind the input @@ -176,7 +177,7 @@ func (b *BaseLexer) safeMatch() (ret int) { return b.Interpreter.Match(b.input, b.mode) } -// Return a token from l source i.e., Match a token on the char stream. +// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream. func (b *BaseLexer) NextToken() Token { if b.input == nil { panic("NextToken requires a non-nil input stream.") @@ -205,9 +206,8 @@ func (b *BaseLexer) NextToken() Token { continueOuter := false for { b.thetype = TokenInvalidType - ttype := LexerSkip - ttype = b.safeMatch() + ttype := b.safeMatch() if b.input.LA(1) == TokenEOF { b.hitEOF = true @@ -234,12 +234,11 @@ func (b *BaseLexer) NextToken() Token { } } -// Instruct the lexer to Skip creating a token for current lexer rule -// and look for another token. NextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIPTOKEN. Recall that +// Skip instructs the lexer to Skip creating a token for current lexer rule +// and look for another token. [NextToken] knows to keep looking when +// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that // if token==nil at end of any token rule, it creates one for you // and emits it. -// / func (b *BaseLexer) Skip() { b.thetype = LexerSkip } @@ -248,23 +247,29 @@ func (b *BaseLexer) More() { b.thetype = LexerMore } +// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode +// will be in force. func (b *BaseLexer) SetMode(m int) { b.mode = m } +// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the +// current lexer mode to the supplied mode m. func (b *BaseLexer) PushMode(m int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("pushMode " + strconv.Itoa(m)) } b.modeStack.Push(b.mode) b.mode = m } +// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to +// return to. 
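A minimal sketch of the mode-stack behaviour documented above; the mode numbers are illustrative stand-ins for the constants a generated lexer would use:

package main

import "github.com/antlr4-go/antlr/v4"

func main() {
	lex := antlr.NewBaseLexer(antlr.NewInputStream("abc"))

	lex.PushMode(2)   // saves the current (default) mode, then switches to mode 2
	lex.PushMode(5)   // modes nest; they restore in LIFO order
	_ = lex.PopMode() // back to mode 2
	_ = lex.PopMode() // back to the default mode; popping an empty stack panics
}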
func (b *BaseLexer) PopMode() int { if len(b.modeStack) == 0 { panic("Empty Stack") } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) } i, _ := b.modeStack.Pop() @@ -280,7 +285,7 @@ func (b *BaseLexer) inputStream() CharStream { func (b *BaseLexer) SetInputStream(input CharStream) { b.input = nil b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} - b.reset() + b.Reset() b.input = input b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} } @@ -289,20 +294,19 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { return b.tokenFactorySourcePair } -// By default does not support multiple emits per NextToken invocation -// for efficiency reasons. Subclass and override l method, NextToken, -// and GetToken (to push tokens into a list and pull from that list -// rather than a single variable as l implementation does). -// / +// EmitToken by default does not support multiple emits per [NextToken] invocation +// for efficiency reasons. Subclass and override this func, [NextToken], +// and [GetToken] (to push tokens into a list and pull from that list +// rather than a single variable as this implementation does). func (b *BaseLexer) EmitToken(token Token) { b.token = token } -// The standard method called to automatically emit a token at the +// Emit is the standard method called to automatically emit a token at the // outermost lexical rule. The token object should point into the // char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override l method to emit -// custom Token objects or provide a Newfactory. +// use that to set the token's text. Override this method to emit +// custom [Token] objects or provide a new factory. // / func (b *BaseLexer) Emit() Token { t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) @@ -310,6 +314,7 @@ func (b *BaseLexer) Emit() Token { return t } +// EmitEOF emits an EOF token. By default, this is the last token emitted func (b *BaseLexer) EmitEOF() Token { cpos := b.GetCharPositionInLine() lpos := b.GetLine() @@ -318,6 +323,7 @@ func (b *BaseLexer) EmitEOF() Token { return eof } +// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned. func (b *BaseLexer) GetCharPositionInLine() int { return b.Interpreter.GetCharPositionInLine() } @@ -334,13 +340,12 @@ func (b *BaseLexer) SetType(t int) { b.thetype = t } -// What is the index of the current character of lookahead?/// +// GetCharIndex returns the index of the current character of lookahead func (b *BaseLexer) GetCharIndex() int { return b.input.Index() } -// Return the text Matched so far for the current token or any text override. -// Set the complete text of l token it wipes any previous changes to the text. +// GetText returns the text Matched so far for the current token or any text override. func (b *BaseLexer) GetText() string { if b.text != "" { return b.text @@ -349,17 +354,20 @@ func (b *BaseLexer) GetText() string { return b.Interpreter.GetText(b.input) } +// SetText sets the complete text of this token; it wipes any previous changes to the text. func (b *BaseLexer) SetText(text string) { b.text = text } +// GetATN returns the ATN used by the lexer. 
func (b *BaseLexer) GetATN() *ATN { return b.Interpreter.ATN() } -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. -// / +// GetAllTokens returns a list of all [Token] objects in input char stream. +// Forces a load of all tokens that can be made from the input char stream. +// +// Does not include EOF token. func (b *BaseLexer) GetAllTokens() []Token { vl := b.Virt tokens := make([]Token, 0) @@ -398,11 +406,13 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string { return "'" + b.getErrorDisplayForChar(c) + "'" } -// Lexers can normally Match any char in it's vocabulary after Matching -// a token, so do the easy thing and just kill a character and hope +// Recover can normally Match any char in its vocabulary after Matching +// a token, so here we do the easy thing and just kill a character and hope // it all works out. You can instead use the rule invocation stack // to do sophisticated error recovery if you are in a fragment rule. -// / +// +// In general, lexers should not need to recover and should have rules that cover any eventuality, such as +// a character that makes no sense to the recognizer. func (b *BaseLexer) Recover(re RecognitionException) { if b.input.LA(1) != TokenEOF { if _, ok := re.(*LexerNoViableAltException); ok { diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go similarity index 78% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_action.go index 111656c2952..eaa7393e06f 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go @@ -7,14 +7,29 @@ package antlr import "strconv" const ( - LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. + // LexerActionTypeChannel represents a [LexerChannelAction] action. + LexerActionTypeChannel = 0 + + // LexerActionTypeCustom represents a [LexerCustomAction] action. + LexerActionTypeCustom = 1 + + // LexerActionTypeMode represents a [LexerModeAction] action. + LexerActionTypeMode = 2 + + // LexerActionTypeMore represents a [LexerMoreAction] action. + LexerActionTypeMore = 3 + + // LexerActionTypePopMode represents a [LexerPopModeAction] action. + LexerActionTypePopMode = 4 + + // LexerActionTypePushMode represents a [LexerPushModeAction] action. + LexerActionTypePushMode = 5 + + // LexerActionTypeSkip represents a [LexerSkipAction] action. + LexerActionTypeSkip = 6 + + // LexerActionTypeType represents a [LexerTypeAction] action. 
+ LexerActionTypeType = 7 ) type LexerAction interface { @@ -39,7 +54,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction { return la } -func (b *BaseLexerAction) execute(lexer Lexer) { +func (b *BaseLexerAction) execute(_ Lexer) { panic("Not implemented") } @@ -52,17 +67,19 @@ func (b *BaseLexerAction) getIsPositionDependent() bool { } func (b *BaseLexerAction) Hash() int { - return b.actionType + h := murmurInit(0) + h = murmurUpdate(h, b.actionType) + return murmurFinish(h, 1) } func (b *BaseLexerAction) Equals(other LexerAction) bool { - return b == other + return b.actionType == other.getActionType() } -// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. +// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip]. // -//
<p>The {@code Skip} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
+// The Skip command does not have any parameters, so this action is +// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE]. type LexerSkipAction struct { *BaseLexerAction } @@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction { return la } -// Provides a singleton instance of l parameterless lexer action. +// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action. var LexerSkipActionINSTANCE = NewLexerSkipAction() func (l *LexerSkipAction) execute(lexer Lexer) { lexer.Skip() } +// String returns a string representation of the current [LexerSkipAction]. func (l *LexerSkipAction) String() string { return "skip" } +func (b *LexerSkipAction) Equals(other LexerAction) bool { + return other.getActionType() == LexerActionTypeSkip +} + // Implements the {@code type} lexer action by calling {@link Lexer//setType} // // with the assigned type. @@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string { return "actionType(" + strconv.Itoa(l.thetype) + ")" } -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. +// LexerPushModeAction implements the pushMode lexer action by calling +// [Lexer.pushMode] with the assigned mode. type LexerPushModeAction struct { *BaseLexerAction - mode int } @@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string { return "pushMode(" + strconv.Itoa(l.mode) + ")" } -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode]. // -//
<p>The {@code popMode} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
+// The popMode command does not have any parameters, so this action is +// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE] type LexerPopModeAction struct { *BaseLexerAction } @@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string { return "more" } -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with // the assigned mode. type LexerModeAction struct { *BaseLexerAction - mode int } @@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool { } } -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. +// LexerChannelAction implements the channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. type LexerChannelAction struct { *BaseLexerAction - channel int } +// NewLexerChannelAction creates a channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. func NewLexerChannelAction(channel int) *LexerChannelAction { l := new(LexerChannelAction) l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) @@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string { // lexer actions, see {@link LexerActionExecutor//append} and // {@link LexerActionExecutor//fixOffsetBeforeMatch}.
-// Constructs a Newindexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//
<p>Note: This class is only required for lexer actions for which -// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. type LexerIndexedCustomAction struct { *BaseLexerAction - offset int lexerAction LexerAction isPositionDependent bool } +// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset +// with a [LexerAction]. +// +// Note: This class is only required for lexer actions for which +// [LexerAction.isPositionDependent] returns true. +// +// The offset points into the input [CharStream], relative to +// the token start index, at which the specified lexerAction should be +// executed. func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { l := new(LexerIndexedCustomAction) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go similarity index 70% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go index be1ba7a7e30..dfc28c32b30 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go @@ -29,28 +29,20 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { l.lexerActions = lexerActions // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) + // of the performance-critical {@link ATNConfig//hashCode} operation. + l.cachedHash = murmurInit(0) for _, a := range lexerActions { l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) } + l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions)) return l } -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. +// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for +// the input [LexerActionExecutor] followed by a specified +// [LexerAction]. +// TODO: This does not match the Java code func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { if lexerActionExecutor == nil { return NewLexerActionExecutor([]LexerAction{lexerAction}) @@ -59,47 +51,42 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) } -// Creates a {@link LexerActionExecutor} which encodes the current offset +// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset // for position-dependent lexer actions. // -//
<p>Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end +// Normally, when the executor encounters lexer actions where +// [LexerAction.isPositionDependent] returns true, it calls +// [IntStream.Seek] on the input [CharStream] to set the input +// position to the end of the current token. This behavior provides +// for efficient [DFA] representation of lexer actions which appear at the end // of a lexer rule, even when the lexer rule Matches a variable number of -// characters.</p>
+// characters. // -//
<p>Prior to traversing a Match transition in the ATN, the current offset +// from the token start index is assigned to all position-dependent lexer // actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of +// the offsets relative to the token start index, the [DFA] representation of // lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.</p>
+// to sharing among tokens of the same Length, regardless of their absolute +// position in the input stream. // -//
<p>If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.</p>
+// If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns this instance. // -// @param offset The current offset to assign to all position-dependent +// The offset is assigned to all position-dependent // lexer actions which do not already have offsets assigned. // -// @return A {@link LexerActionExecutor} which stores input stream offsets +// The func returns a [LexerActionExecutor] that stores input stream offsets // for all position-dependent lexer actions. -// / func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { var updatedLexerActions []LexerAction for i := 0; i < len(l.lexerActions); i++ { _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) if l.lexerActions[i].getIsPositionDependent() && !ok { if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } + updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions)) + updatedLexerActions = append(updatedLexerActions, l.lexerActions...) } - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go similarity index 80% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go index c573b752100..fe938b0259a 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go @@ -10,10 +10,8 @@ import ( "strings" ) +//goland:noinspection GoUnusedGlobalVariable var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - LexerATNSimulatorMinDFAEdge = 0 LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN @@ -32,11 +30,11 @@ type ILexerATNSimulator interface { } type LexerATNSimulator struct { - *BaseATNSimulator + BaseATNSimulator recog Lexer predictionMode int - mergeCache DoubleDict + mergeCache *JPCMap2 startIndex int Line int CharPositionInLine int @@ -46,27 +44,35 @@ type LexerATNSimulator struct { } func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + l := &LexerATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } l.decisionToDFA = decisionToDFA l.recog = recog + // The current token's starting index into the character stream. // Shared across DFA to ATN simulation in case the ATN fails and the // DFA did not have a previous accept state. In l case, we use the // ATN-generated exception object. 
l.startIndex = -1 - // line number 1..n within the input/// + + // line number 1..n within the input l.Line = 1 + // The index of the character relative to the beginning of the line - // 0..n-1/// + // 0..n-1 l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration // info l.prevAccept = NewSimState() - // done + return l } @@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() { func (l *LexerATNSimulator) MatchATN(input CharStream) int { startState := l.atn.modeToStartState[l.mode] - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) } oldMode := l.mode @@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { predict := l.execATN(input, next) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) } return predict @@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("start state closure=" + ds0.configs.String()) } if ds0.isAcceptState { - // allow zero-length tokens + // allow zero-Length tokens l.captureSimState(l.prevAccept, input, ds0) } t := input.LA(1) s := ds0 // s is current/from DFA state for { // while more work - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("execATN loop starting closure: " + s.configs.String()) } @@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { } } t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state + s = target // flip current DFA target becomes new src/from state } return l.failOrAccept(l.prevAccept, input, s.configs, t) @@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState return nil } target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) - if LexerATNSimulatorDebug && target != nil { + if runtimeConfig.lexerATNSimulatorDebug && target != nil { fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) } return target } -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. +// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the +// computed state and corresponding edge to the [DFA]. // -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. +// The func returns the computed target [DFA] state for the given input symbol t. +// If this does not lead to a valid [DFA] state, this method +// returns ATNSimulatorError. 
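computeTargetState below participates in the usual DFA edge-caching discipline: reuse a previously computed edge where possible, compute and record it otherwise, and remember dead ends with an error sentinel. A standalone sketch of that discipline (all names here are invented for illustration, not this patch's API):

package main

import "fmt"

const errSentinel = -1 // plays the role of ATNSimulatorError

type miniDFA struct {
	edges map[int]int // symbol -> target state, filled in lazily
}

func (d *miniDFA) target(symbol int, compute func(int) (int, bool)) int {
	if t, ok := d.edges[symbol]; ok {
		return t // edge already computed on an earlier pass: pure DFA mode
	}
	t, ok := compute(symbol)
	if !ok {
		t = errSentinel // dead end: cache that too, so the next pass fails fast
	}
	d.edges[symbol] = t
	return t
}

func main() {
	d := &miniDFA{edges: map[int]int{}}
	next := func(sym int) (int, bool) { return sym + 1, sym >= 0 }
	fmt.Println(d.target(7, next))  // 8 — computed, then recorded as an edge
	fmt.Println(d.target(7, next))  // 8 — served from the recorded edge
	fmt.Println(d.target(-3, next)) // -1 — the error sentinel
}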
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { reach := NewOrderedATNConfigSet() // if we don't find an existing DFA state // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + l.getReachableConfigSet(input, s.configs, reach, t) if len(reach.configs) == 0 { // we got nowhere on t from s if !reach.hasSemanticContext { // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. + // cause a fail-over from DFA later. l.addDFAEdge(s, t, ATNSimulatorError, nil) } // stop when we can't Match any more char return ATNSimulatorError } // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) + return l.addDFAEdge(s, t, nil, reach) } -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { if l.prevAccept.dfaState != nil { lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) @@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) } -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. -func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { +// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations +// we can reach upon input t. +// +// Parameter reach is a return parameter. 
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) { // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule + // than a runtimeConfig that already reached an accept state for the same rule SkipAlt := ATNInvalidAltNumber - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + for _, cfg := range closure.configs { + currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt + if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision { continue } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) } for _, trans := range cfg.GetState().GetTransitions() { target := l.getReachableTarget(trans, t) if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + lexerActionExecutor := cfg.lexerActionExecutor if lexerActionExecutor != nil { lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + treatEOFAsEpsilon := t == TokenEOF + config := NewLexerATNConfig3(cfg, target, lexerActionExecutor) if l.closure(input, config, reach, currentAltReachedAcceptState, true, treatEOFAsEpsilon) { // any remaining configs for l alt have a lower priority @@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC } func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Printf("ACTION %v\n", lexerActionExecutor) } // seek to after last char in token @@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState return nil } -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet { configs := NewOrderedATNConfigSet() for i := 0; i < len(p.GetTransitions()); i++ { target := p.GetTransitions()[i].getTarget() @@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord return configs } -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept +// closure since the alternatives within any lexer decision are ordered by +// preference, this method stops pursuing the closure as soon as an accept // state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. +// search from runtimeConfig, all other (potentially reachable) states for +// this rule would have a lower priority. // -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. -func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, +// The func returns true if an accept state is reached. 
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") } _, ok := config.state.(*RuleStopState) if ok { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { if l.recog != nil { fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) } else { @@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co } // side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition, + configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig { - var cfg *LexerATNConfig + var cfg *ATNConfig if trans.getSerializationType() == TransitionRULE { @@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC pt := trans.(*PredicateTransition) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) } - configs.SetHasSemanticContext(true) + configs.hasSemanticContext = true if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { cfg = NewLexerATNConfig4(config, trans.getTarget()) } @@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC // TODO: if the entry rule is invoked recursively, some // actions may be executed during the recursive call. The // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be + // isEmpty() is false. In this case, the config needs to be // split into two contexts - one with just the empty path // and another with everything but the empty path. // Unfortunately, the current algorithm does not allow @@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC return cfg } -// Evaluate a predicate specified in the lexer. +// evaluatePredicate eEvaluates a predicate specified in the lexer. // -//

-// If {@code speculative} is {@code true}, l method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}.
-//
+// If speculative is true, this method was called before +// [consume] for the Matched character. This method should call +// [consume] before evaluating the predicate to ensure position +// sensitive values, including [GetText], [GetLine], +// and [GetColumn], properly reflect the current +// lexer state. This method should restore input and the simulator +// to the original state before returning, i.e. undo the actions made by the +// call to [Consume]. // -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / +// The func returns true if the specified predicate evaluates to true. func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { // assume true if no recognizer was provided if l.recog == nil { @@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream settings.dfaState = dfaState } -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { if to == nil && cfgs != nil { // leading to l call, ATNConfigSet.hasSemanticContext is used as a // marker indicating dynamic predicate evaluation makes l edge @@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // TJP notes: next time through the DFA, we see a pred again and eval. // If that gets us to a previously created (but dangling) DFA // state, we can continue in pure DFA mode from there. - // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - + // + suppressEdge := cfgs.hasSemanticContext + cfgs.hasSemanticContext = false to = l.addDFAState(cfgs, true) if suppressEdge { @@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // Only track edges within the DFA bounds return to } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) } l.atn.edgeMu.Lock() @@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // configurations already. This method also detects the first // configuration containing an ATN rule stop state. Later, when // traversing the DFA, we will know which rule to accept. 
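The get-or-add behaviour described above can be demonstrated with a plain map; this is only a sketch with hypothetical types, not the vendored DFA state set:

    package main

    import "fmt"

    // dfaState is keyed here by a string fingerprint of its configuration set.
    type dfaState struct {
        key    string
        number int
    }

    func main() {
        states := map[string]*dfaState{}
        addState := func(key string) *dfaState {
            if existing, ok := states[key]; ok {
                return existing // already present: reuse it
            }
            s := &dfaState{key: key, number: len(states)}
            states[key] = s
            return s
        }
        a := addState("configset-A")
        b := addState("configset-A")
        fmt.Println(a == b, b.number) // true 0: the second add reused the state
    }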
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { +func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState { proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { + var firstConfigWithRuleStopState *ATNConfig + for _, cfg := range configs.configs { _, ok := cfg.GetState().(*RuleStopState) if ok { @@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) } if firstConfigWithRuleStopState != nil { proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) } dfa := l.decisionToDFA[l.mode] l.atn.stateMu.Lock() defer l.atn.stateMu.Unlock() - existing, present := dfa.states.Get(proposed) + existing, present := dfa.Get(proposed) if present { // This state was already present, so just return it. @@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) // We need to add the new state // - proposed.stateNumber = dfa.states.Len() - configs.SetReadOnly(true) + proposed.stateNumber = dfa.Len() + configs.readOnly = true + configs.configLookup = nil // Not needed now proposed.configs = configs - dfa.states.Put(proposed) + dfa.Put(proposed) } if !suppressEdge { dfa.setS0(proposed) @@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA { return l.decisionToDFA[mode] } -// Get the text Matched so far for the current token. +// GetText returns the text [Match]ed so far for the current token. func (l *LexerATNSimulator) GetText(input CharStream) string { // index is first lookahead char, don't include. return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go similarity index 73% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go rename to vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go index 76689615a6d..4955ac876f9 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -14,11 +14,11 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer { return la } -// - Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. 
-// -// / const ( + // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit + // a predicate during analysis if + // + // seeThruPreds==false LL1AnalyzerHitPred = TokenInvalidType ) @@ -38,11 +38,12 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { count := len(s.GetTransitions()) look := make([]*IntervalSet, count) for alt := 0; alt < count; alt++ { + look[alt] = NewIntervalSet() - lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing + lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) + + // Wipe out lookahead for la alternative if we found nothing, // or we had a predicate when we !seeThruPreds if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { look[alt] = nil @@ -51,32 +52,31 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { return look } -// * -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. +// Look computes the set of tokens that can follow s in the [ATN] in the +// specified ctx. // -//

-// If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
+// If ctx is nil and the end of the rule containing +// s is reached, [EPSILON] is added to the result set. // -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context +// If ctx is not nil and the end of the outermost rule is +// reached, [EOF] is added to the result set. +// +// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a +// [BlockEndState] to detect epsilon paths through a closure. +// +// Parameter ctx is the complete parser context, or nil if the context // should be ignored // -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// / +// The func returns the set of tokens that can follow s in the [ATN] in the +// specified ctx. func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext + var lookContext *PredictionContext if ctx != nil { lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) } - la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) + la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"), + NewBitSet(), true, true) return r } @@ -110,16 +110,17 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet // outermost context is reached. This parameter has no effect if {@code ctx} // is {@code nil}. -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { +func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { returnState := la.atn.states[ctx.getReturnState(i)] la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) } -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - c := NewBaseATNConfig6(s, 0, ctx) + c := NewATNConfig6(s, 0, ctx) if lookBusy.Contains(c) { return @@ -151,7 +152,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look return } - if ctx != BasePredictionContextEMPTY { + if ctx.pcType != PredictionContextEmpty { removed := calledRuleStack.contains(s.GetRuleIndex()) defer func() { if removed { @@ -202,7 +203,8 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look } } -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { +func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy 
*JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go new file mode 100644 index 00000000000..923c7b52c47 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go @@ -0,0 +1,47 @@ +//go:build !antlr.stats + +package antlr + +// This file is compiled when the build configuration antlr.stats is not enabled. +// which then allows the compiler to optimize out all the code that is not used. +const collectStats = false + +// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled. +type goRunStats struct { +} + +var Statistics = &goRunStats{} + +func (s *goRunStats) AddJStatRec(_ *JStatRec) { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) CollectionAnomalies() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Reset() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Report(dir string, prefix string) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func (s *goRunStats) Analyze() { + // Do nothing - compiler will optimize this out (hopefully) +} + +type statsOption func(*goRunStats) error + +func (s *goRunStats) Configure(options ...statsOption) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + return nil + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/vendor/github.com/antlr4-go/antlr/v4/parser.go similarity index 80% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go rename to vendor/github.com/antlr4-go/antlr/v4/parser.go index d26bf063920..fb57ac15db7 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser.go @@ -48,8 +48,10 @@ type BaseParser struct { _SyntaxErrors int } -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// +// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error +// recovery stuff. +// +//goland:noinspection GoUnusedExportedFunction func NewBaseParser(input TokenStream) *BaseParser { p := new(BaseParser) @@ -58,39 +60,46 @@ func NewBaseParser(input TokenStream) *BaseParser { // The input stream. p.input = nil + // The error handling strategy for the parser. The default value is a new // instance of {@link DefaultErrorStrategy}. p.errHandler = NewDefaultErrorStrategy() p.precedenceStack = make([]int, 0) p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. + + // The ParserRuleContext object for the currently executing rule. // p.is always non-nil during the parsing process. p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during + + // Specifies whether the parser should construct a parse tree during // the parsing process. The default value is {@code true}. p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. 
The listener itself is + + // When setTrace(true) is called, a reference to the + // TraceListener is stored here, so it can be easily removed in a + // later call to setTrace(false). The listener itself is // implemented as a parser listener so p.field is not directly used by // other parser methods. p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive + + // The list of ParseTreeListener listeners registered to receive // events during the parse. p.parseListeners = nil + // The number of syntax errors Reported during parsing. p.value is - // incremented each time {@link //NotifyErrorListeners} is called. + // incremented each time NotifyErrorListeners is called. p._SyntaxErrors = 0 p.SetInputStream(input) return p } -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with +// This field maps from the serialized ATN string to the deserialized [ATN] with // bypass alternatives. // -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() +// [ATNDeserializationOptions.isGenerateRuleBypassTransitions] +// +//goland:noinspection GoUnusedGlobalVariable var bypassAltsAtnCache = make(map[string]int) // reset the parser's state// @@ -143,10 +152,13 @@ func (p *BaseParser) Match(ttype int) Token { p.Consume() } else { t = p.errHandler.RecoverInline(p) + if p.HasError() { + return nil + } if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol + + // we must have conjured up a new token during single token + // insertion if it's not the current symbol p.ctx.AddErrorNode(t) } } @@ -178,9 +190,8 @@ func (p *BaseParser) MatchWildcard() Token { } else { t = p.errHandler.RecoverInline(p) if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol + // we must have conjured up a new token during single token + // insertion if it's not the current symbol p.ctx.AddErrorNode(t) } } @@ -202,33 +213,27 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener { return p.parseListeners } -// Registers {@code listener} to receive events during the parsing process. +// AddParseListener registers listener to receive events during the parsing process. // -//

-// To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parser. In addition, calls to certain
-// rule entry methods may be omitted.
-//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
-//
-// Alterations to the grammar used to generate code may change the
-// behavior of the listener calls.
-// Alterations to the command line options passed to ANTLR 4 when
-// generating the parser may change the behavior of the listener calls.
-// Changing the version of the ANTLR Tool used to generate the parser
-// may change the behavior of the listener calls.
-//
-// @param listener the listener to add
-//
-// @panics nilPointerException if {@code} listener is {@code nil}
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parser. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
 func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
 	if listener == nil {
 		panic("listener")
@@ -239,11 +244,10 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
 	p.parseListeners = append(p.parseListeners, listener)
 }
 
-// Remove {@code listener} from the list of parse listeners.
+// RemoveParseListener removes listener from the list of parse listeners.
 //
-//
-// If {@code listener} is {@code nil} or has not been added as a parse
-// listener, p.method does nothing.
-//
-// @param listener the listener to remove +// If listener is nil or has not been added as a parse +// listener, this func does nothing. func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { if p.parseListeners != nil { @@ -274,7 +278,7 @@ func (p *BaseParser) removeParseListeners() { p.parseListeners = nil } -// Notify any parse listeners of an enter rule event. +// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event. func (p *BaseParser) TriggerEnterRuleEvent() { if p.parseListeners != nil { ctx := p.ctx @@ -285,9 +289,7 @@ func (p *BaseParser) TriggerEnterRuleEvent() { } } -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener +// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. func (p *BaseParser) TriggerExitRuleEvent() { if p.parseListeners != nil { // reverse order walk of listeners @@ -314,19 +316,16 @@ func (p *BaseParser) GetTokenFactory() TokenFactory { return p.input.GetTokenSource().GetTokenFactory() } -// Tell our token source and error strategy about a Newway to create tokens.// +// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens. func (p *BaseParser) setTokenFactory(factory TokenFactory) { p.input.GetTokenSource().setTokenFactory(factory) } -// The ATN with bypass alternatives is expensive to create so we create it +// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it // lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. func (p *BaseParser) GetATNWithBypassAlts() { - // TODO + // TODO - Implement this? panic("Not implemented!") // serializedAtn := p.getSerializedATN() @@ -354,6 +353,7 @@ func (p *BaseParser) GetATNWithBypassAlts() { // String id = m.Get("ID") // +//goland:noinspection GoUnusedParameter func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { panic("NewParseTreePatternMatcher not implemented!") @@ -386,14 +386,16 @@ func (p *BaseParser) GetTokenStream() TokenStream { return p.input } -// Set the token stream and reset the parser.// +// SetTokenStream installs input as the token stream and resets the parser. func (p *BaseParser) SetTokenStream(input TokenStream) { p.input = nil p.reset() p.input = input } -// Match needs to return the current input symbol, which gets put +// GetCurrentToken returns the current token at LT(1). +// +// [Match] needs to return the current input symbol, which gets put // into the label for the associated token ref e.g., x=ID. 
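As a toy illustration of the LT(1) contract mentioned above, with hypothetical types rather than the vendored TokenStream API, the current token is whatever lookahead position 1 refers to, and matching it advances the stream:

    package main

    import "fmt"

    // stream is a token stream over plain strings.
    type stream struct {
        tokens []string
        pos    int
    }

    // lt1 returns the token at lookahead position 1, i.e. the current token.
    func (s *stream) lt1() string { return s.tokens[s.pos] }

    // match returns the current token and advances; this is what lets a
    // grammar label such as x=ID capture the symbol that was consumed.
    func (s *stream) match() string {
        t := s.lt1()
        s.pos++
        return t
    }

    func main() {
        s := &stream{tokens: []string{"x", "=", "ID"}}
        fmt.Println(s.match(), s.match(), s.match()) // x = ID
    }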
func (p *BaseParser) GetCurrentToken() Token { return p.input.LT(1) @@ -446,7 +448,7 @@ func (p *BaseParser) addContextToParseTree() { } } -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) { p.SetState(state) p.ctx = localctx p.ctx.SetStart(p.input.LT(1)) @@ -474,7 +476,7 @@ func (p *BaseParser) ExitRule() { func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx + // if we have a new localctx, make sure we replace existing ctx // that is previous child of parse tree if p.BuildParseTrees && p.ctx != localctx { if p.ctx.GetParent() != nil { @@ -498,7 +500,7 @@ func (p *BaseParser) GetPrecedence() int { return p.precedenceStack[len(p.precedenceStack)-1] } -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) { p.SetState(state) p.precedenceStack.Push(precedence) p.ctx = localctx @@ -512,7 +514,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleI // // Like {@link //EnterRule} but for recursive rules. -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) { previous := p.ctx previous.SetParent(localctx) previous.SetInvokingState(state) @@ -530,7 +532,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, } func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() + _, _ = p.precedenceStack.Pop() p.ctx.SetStop(p.input.LT(-1)) retCtx := p.ctx // save current ctx (return value) // unroll so ctx is as it was before call to recursive method @@ -561,29 +563,22 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { return nil } -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { +func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool { return precedence >= p.precedenceStack[len(p.precedenceStack)-1] } +//goland:noinspection GoUnusedParameter func (p *BaseParser) inContext(context ParserRuleContext) bool { // TODO: useful in parser? return false } -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is +// IsExpectedToken checks whether symbol can follow the current state in the +// {ATN}. The behavior of p.method is equivalent to the following, but is // implemented such that the complete context-sensitive follow set does not // need to be explicitly constructed. // -//
-// return getExpectedTokens().contains(symbol)
-//
-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - +// return getExpectedTokens().contains(symbol) func (p *BaseParser) IsExpectedToken(symbol int) bool { atn := p.Interpreter.atn ctx := p.ctx @@ -611,11 +606,9 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool { return false } -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, +// GetExpectedTokens and returns the set of input symbols which could follow the current parser +// state and context, as given by [GetState] and [GetContext], // respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) func (p *BaseParser) GetExpectedTokens() *IntervalSet { return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) } @@ -626,7 +619,7 @@ func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { return atn.NextTokens(s, nil) } -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found. func (p *BaseParser) GetRuleIndex(ruleName string) int { var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] if ok { @@ -636,13 +629,10 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int { return -1 } -// Return List<String> of the rule names in your parser instance +// GetRuleInvocationStack returns a list of the rule names in your parser instance // leading up to a call to the current rule. You could override if // you want more details such as the file/line info of where // in the ATN a rule is invoked. -// -// this very useful for error messages. - func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { if c == nil { c = p.ctx @@ -668,16 +658,16 @@ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { return stack } -// For debugging and other purposes.// +// GetDFAStrings returns a list of all DFA states used for debugging purposes func (p *BaseParser) GetDFAStrings() string { return fmt.Sprint(p.Interpreter.decisionToDFA) } -// For debugging and other purposes.// +// DumpDFA prints the whole of the DFA for debugging func (p *BaseParser) DumpDFA() { seenOne := false for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.states.Len() > 0 { + if dfa.Len() > 0 { if seenOne { fmt.Println() } @@ -692,8 +682,10 @@ func (p *BaseParser) GetSourceName() string { return p.GrammarFileName } -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. +// SetTrace installs a trace listener for the parse. +// +// During a parse it is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. This is for quick and dirty debugging. 
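The listen-in idea behind SetTrace can be sketched with a trivial listener; the interface below is hypothetical, not the vendored ParseTreeListener:

    package main

    import "fmt"

    // ruleListener receives rule entry and exit events.
    type ruleListener interface {
        enterRule(name string)
        exitRule(name string)
    }

    // tracer prints every event: the "quick and dirty" debugging aid.
    type tracer struct{}

    func (tracer) enterRule(name string) { fmt.Println("enter", name) }
    func (tracer) exitRule(name string)  { fmt.Println("exit", name) }

    func main() {
        var l ruleListener = tracer{}
        l.enterRule("expr")
        l.exitRule("expr")
    }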
func (p *BaseParser) SetTrace(trace *TraceListener) { if trace == nil { p.RemoveParseListener(p.tracer) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go similarity index 64% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go rename to vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go index 8bcc46a0d99..ae2869692a6 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go @@ -10,31 +10,51 @@ import ( "strings" ) -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorTraceATNSim = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false - TurnOffLRLoopEntryBranchOpt = false -) +var () + +// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over +// a standard JStore so that we can use Lazy instantiation of the JStore, mostly +// to avoid polluting the stats module with a ton of JStore instances with nothing in them. +type ClosureBusy struct { + bMap *JStore[*ATNConfig, Comparator[*ATNConfig]] + desc string +} + +// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules +func NewClosureBusy(desc string) *ClosureBusy { + return &ClosureBusy{ + desc: desc, + } +} + +func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) { + if c.bMap == nil { + c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc) + } + return c.bMap.Put(config) +} type ParserATNSimulator struct { - *BaseATNSimulator + BaseATNSimulator parser Parser predictionMode int input TokenStream startIndex int dfa *DFA - mergeCache *DoubleDict + mergeCache *JPCMap outerContext ParserRuleContext } +//goland:noinspection GoUnusedExportedFunction func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + p := &ParserATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } p.parser = parser p.decisionToDFA = decisionToDFA @@ -46,12 +66,12 @@ func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, shared p.outerContext = nil p.dfa = nil // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. + // Don't keep around as it wastes huge amounts of memory. [JPCMap] + // isn't Synchronized, but we're ok since two threads shouldn't reuse same + // parser/atn-simulator object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid + // the merge if we ever see a and b again. Note that (b,a) -> c should + // also be examined during cache lookup. 
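The (a,b) -> c contract described in the comment above can be sketched with an ordinary Go map; the types here are hypothetical stand-ins, not the vendored JPCMap:

    package main

    import "fmt"

    type ctx string // stand-in for a prediction context

    // mergeCache memoizes merge results; lookups try both key orders because
    // merging a with b yields the same context as merging b with a.
    type mergeCache map[[2]ctx]ctx

    func (m mergeCache) get(a, b ctx) (ctx, bool) {
        if c, ok := m[[2]ctx{a, b}]; ok {
            return c, true
        }
        c, ok := m[[2]ctx{b, a}]
        return c, ok
    }

    func main() {
        m := mergeCache{}
        m[[2]ctx{"a", "b"}] = "c"
        merged, ok := m.get("b", "a") // reversed order still hits
        fmt.Println(merged, ok)       // c true
    }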
// p.mergeCache = nil @@ -69,14 +89,14 @@ func (p *ParserATNSimulator) SetPredictionMode(v int) { func (p *ParserATNSimulator) reset() { } -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + " exec LA(1)==" + p.getLookaheadName(input) + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) } - p.input = input p.startIndex = input.Index() p.outerContext = outerContext @@ -88,7 +108,15 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou defer func() { p.dfa = nil - p.mergeCache = nil // wack cache after each prediction + p.mergeCache = nil // whack cache after each prediction + // Do not attempt to run a GC now that we're done with the cache as makes the + // GC overhead terrible for badly formed grammars and has little effect on well formed + // grammars. + // I have made some extra effort to try and reduce memory pressure by reusing allocations when + // possible. However, it can only have a limited effect. The real solution is to encourage grammar + // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect + // what is happening at runtime, along with using the error listener to report ambiguities. + input.Seek(index) input.Release(m) }() @@ -113,7 +141,7 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou if outerContext == nil { outerContext = ParserRuleContextEmpty } - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + " exec LA(1)==" + p.getLookaheadName(input) + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) @@ -142,47 +170,52 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou p.atn.stateMu.Unlock() } - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { + alt, re := p.execATN(dfa, s0, input, index, outerContext) + parser.SetError(re) + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) } return alt } -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - +// execATN performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. +// // There are some key conditions we're looking for after computing a new // set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - +// +// - If the set is empty, there is no viable alternative for current symbol +// - Does the state uniquely predict an alternative? 
+// - Does the state have a conflict that would prevent us from +// putting it on the work list? +// // We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) -// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: // -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { +// - Add an edge from previous DFA state to potentially NewDFA state, D, +// - Upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) +// - Collecting predicates and adding semantic context to DFA accept states +// - adding rule context to context-sensitive DFA accept states +// - Consuming an input symbol +// - Reporting a conflict +// - Reporting an ambiguity +// - Reporting a context sensitivity +// - Reporting insufficient predicates +// +// Cover these cases: +// +// - dead end +// - single alt +// - single alt + predicates +// - conflict +// - conflict + predicates +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + ", DFA state " + s0.String() + ", LA(1)==" + p.getLookaheadName(input) + @@ -191,7 +224,7 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, previousD := s0 - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("s0 = " + s0.String()) } t := input.LA(1) @@ -214,17 +247,17 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, input.Seek(startIndex) alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) if alt != ATNInvalidAltNumber { - return alt + return alt, nil } - - panic(e) + p.parser.SetError(e) + return ATNInvalidAltNumber, e } if D.requiresFullContext && p.predictionMode != PredictionModeSLL { // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() + conflictingAlts := D.configs.conflictingAlts if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL fail-over") } conflictIndex := input.Index() if conflictIndex != startIndex { @@ -232,10 +265,10 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, } conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("Full LL avoided") } - return 
conflictingAlts.minValue() + return conflictingAlts.minValue(), nil } if conflictIndex != startIndex { // restore the index so Reporting the fallback to full @@ -243,18 +276,18 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, input.Seek(conflictIndex) } } - if ParserATNSimulatorDFADebug { + if runtimeConfig.parserATNSimulatorDFADebug { fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) } fullCtx := true s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt + alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt, re } if D.isAcceptState { if D.predicates == nil { - return D.prediction + return D.prediction, nil } stopIndex := input.Index() input.Seek(startIndex) @@ -262,13 +295,13 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, switch alts.length() { case 0: - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) + return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex) case 1: - return alts.minValue() + return alts.minValue(), nil default: // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() + return alts.minValue(), nil } } previousD = D @@ -314,7 +347,8 @@ func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) // @return The computed target DFA state for the given input symbol // {@code t}. If {@code t} does not lead to a valid DFA state, p method // returns {@link //ERROR}. 
- +// +//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { reach := p.computeReachSet(previousD.configs, t, false) @@ -322,12 +356,12 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) return ATNSimulatorError } - // create Newtarget state we'll add to DFA after it's complete + // create new target state we'll add to DFA after it's complete D := NewDFAState(-1, reach) predictedAlt := p.getUniqueAlt(reach) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { altSubSets := PredictionModegetConflictingAltSubsets(reach) fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + ", previous=" + previousD.configs.String() + @@ -340,17 +374,17 @@ func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t if predictedAlt != ATNInvalidAltNumber { // NO CONFLICT, UNIQUELY PREDICTED ALT D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) + D.configs.uniqueAlt = predictedAlt D.setPrediction(predictedAlt) } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) + D.configs.conflictingAlts = p.getConflictingAlts(reach) D.requiresFullContext = true // in SLL-only mode, we will stop at p state and return the minimum alt D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) + D.setPrediction(D.configs.conflictingAlts.minValue()) } - if D.isAcceptState && D.configs.HasSemanticContext() { + if D.isAcceptState && D.configs.hasSemanticContext { p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) if D.predicates != nil { D.setPrediction(ATNInvalidAltNumber) @@ -381,15 +415,17 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState } // comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("execATNWithFullContext " + s0.String()) } fullCtx := true foundExactAmbig := false - var reach ATNConfigSet + var reach *ATNConfigSet previous := s0 input.Seek(startIndex) t := input.LA(1) @@ -407,25 +443,23 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT // ATN states in SLL implies LL will also get nowhere. // If conflict in states that dip out, choose min since we // will get error no matter what. 
- e := p.noViableAlt(input, outerContext, previous, startIndex) input.Seek(startIndex) alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) if alt != ATNInvalidAltNumber { - return alt + return alt, nil } - - panic(e) + return alt, p.noViableAlt(input, outerContext, previous, startIndex) } altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) + reach.uniqueAlt = p.getUniqueAlt(reach) // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() + if reach.uniqueAlt != ATNInvalidAltNumber { + predictedAlt = reach.uniqueAlt break } if p.predictionMode != PredictionModeLLExactAmbigDetection { @@ -454,9 +488,9 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT // If the configuration set uniquely predicts an alternative, // without conflict, then we know that it's a full LL decision // not SLL. - if reach.GetUniqueAlt() != ATNInvalidAltNumber { + if reach.uniqueAlt != ATNInvalidAltNumber { p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt + return predictedAlt, nil } // We do not check predicates here because we have checked them // on-the-fly when doing full context prediction. @@ -469,10 +503,10 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT // // For example, we might know that we have conflicting configurations. // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - + // conflict. It's possible to have non-conflicting alt subsets as in: + // // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - + // // from // // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), @@ -487,14 +521,15 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) - return predictedAlt + return predictedAlt, nil } -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet { if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() + p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()") } - intermediate := NewBaseATNConfigSet(fullCtx) + intermediate := NewATNConfigSet(fullCtx) // Configurations already in a rule stop state indicate reaching the end // of the decision rule (local context) or end of the start rule (full @@ -506,18 +541,18 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // ensure that the alternative Matching the longest overall sequence is // chosen when multiple such configurations can Match the input. 
- var skippedStopStates []*BaseATNConfig + var skippedStopStates []*ATNConfig // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { + for _, c := range closure.configs { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) } if _, ok := c.GetState().(*RuleStopState); ok { if fullCtx || t == TokenEOF { - skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { + skippedStopStates = append(skippedStopStates, c) + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("added " + c.String() + " to SkippedStopStates") } } @@ -527,9 +562,9 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt for _, trans := range c.GetState().GetTransitions() { target := p.getReachableTarget(trans, t) if target != nil { - cfg := NewBaseATNConfig4(c, target) + cfg := NewATNConfig4(c, target) intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("added " + cfg.String() + " to intermediate") } } @@ -537,7 +572,7 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt } // Now figure out where the reach operation can take us... - var reach ATNConfigSet + var reach *ATNConfigSet // This block optimizes the reach operation for intermediate sets which // trivially indicate a termination state for the overall @@ -565,8 +600,8 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // operation on the intermediate set to compute its initial value. // if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) + reach = NewATNConfigSet(fullCtx) + closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy") treatEOFAsEpsilon := t == TokenEOF amount := len(intermediate.configs) for k := 0; k < amount; k++ { @@ -588,10 +623,10 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt // // This is handled before the configurations in SkippedStopStates, // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's + // already guaranteed to meet this condition whether it's // required. // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) + reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate)) } // If SkippedStopStates!=nil, then it contains at least one // configuration. For full-context reach operations, these @@ -607,41 +642,40 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt } } - if ParserATNSimulatorTraceATNSim { + if runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) } - if len(reach.GetItems()) == 0 { + if len(reach.configs) == 0 { return nil } return reach } -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. +// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from +// configs which are in a [RuleStopState]. 
If all +// configurations in configs are already in a rule stop state, this +// method simply returns configs. // -//

-// When {@code lookToEndOfRule} is true, p method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
-// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
-//
+// from the configuration via epsilon-only transitions. // -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states +// When lookToEndOfRule is true, this method checks for rule stop states // reachable by epsilon-only transitions from each configuration in -// {@code configs}. +// configs. // -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { +// The func returns configs if all configurations in configs are in a +// rule stop state, otherwise it returns a new configuration set containing only +// the configurations from configs which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet { if PredictionModeallConfigsInRuleStopStates(configs) { return configs } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { + result := NewATNConfigSet(configs.fullCtx) + for _, config := range configs.configs { if _, ok := config.GetState().(*RuleStopState); ok { result.Add(config, p.mergeCache) continue @@ -650,91 +684,81 @@ func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfi NextTokens := p.atn.NextTokens(config.GetState(), nil) if NextTokens.contains(TokenEpsilon) { endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) + result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache) } } } return result } -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet { // always at least the implicit call to start rule initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + configs := NewATNConfigSet(fullCtx) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("computeStartState from ATN state " + a.String() + " initialContext=" + initialContext.String()) } for i := 0; i < len(a.GetTransitions()); i++ { target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) + c := NewATNConfig6(target, i+1, initialContext) + closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy") p.closure(c, configs, closureBusy, true, fullCtx, false) } return configs } -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation +// applyPrecedenceFilter transforms the start state computed by +// [computeStartState] to the special start state used by a +// precedence [DFA] for a particular precedence value. The transformation // process applies the following changes to the start state's configuration // set. // -//
-// 1. Evaluate the precedence predicates for each configuration using
-// {@link SemanticContext//evalPrecedence}.
-// 2. Remove all configurations which predict an alternative greater than
-// 1, for which another configuration that predicts alternative 1 is in the
-// same ATN state with the same prediction context. This transformation is
-// valid for the following reasons:
-//
-// The closure block cannot contain any epsilon transitions which bypass
-// the body of the closure, so all states reachable via alternative 1 are
-// part of the precedence alternatives of the transformed left-recursive
-// rule.
-// The "primary" portion of a left recursive rule cannot contain an
-// epsilon transition, so the only way an alternative other than 1 can exist
-// in a state that is also reachable via alternative 1 is by nesting calls
-// to the left-recursive rule, with the outer calls not being at the
-// preferred precedence level.
-//
+// 1. Evaluate the precedence predicates for each configuration using +// [SemanticContext].evalPrecedence. +// 2. Remove all configurations which predict an alternative greater than +// 1, for which another configuration that predicts alternative 1 is in the +// same ATN state with the same prediction context. +// +// Transformation 2 is valid for the following reasons: +// +// - The closure block cannot contain any epsilon transitions which bypass +// the body of the closure, so all states reachable via alternative 1 are +// part of the precedence alternatives of the transformed left-recursive +// rule. +// - The "primary" portion of a left recursive rule cannot contain an +// epsilon transition, so the only way an alternative other than 1 can exist +// in a state that is also reachable via alternative 1 is by nesting calls +// to the left-recursive rule, with the outer calls not being at the +// preferred precedence level. +// +// The prediction context must be considered by this filter to address +// situations like the following: +// +// grammar TA +// prog: statement* EOF +// statement: letterA | statement letterA 'b' +// letterA: 'a' // -//

-// The prediction context must be considered by p filter to address
-// situations like the following.
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//
-// If the above grammar, the ATN state immediately before the token
-// reference {@code 'a'} in {@code letterA} is reachable from the left edge
-// of both the primary and closure blocks of the left-recursive rule
-// {@code statement}. The prediction context associated with each of these
-// configurations distinguishes between them, and prevents the alternative
-// which stepped out to {@code prog} (and then back in to {@code statement}
-// from being eliminated by the filter.
-//
// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { +// The func returns the transformed configuration set representing the start state +// for a precedence [DFA] at a particular precedence level (determined by +// calling [Parser].getPrecedence). +func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet { - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) + statesFromAlt1 := make(map[int]*PredictionContext) + configSet := NewATNConfigSet(configs.fullCtx) - for _, config := range configs.GetItems() { + for _, config := range configs.configs { // handle alt 1 first if config.GetAlt() != 1 { continue @@ -746,12 +770,12 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConf } statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) + configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache) } else { configSet.Add(config, p.mergeCache) } } - for _, config := range configs.GetItems() { + for _, config := range configs.configs { if config.GetAlt() == 1 { // already handled @@ -780,10 +804,11 @@ func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATN return nil } -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext { altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { + for _, c := range configs.configs { if ambigAlts.contains(c.GetAlt()) { altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) } @@ -797,11 +822,11 @@ func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATN nPredAlts++ } } - // nonambig alts are nil in altToPred + // unambiguous alts are nil in altToPred if nPredAlts == 0 { altToPred = nil } - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) } return altToPred @@ -812,7 +837,7 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre containsPredicate := false for i := 1; i < len(altToPred); i++ { pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE + // un-predicated is indicated by SemanticContextNONE if ambigAlts != nil && ambigAlts.contains(i) { pairs = append(pairs, NewPredPrediction(pred, i)) } @@ -826,51 +851,42 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre return pairs } -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. 
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by +// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the +// Error state was reached during [ATN] simulation. // -//

-// The default implementation of p method uses the following -// algorithm to identify an ATN configuration which successfully parsed the +// The default implementation of this method uses the following +// algorithm to identify an [ATN] configuration which successfully parsed the // decision entry rule. Choosing such an alternative ensures that the -// {@link ParserRuleContext} returned by the calling rule will be complete +// [ParserRuleContext] returned by the calling rule will be complete // and valid, and the syntax error will be Reported later at a more -// localized location.

+// localized location. // -//
-// <ul>
-// <li>If a syntactically valid path or paths reach the end of the decision rule and
-// they are semantically valid if predicated, return the min associated alt.</li>
-// <li>Else, if a semantically invalid but syntactically valid path exist
-// or paths exist, return the minimum associated alt.</li>
-// <li>Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.</li>
-// </ul>
+// - If a syntactically valid path or paths reach the end of the decision rule, and +// they are semantically valid if predicated, return the min associated alt. +// - Else, if a semantically invalid but syntactically valid path exist +// or paths exist, return the minimum associated alt. +// - Otherwise, return [ATNInvalidAltNumber]. // -//

// In some scenarios, the algorithm described above could predict an -// alternative which will result in a {@link FailedPredicateException} in -// the parser. Specifically, p could occur if the only configuration +// alternative which will result in a [FailedPredicateException] in +// the parser. Specifically, this could occur if the only configuration // capable of successfully parsing to the end of the decision rule is -// blocked by a semantic predicate. By choosing p alternative within -// {@link //AdaptivePredict} instead of panicing a -// {@link NoViableAltException}, the resulting -// {@link FailedPredicateException} in the parser will identify the specific +// blocked by a semantic predicate. By choosing this alternative within +// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting +// [FailedPredicateException] in the parser will identify the specific // predicate which is preventing the parser from successfully parsing the // decision rule, which helps developers identify and correct logic errors // in semantic predicates. -//

// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper +// pass in the configs holding ATN configurations which were valid immediately before +// the ERROR state was reached, outerContext as the initial parser context from the paper // or the parser stack at the instant before prediction commences. // -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { +// Teh func returns the value to return from [AdaptivePredict], or +// [ATNInvalidAltNumber] if a suitable alternative was not +// identified and [AdaptivePredict] should report an error instead. +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) semValidConfigs := cfgs[0] semInvalidConfigs := cfgs[1] @@ -879,7 +895,7 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry return alt } // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { + if len(semInvalidConfigs.configs) > 0 { alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) if alt != ATNInvalidAltNumber { // syntactically viable path exists return alt @@ -888,10 +904,10 @@ func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntry return ATNInvalidAltNumber } -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int { alts := NewIntervalSet() - for _, c := range configs.GetItems() { + for _, c := range configs.configs { _, ok := c.GetState().(*RuleStopState) if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { @@ -915,14 +931,14 @@ func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConf // prediction, which is where predicates need to evaluate. 
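The splitting helper that follows walks a configuration set once, evaluating each configuration's semantic context against the outer context and routing it into a "succeeded" or "failed" set. A minimal self-contained sketch of that partitioning step, assuming a hypothetical item type in place of *ATNConfig and a nil predicate standing in for SemanticContextNone (an illustration of the idea, not the runtime's API):

    package main

    import "fmt"

    // item is a hypothetical stand-in for *ATNConfig: alt is the predicted
    // alternative, pred stands in for SemanticContext evaluation, and a nil
    // pred plays the role of SemanticContextNone ("always true").
    type item struct {
    	alt  int
    	pred func() bool
    }

    // splitByPredicate mirrors the shape of splitAccordingToSemanticValidity:
    // items whose predicate passes (or that have none) are kept as valid,
    // the rest are collected separately for error reporting.
    func splitByPredicate(items []item) (succeeded, failed []item) {
    	for _, it := range items {
    		if it.pred == nil || it.pred() {
    			succeeded = append(succeeded, it)
    		} else {
    			failed = append(failed, it)
    		}
    	}
    	return succeeded, failed
    }

    func main() {
    	items := []item{
    		{alt: 1, pred: nil},                          // no predicate
    		{alt: 2, pred: func() bool { return false }}, // failing predicate
    	}
    	ok, bad := splitByPredicate(items)
    	fmt.Println(len(ok), len(bad)) // 1 1
    }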
type ATNConfigSetPair struct { - item0, item1 ATNConfigSet + item0, item1 *ATNConfigSet } -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet { + succeeded := NewATNConfigSet(configs.fullCtx) + failed := NewATNConfigSet(configs.fullCtx) - for _, c := range configs.GetItems() { + for _, c := range configs.configs { if c.GetSemanticContext() != SemanticContextNone { predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) if predicateEvaluationResult { @@ -934,15 +950,16 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigS succeeded.Add(c, nil) } } - return []ATNConfigSet{succeeded, failed} + return []*ATNConfigSet{succeeded, failed} } -// Look through a list of predicate/alt pairs, returning alts for the +// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the +// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an +// un-predicated runtimeConfig which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. // -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. +//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { predictions := NewBitSet() for i := 0; i < len(predPredictions); i++ { @@ -956,11 +973,11 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti } predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) } if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) } predictions.add(pair.alt) @@ -972,19 +989,82 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti return predictions } -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { +func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { initialDepth := 0 p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, fullCtx, initialDepth, treatEOFAsEpsilon) } -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if ParserATNSimulatorTraceATNSim { +func (p *ParserATNSimulator) 
closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("closure(" + config.String() + ")") - //fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") + } + + var stack []*ATNConfig + visited := make(map[*ATNConfig]bool) + + stack = append(stack, config) + + for len(stack) > 0 { + currConfig := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if _, ok := visited[currConfig]; ok { + continue + } + visited[currConfig] = true + + if _, ok := currConfig.GetState().(*RuleStopState); ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !currConfig.GetContext().isEmpty() { + for i := 0; i < currConfig.GetContext().length(); i++ { + if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[currConfig.GetContext().getReturnState(i)] + newContext := currConfig.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext()) + + stack = append(stack, c) + } + continue + } else if fullCtx { + // reached end of start rule + configs.Add(currConfig, p.mergeCache) + continue + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + } } + + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") } if _, ok := config.GetState().(*RuleStopState); ok { @@ -994,11 +1074,12 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs for i := 0; i < config.GetContext().length(); i++ { if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) + nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) continue } else { // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) } p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) @@ -1008,7 +1089,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs returnState := p.atn.states[config.GetContext().getReturnState(i)] newContext := config.GetContext().GetParent(i) // "pop" return state - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) // While we have context to pop back from, we may have // gotten that context AFTER having falling off a rule. // Make sure we track that we are now out of context. 
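The rewritten closureCheckingStopState above replaces recursion with an explicit stack and a visited set (the original recursive walk is retained alongside as closureCheckingStopStateRecursive), which keeps deeply nested rule-stop contexts from growing the call stack. A minimal sketch of that traversal pattern, assuming a hypothetical node type in place of *ATNConfig and edges in place of the epsilon transitions that closure would expand:

    package main

    import "fmt"

    // node is a hypothetical stand-in for *ATNConfig; edges stands in for
    // the epsilon transitions the closure operation follows.
    type node struct {
    	id    int
    	edges []*node
    }

    // walk visits everything reachable from start exactly once, using an
    // explicit stack instead of recursion, as in the rewrite above.
    func walk(start *node, visit func(*node)) {
    	stack := []*node{start}
    	visited := make(map[*node]bool)
    	for len(stack) > 0 {
    		n := stack[len(stack)-1]
    		stack = stack[:len(stack)-1] // pop
    		if visited[n] {
    			continue // this configuration was already expanded
    		}
    		visited[n] = true
    		visit(n)
    		stack = append(stack, n.edges...) // push successors
    	}
    }

    func main() {
    	a := &node{id: 1}
    	b := &node{id: 2}
    	a.edges = []*node{b}
    	b.edges = []*node{a} // a cycle: the visited set stops the loop
    	walk(a, func(n *node) { fmt.Println("visit", n.id) })
    }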
@@ -1022,7 +1103,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs return } else { // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) } } @@ -1030,8 +1111,10 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) } -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { +// Do the actual work of walking epsilon edges +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { state := config.GetState() // optimization if !state.GetEpsilonOnlyTransitions() { @@ -1048,7 +1131,7 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, _, ok := t.(*ActionTransition) continueCollecting := collectPredicates && !ok c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { + if c != nil { newDepth := depth if _, ok := config.GetState().(*RuleStopState); ok { @@ -1056,7 +1139,7 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, // We can't get here if incoming config was rule stop and we had context // track how far we dip into outer context. Might // come in handy and we avoid evaluating context dependent - // preds if p is > 0. + // preds if this is > 0. if p.dfa != nil && p.dfa.getPrecedenceDfa() { if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { @@ -1072,9 +1155,9 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, continue } - configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method + configs.dipsIntoOuterContext = true // TODO: can remove? 
only care when we add to set per middle of this method newDepth-- - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("dips into outer ctx: " + c.String()) } } else { @@ -1098,8 +1181,9 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, } } -func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { - if TurnOffLRLoopEntryBranchOpt { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool { + if !runtimeConfig.lRLoopEntryBranchOpt { return false } @@ -1196,7 +1280,7 @@ func (p *ParserATNSimulator) getRuleName(index int) string { return sb.String() } -func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { +func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig { switch t.getSerializationType() { case TransitionRULE: @@ -1208,13 +1292,13 @@ func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, co case TransitionACTION: return p.actionTransition(config, t.(*ActionTransition)) case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) + return NewATNConfig4(config, t.getTarget()) case TransitionATOM, TransitionRANGE, TransitionSET: // EOF transitions act like epsilon transitions after the first EOF // transition is traversed if treatEOFAsEpsilon { if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) + return NewATNConfig4(config, t.getTarget()) } } return nil @@ -1223,60 +1307,63 @@ func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, co } } -func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) } - return NewBaseATNConfig4(config, t.getTarget()) + return NewATNConfig4(config, t.getTarget()) } -func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") if p.parser != nil { fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) } } - var c *BaseATNConfig + var c *ATNConfig if collectPredicates && inContext { if fullCtx { // In full context mode, we can evaluate predicates on-the-fly // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates + // the runtimeConfig sets. It also obviates the need to test predicates // later during conflict resolution. 
currentPosition := p.input.Index() p.input.Seek(p.startIndex) predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) p.input.Seek(currentPosition) if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + c = NewATNConfig4(config, pt.getTarget()) // no pred context } } else { newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) } } else { - c = NewBaseATNConfig4(config, pt.getTarget()) + c = NewATNConfig4(config, pt.getTarget()) } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("runtimeConfig from pred transition=" + c.String()) } return c } -func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) if p.parser != nil { fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) } } - var c *BaseATNConfig + var c *ATNConfig if collectPredicates && (!pt.isCtxDependent || inContext) { if fullCtx { // In full context mode, we can evaluate predicates on-the-fly @@ -1288,78 +1375,92 @@ func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTrans predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) p.input.Seek(currentPosition) if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + c = NewATNConfig4(config, pt.getTarget()) // no pred context } } else { newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) } } else { - c = NewBaseATNConfig4(config, pt.getTarget()) + c = NewATNConfig4(config, pt.getTarget()) } - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("config from pred transition=" + c.String()) } return c } -func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) } returnState := t.followState newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) + return NewATNConfig1(config, t.getTarget(), newContext) } -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { +func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet { altsets := PredictionModegetConflictingAltSubsets(configs) return PredictionModeGetAlts(altsets) } -// Sam pointed out a problem with the previous 
definition, v3, of +// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of // ambiguous states. If we have another state associated with conflicting // alternatives, we should keep going. For example, the following grammar // -// s : (ID | ID ID?) '' +// s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ;, it has a [DFA] +// state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]]. +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node +// because alternative to has another way to continue, via +// +// [6|2|[]]. // -// When the ATN simulation reaches the state before '', it has a DFA -// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally -// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node -// because alternative to has another way to continue, via [6|2|[]]. // The key is that we have a single state that has config's only associated // with a single alternative, 2, and crucially the state transitions // among the configurations are all non-epsilon transitions. That means // we don't consider any conflicts that include alternative 2. So, we // ignore the conflict between alts 1 and 2. We ignore a set of // conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state&rarrconfig-list map. +// associated with a single alt state in the state config-list map. // // It's also the case that we might have two conflicting configurations but -// also a 3rd nonconflicting configuration for a different alternative: -// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: +// also a 3rd non-conflicting configuration for a different alternative: +// +// [1|1|[], 1|2|[], 8|3|[]]. +// +// This can come about from grammar: // -// a : A | A | A B +// a : A | A | A B // // After Matching input A, we reach the stop state for rule A, state 1. // State 8 is the state right before B. Clearly alternatives 1 and 2 // conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not -// stop working on p state. In the previous example, we're concerned +// However, alternative 3 will be able to continue, so we do not +// stop working on this state. +// +// In the previous example, we're concerned // with states associated with the conflicting alternatives. Here alt // 3 is not associated with the conflicting configs, but since we can continue // looking for input reasonably, I don't declare the state done. We // ignore a set of conflicting alts when we have an alternative // that we still need to pursue. -// - -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet { var conflictingAlts *BitSet - if configs.GetUniqueAlt() != ATNInvalidAltNumber { + if configs.uniqueAlt != ATNInvalidAltNumber { conflictingAlts = NewBitSet() - conflictingAlts.add(configs.GetUniqueAlt()) + conflictingAlts.add(configs.uniqueAlt) } else { - conflictingAlts = configs.GetConflictingAlts() + conflictingAlts = configs.conflictingAlts } return conflictingAlts } @@ -1384,11 +1485,10 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { return p.GetTokenName(input.LA(1)) } -// Used for debugging in AdaptivePredict around execATN but I cut -// -// it out for clarity now that alg. works well. 
We can leave p -// "dead" code for a bit. -func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { +// Used for debugging in [AdaptivePredict] around [execATN], but I cut +// it out for clarity now that alg. works well. We can leave this +// "dead" code for a bit. +func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) { panic("Not implemented") @@ -1418,13 +1518,13 @@ func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { // } } -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { +func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException { return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) } -func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { +func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int { alt := ATNInvalidAltNumber - for _, c := range configs.GetItems() { + for _, c := range configs.configs { if alt == ATNInvalidAltNumber { alt = c.GetAlt() // found first alt } else if c.GetAlt() != alt { @@ -1452,8 +1552,10 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { // @return If {@code to} is {@code nil}, p method returns {@code nil} // otherwise p method returns the result of calling {@link //addDFAState} // on {@code to} +// +//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) } if to == nil { @@ -1472,7 +1574,7 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA from.setIthEdge(t+1, to) // connect p.atn.edgeMu.Unlock() - if ParserATNSimulatorDebug { + if runtimeConfig.parserATNSimulatorDebug { var names []string if p.parser != nil { names = p.parser.GetLiteralNames() @@ -1483,48 +1585,49 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA return to } -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. +// addDFAState adds state D to the [DFA] if it is not already present, and returns +// the actual instance stored in the [DFA]. If a state equivalent to D +// is already in the [DFA], the existing state is returned. Otherwise, this +// method returns D after adding it to the [DFA]. // -//

-// <p>If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and
-// does not change the DFA.</p>
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and +// does not change the DFA. // -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. +//goland:noinspection GoBoolExpressions func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { if d == ATNSimulatorError { return d } - existing, present := dfa.states.Get(d) + + existing, present := dfa.Get(d) if present { - if ParserATNSimulatorTraceATNSim { + if runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Print("addDFAState " + d.String() + " exists") } return existing } - // The state was not present, so update it with configs + // The state will be added if not already there or we will be given back the existing state struct + // if it is present. // - d.stateNumber = dfa.states.Len() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) + d.stateNumber = dfa.Len() + if !d.configs.readOnly { + d.configs.OptimizeConfigs(&p.BaseATNSimulator) + d.configs.readOnly = true + d.configs.configLookup = nil } - dfa.states.Put(d) - if ParserATNSimulatorTraceATNSim { + dfa.Put(d) + + if runtimeConfig.parserATNSimulatorTraceATNSim { fmt.Println("addDFAState new " + d.String()) } return d } -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) @@ -1534,8 +1637,9 @@ func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAl } } -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) @@ -1545,10 +1649,15 @@ func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, } } -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { +// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route. 
+// +// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer +// so that they can see that this is happening and can take action if they want to. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { interval := NewInterval(startIndex, stopIndex+1) fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go similarity index 77% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go rename to vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go index 1c8cee74795..c249bc1385c 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go @@ -31,7 +31,9 @@ type ParserRuleContext interface { } type BaseParserRuleContext struct { - *BaseRuleContext + parentCtx RuleContext + invokingState int + RuleIndex int start, stop Token exception RecognitionException @@ -40,8 +42,22 @@ type BaseParserRuleContext struct { func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { prc := new(BaseParserRuleContext) + InitBaseParserRuleContext(prc, parent, invokingStateNumber) + return prc +} + +func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) { + // What context invoked b rule? + prc.parentCtx = parent - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. + if parent == nil { + prc.invokingState = -1 + } else { + prc.invokingState = invokingStateNumber + } prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a Visitor, @@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) // The exception that forced prc rule to return. If the rule successfully // completed, prc is {@code nil}. prc.exception = nil - - return prc } func (prc *BaseParserRuleContext) SetException(e RecognitionException) { @@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string { return s } -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { +// EnterRule is called when any rule is entered. +func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) { } -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { +// ExitRule is called when any rule is exited. 
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) { } -// * Does not set parent link other add methods do that/// +// * Does not set parent link other add methods do that func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { if prc.children == nil { prc.children = make([]Tree, 0) @@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { return child } -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. -// / +// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as +// we entered a rule. If we have a label, we will need to remove +// the generic ruleContext object. func (prc *BaseParserRuleContext) RemoveLastChild() { if prc.children != nil && len(prc.children) > 0 { prc.children = prc.children[0 : len(prc.children)-1] @@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int { return len(prc.children) } -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { +func (prc *BaseParserRuleContext) GetSourceInterval() Interval { if prc.start == nil || prc.stop == nil { return TreeInvalidInterval } @@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } +func (prc *BaseParserRuleContext) SetParent(v Tree) { + if v == nil { + prc.parentCtx = nil + } else { + prc.parentCtx = v.(RuleContext) + } +} + +func (prc *BaseParserRuleContext) GetInvokingState() int { + return prc.invokingState +} + +func (prc *BaseParserRuleContext) SetInvokingState(t int) { + prc.invokingState = t +} + +func (prc *BaseParserRuleContext) GetRuleIndex() int { + return prc.RuleIndex +} + +func (prc *BaseParserRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (prc *BaseParserRuleContext) SetAltNumber(_ int) {} + +// IsEmpty returns true if the context of b is empty. +// +// A context is empty if there is no invoking state, meaning nobody calls +// current context. +func (prc *BaseParserRuleContext) IsEmpty() bool { + return prc.invokingState == -1 +} + +// GetParent returns the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +// +// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of this +// method. +func (prc *BaseParserRuleContext) GetParent() Tree { + return prc.parentCtx +} + var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) type InterpreterRuleContext interface { @@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct { *BaseParserRuleContext } +//goland:noinspection GoUnusedExportedFunction func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { prc := new(BaseInterpreterRuleContext) diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go new file mode 100644 index 00000000000..c1b80cc1f0f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go @@ -0,0 +1,727 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
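The new prediction_context.go added here collapses what were separate Empty, Singleton, and Array prediction-context types into a single struct whose behavior is selected by a pcType discriminator. A toy sketch of that discriminated-struct pattern, with hypothetical names (ctx, kind*) rather than the runtime's:

    package main

    import "fmt"

    // Variant tags, analogous to PredictionContextEmpty/Singleton/Array.
    const (
    	kindEmpty = iota
    	kindSingleton
    	kindArray
    )

    // ctx is a toy analogue of the single-struct design: one type, with
    // methods switching on kind instead of dispatching through interfaces.
    type ctx struct {
    	kind         int
    	returnState  int
    	returnStates []int
    }

    func (c *ctx) length() int {
    	switch c.kind {
    	case kindArray:
    		return len(c.returnStates)
    	default:
    		return 1
    	}
    }

    func main() {
    	s := &ctx{kind: kindSingleton, returnState: 7}
    	a := &ctx{kind: kindArray, returnStates: []int{7, 9}}
    	fmt.Println(s.length(), a.length()) // 1 2
    }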
+ +package antlr + +import ( + "fmt" + "golang.org/x/exp/slices" + "strconv" +) + +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash +} + +const ( + // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $ + // doesn't mean wildcard: + // + // $ + x = [$,x] + // + // Here, + // + // $ = EmptyReturnState + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here +// +//goland:noinspection GoUnusedGlobalVariable +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +const ( + PredictionContextEmpty = iota + PredictionContextSingleton + PredictionContextArray +) + +// PredictionContext is a go idiomatic implementation of PredictionContext that does not rty to +// emulate inheritance from Java, and can be used without an interface definition. An interface +// is not required because no user code will ever need to implement this interface. +type PredictionContext struct { + cachedHash int + pcType int + parentCtx *PredictionContext + returnState int + parents []*PredictionContext + returnStates []int +} + +func NewEmptyPredictionContext() *PredictionContext { + nep := &PredictionContext{} + nep.cachedHash = calculateEmptyHash() + nep.pcType = PredictionContextEmpty + nep.returnState = BasePredictionContextEmptyReturnState + return nep +} + +func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext { + pc := &PredictionContext{} + pc.pcType = PredictionContextSingleton + pc.returnState = returnState + pc.parentCtx = parent + if parent != nil { + pc.cachedHash = calculateHash(parent, returnState) + } else { + pc.cachedHash = calculateEmptyHash() + } + return pc +} + +func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. 
+ hash := murmurInit(1) + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + hash = murmurFinish(hash, len(parents)<<1) + + nec := &PredictionContext{} + nec.cachedHash = hash + nec.pcType = PredictionContextArray + nec.parents = parents + nec.returnStates = returnStates + return nec +} + +func (p *PredictionContext) Hash() int { + return p.cachedHash +} + +func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + switch p.pcType { + case PredictionContextEmpty: + otherP := other.(*PredictionContext) + return other == nil || otherP == nil || otherP.isEmpty() + case PredictionContextSingleton: + return p.SingletonEquals(other) + case PredictionContextArray: + return p.ArrayEquals(other) + } + return false +} + +func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool { + if o == nil { + return false + } + other := o.(*PredictionContext) + if other == nil || other.pcType != PredictionContextArray { + return false + } + if p.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(p.returnStates, other.returnStates) && + slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { + return x.Equals(y) + }) +} + +func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { + if other == nil { + return false + } + otherP := other.(*PredictionContext) + if otherP == nil { + return false + } + + if p.cachedHash != otherP.Hash() { + return false // Can't be same if hash is different + } + + if p.returnState != otherP.getReturnState(0) { + return false + } + + // Both parents must be nil if one is + if p.parentCtx == nil { + return otherP.parentCtx == nil + } + + return p.parentCtx.Equals(otherP.parentCtx) +} + +func (p *PredictionContext) GetParent(i int) *PredictionContext { + switch p.pcType { + case PredictionContextEmpty: + return nil + case PredictionContextSingleton: + return p.parentCtx + case PredictionContextArray: + return p.parents[i] + } + return nil +} + +func (p *PredictionContext) getReturnState(i int) int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates[i] + default: + return p.returnState + } +} + +func (p *PredictionContext) GetReturnStates() []int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates + default: + return []int{p.returnState} + } +} + +func (p *PredictionContext) length() int { + switch p.pcType { + case PredictionContextArray: + return len(p.returnStates) + default: + return 1 + } +} + +func (p *PredictionContext) hasEmptyPath() bool { + switch p.pcType { + case PredictionContextSingleton: + return p.returnState == BasePredictionContextEmptyReturnState + } + return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState +} + +func (p *PredictionContext) String() string { + switch p.pcType { + case PredictionContextEmpty: + return "$" + case PredictionContextSingleton: + var up string + + if p.parentCtx == nil { + up = "" + } else { + up = p.parentCtx.String() + } + + if len(up) == 0 { + if p.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(p.returnState) + } + + return strconv.Itoa(p.returnState) + " " + up + case PredictionContextArray: + if p.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < 
len(p.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if p.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(p.returnStates[i]) + if !p.parents[i].isEmpty() { + s = s + " " + p.parents[i].String() + } else { + s = s + "nil" + } + } + return s + "]" + + default: + return "unknown" + } +} + +func (p *PredictionContext) isEmpty() bool { + switch p.pcType { + case PredictionContextEmpty: + return true + case PredictionContextArray: + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return p.returnStates[0] == BasePredictionContextEmptyReturnState + default: + return false + } +} + +func (p *PredictionContext) Type() int { + return p.pcType +} + +func calculateHash(parent *PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { + return mergeSingletons(a, b, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as wildcard + if rootIsWildcard { + if a.isEmpty() { + return a + } + if b.isEmpty() { + return b + } + } + + // Convert either Singleton or Empty to arrays, so that we can merge them + // + ara := convertToArray(a) + arb := convertToArray(b) + return mergeArrays(ara, arb, rootIsWildcard, mergeCache) +} + +func convertToArray(pc *PredictionContext) *PredictionContext { + switch pc.Type() { + case PredictionContextEmpty: + return NewArrayPredictionContext([]*PredictionContext{}, []int{}) + case PredictionContextSingleton: + return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) + default: + // Already an array + } + return pc +} + +// mergeSingletons merges two Singleton [PredictionContext] instances. +// +// Stack tops equal, parents merge is same return left graph. +//

+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + return previous + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.Put(a, b, rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent.Equals(a.parentCtx) { + return a // ax + bx = ax, if a=b + } + if parent.Equals(b.parentCtx) { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array. + // New joined parent so create a new singleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.Put(a, b, spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent *PredictionContext + if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []*PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []*PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []*PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc +} + +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//

+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+// Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext { + if rootIsWildcard { + if a.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // // + b =// + } + if b.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a.isEmpty() && b.isEmpty() { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a.isEmpty() { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// Merge two {@link ArrayBasePredictionContext} instances. +// +//
+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
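+// For example, merging return states [1,3] with [2] walks both sorted
+// lists and yields an array context with return states [1,2,3]; the two
+// parents are merged only where the return states are equal.
+//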
+// +//goland:noinspection GoBoolExpressions +func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.Put(a, b, pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation + if M.Equals(a) { + if mergeCache != nil { + mergeCache.Put(a, b, a) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") + } + return a + } + if M.Equals(b) { + if mergeCache != nil { + mergeCache.Put(a, b, b) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") + } + 
return b + } + combineCommonParents(&mergedParents) + + if mergeCache != nil { + mergeCache.Put(a, b, M) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) + } + return M +} + +// Make a pass over all M parents and merge any Equals() ones. +// Note that we pass a pointer to the slice as we want to modify it in place. +// +//goland:noinspection GoUnusedFunction +func combineCommonParents(parents *[]*PredictionContext) { + uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext") + + for p := 0; p < len(*parents); p++ { + parent := (*parents)[p] + _, _ = uniqueParents.Put(parent) + } + for q := 0; q < len(*parents); q++ { + pc, _ := uniqueParents.Get((*parents)[q]) + (*parents)[q] = pc + } +} + +func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext { + if context.isEmpty() { + return context + } + existing, present := visited.Get(context) + if present { + return existing + } + + existing, present = contextCache.Get(context) + if present { + visited.Put(context, existing) + return existing + } + changed := false + parents := make([]*PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || !parent.Equals(context.GetParent(i)) { + if !changed { + parents = make([]*PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited.Put(context, context) + return context + } + var updated *PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.GetReturnStates()) + } + contextCache.add(updated) + visited.Put(updated, updated) + visited.Put(context, updated) + + return updated +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go new file mode 100644 index 00000000000..25dfb11e8ff --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go @@ -0,0 +1,48 @@ +package antlr + +var BasePredictionContextEMPTY = &PredictionContext{ + cachedHash: calculateEmptyHash(), + pcType: PredictionContextEmpty, + returnState: BasePredictionContextEmptyReturnState, +} + +// PredictionContextCache is used to cache [PredictionContext] objects. It is used for the shared +// context cache associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. +type PredictionContextCache struct { + cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]] +} + +func NewPredictionContextCache() *PredictionContextCache { + return &PredictionContextCache{ + cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"), + } +} + +// Add a context to the cache and return it. If the context already exists, +// return that one instead and do not add a new context to the cache.
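+//
+// A usage sketch (the surrounding caller here is hypothetical; only
+// NewPredictionContextCache and add come from this file):
+//
+//	cache := NewPredictionContextCache()
+//	canonical := cache.add(ctx) // returns the cached entry when an Equals context exists
+//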
+// Protect shared cache from unsafe thread access. +func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext { + if ctx.isEmpty() { + return BasePredictionContextEMPTY + } + + // Put will return the existing entry if it is present (note this is done via Equals, not whether it is + // the same pointer), otherwise it will add the new entry and return that. + // + existing, present := p.cache.Get(ctx) + if present { + return existing + } + p.cache.Put(ctx, ctx) + return ctx +} + +func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) { + pc, exists := p.cache.Get(ctx) + return pc, exists +} + +func (p *PredictionContextCache) length() int { + return p.cache.Len() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go new file mode 100644 index 00000000000..3f85a6a520b --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go @@ -0,0 +1,536 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // PredictionModeSLL represents the SLL(*) prediction mode. + // This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the SLL prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful LL prediction abilities to complete successfully. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeSLL = 0 + + // PredictionModeLL represents the LL(*) prediction mode. + // This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // report a precise answer for exactly which alternatives are + // ambiguous. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLL = 1 + + // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode + // with exact ambiguity detection. 
+ // + // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + // This prediction mode may be used for diagnosing ambiguities during + // grammar development. Due to the performance overhead of calculating sets + // of ambiguous alternatives, this prediction mode should be avoided when + // the exact results are not necessary. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLLExactAmbigDetection = 2 +) + +// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition. +// +// This method computes the SLL prediction termination condition for both of +// the following cases: +// +// - The usual SLL+LL fallback upon SLL conflict +// - Pure SLL without LL fallback +// +// # Combined SLL+LL Parsing +// +// When LL-fallback is enabled upon SLL conflict, correct predictions are +// ensured regardless of how the termination condition is computed by this +// method. Due to the substantially higher cost of LL prediction, the +// prediction should only fall back to LL when the additional lookahead +// cannot lead to a unique SLL prediction. +// +// Assuming combined SLL+LL parsing, an SLL configuration set with only +// conflicting subsets should fall back to full LL, even if the +// configuration sets don't resolve to the same alternative, e.g. +// +// {1,2} and {3,4} +// +// If there is at least one non-conflicting +// configuration, SLL could continue with the hopes that more lookahead will +// resolve via one of those non-conflicting configurations. +// +// Here's the prediction termination rule then: SLL (for SLL+LL parsing) +// stops when it sees only conflicting configuration subsets. In contrast, +// full LL keeps going when there is uncertainty. +// +// # Heuristic +// +// As a heuristic, we stop prediction when we see any conflicting subset +// unless we see a state that only has one alternative associated with it. +// The single-alt-state thing lets prediction continue upon rules like +// (otherwise, it would admit defeat too soon): +// +// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ';', it has a +// [DFA] state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]] +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node because alternative two has another way to continue, +// via +// +// [6|2|[]] +// +// It also lets us continue for this rule: +// +// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ; +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state immediately before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue, and so we do not stop +// working on this state. In the previous example, we're concerned with +// states associated with the conflicting alternatives. Here alt 3 is not +// associated with the conflicting configs, but since we can continue +// looking for input reasonably, don't declare the state done.
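+//
+// In code, the heuristic boils down to the return expression at the bottom
+// of this function, paraphrased here:
+//
+//	altsets := PredictionModegetConflictingAltSubsets(configs)
+//	terminate := PredictionModehasConflictingAltSet(altsets) &&
+//		!PredictionModehasStateAssociatedWithOneAlt(configs)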
+// +// # Pure SLL Parsing +// +// To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic. +// +// # Predicates in SLL+LL Parsing +// +// SLL decisions don't evaluate predicates until after they reach [DFA] stop +// states because they need to create the [DFA] cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation, so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates. +// +// Implementation-wise, [ATNConfigSet] combines stack contexts but not +// semantic predicate contexts, so we might see two configurations like the +// following: +// +// (s, 1, x, {}), (s, 1, x', {p}) +// +// Before testing these configurations against others, we have to merge +// x and x' (without modifying the existing configurations). +// For example, we test (x+x')==x” when looking for conflicts in +// the following configurations: +// +// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {}) +// +// If the configuration set has predicates (as indicated by +// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of +// the configurations to strip out all the predicates so that a standard +// [ATNConfigSet] will merge everything ignoring predicates. +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool { + + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input, so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.hasSemanticContext { + // dup configs, tossing out semantic predicates + dup := NewATNConfigSet(false) + for _, c := range configs.configs { + + // NewATNConfig({semanticContext:}, c) + c = NewATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar predicates + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// The func returns true if any configuration in the supplied configs is in a [RuleStopState] +func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). 
+// +// the func returns true if all configurations in configs are in a +// [RuleStopState] +func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool { + + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination. +// +// Can we stop looking ahead during [ATN] simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict. +// +// The basic idea is to split the set of configurations 'C', into +// conflicting subsets (s, _, ctx, _) and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical [ATNConfig].state and [ATNConfig].context values +// but a different [ATNConfig].alt value, e.g. +// +// (s, i, ctx, _) +// +// and +// +// (s, j, ctx, _) ; for i != j +// +// Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows: +// +// A_s,ctx = {i | (s, i, ctx, _)} +// +// for each configuration in C holding s and ctx fixed. +// +// Or in pseudo-code: +// +// for each configuration c in C: +// map[c] U = c.ATNConfig.alt alt // map hash/equals uses s and x, not alt and not pred +// +// The values in map are the set of +// +// A_s,ctx +// +// sets. +// +// If +// +// |A_s,ctx| = 1 +// +// then there is no conflict associated with s and ctx. +// +// Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// further lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round. +// +// The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal; you will still have the conflict. It's just inefficient. It +// might even look until the end of file. +// +// No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check. +// +// # Conflicting Configs +// +// Two configurations: +// +// (s, i, x) and (s, j, x') +// +// conflict when i != j but x = x'. Because we merge all +// (s, i, _) configurations together, that means that there are at +// most n configurations associated with state s for +// n possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts x and x'. +// +// Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either x or x'. 
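+// For example, merging x = [20] with x' = [20,$] yields [20,$], which
+// Equals x', so x' is a superset of x.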
+// If the x associated with lowest alternative i +// is the superset, then i is the only possible prediction since the +// others resolve to min(i) as well. However, if x is +// associated with j > i then at least one stack configuration for +// j is not in conflict with alternative i. The algorithm +// should keep going, looking for more lookahead due to the uncertainty. +// +// For simplicity, I'm doing an equality check between x and +// x', which lets the algorithm continue to consume lookahead longer +// than necessary. The reason I like the equality is of course the +// simplicity but also because that is the test you need to detect the +// alternatives that are actually in conflict. +// +// # Continue/Stop Rule +// +// Continue if the union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict. +// +// The complete set of alternatives, +// +// [i for (_, i, _)] +// +// tells us which alternatives are still in the running for the amount of input we've +// consumed at this point. The conflicting sets let us strip away +// configurations that won't lead to more states because we resolve +// conflicts to the configuration with a minimum alternate for the +// conflicting set. +// +// Cases +// +// - no conflicts and more than 1 alternative in set => continue +// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set +// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set +// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1 +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets +// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2} +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets +// {1} ∪ {3} = {1,3} => continue +// +// # Exact Ambiguity Detection +// +// If all states report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set: +// +// |A_i| > 1 +// +// and +// +// A_i = A_j ; for all i, j +// +// In other words, we continue examining lookahead until all A_i +// have more than one alternative and all A_i are the same. If +// +// A={{1,2}, {1,3}} +// +// then regular LL prediction would terminate because the resolved set is {1}. +// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like: +// +// A={{1,2}} +// +// or +// +// {{1,2},{1,2}}, etc... +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more +// than one alternative.
+// +// The func returns true if every [BitSet] in altsets has +// [BitSet].cardinality cardinality > 1 +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains +// exactly one alternative. +// +// The func returns true if altsets contains at least one [BitSet] with +// [BitSet].cardinality cardinality 1 +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains +// more than one alternative. +// +// The func returns true if altsets contains a [BitSet] with +// [BitSet].cardinality cardinality > 1, otherwise false +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent. +// +// The func returns true if every member of altsets is equal to the others. +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in +// altsets. If no such alternative exists, this method returns +// [ATNInvalidAltNumber]. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each [BitSet] +// in altsets, being the set of represented alternatives in altsets. +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. +// +// for each configuration c in configs: +// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred +func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet { + configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()") + + for _, c := range configs.configs { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. 
+// +// for each configuration c in configs: +// map[c.ATNConfig.state] U= c.ATNConfig.alt} +func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.configs { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets +// if there is one. +// +// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go similarity index 70% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go rename to vendor/github.com/antlr4-go/antlr/v4/recognizer.go index bfe542d0914..2e0b504fb3d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -26,6 +26,9 @@ type Recognizer interface { RemoveErrorListeners() GetATN() *ATN GetErrorListenerDispatch() ErrorListener + HasError() bool + GetError() RecognitionException + SetError(RecognitionException) } type BaseRecognizer struct { @@ -36,6 +39,7 @@ type BaseRecognizer struct { LiteralNames []string SymbolicNames []string GrammarFileName string + SynErr RecognitionException } func NewBaseRecognizer() *BaseRecognizer { @@ -45,7 +49,10 @@ func NewBaseRecognizer() *BaseRecognizer { return rec } +//goland:noinspection GoUnusedGlobalVariable var tokenTypeMapCache = make(map[string]int) + +//goland:noinspection GoUnusedGlobalVariable var ruleIndexMapCache = make(map[string]int) func (b *BaseRecognizer) checkVersion(toolVersion string) { @@ -55,7 +62,19 @@ func (b *BaseRecognizer) checkVersion(toolVersion string) { } } -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { +func (b *BaseRecognizer) SetError(err RecognitionException) { + b.SynErr = err +} + +func (b *BaseRecognizer) HasError() bool { + return b.SynErr != nil +} + +func (b *BaseRecognizer) GetError() RecognitionException { + return b.SynErr +} + +func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) { panic("action not implemented on Recognizer!") } @@ -105,9 +124,11 @@ func (b *BaseRecognizer) SetState(v int) { // return result //} -// Get a map from rule names to rule indexes. +// GetRuleIndexMap Get a map from rule names to rule indexes. // -//
<p>Used for XPath and tree pattern compilation.</p>
+// Used for XPath and tree pattern compilation. +// +// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed. func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { panic("Method not defined!") @@ -124,7 +145,8 @@ func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { // return result } -func (b *BaseRecognizer) GetTokenType(tokenName string) int { +// GetTokenType get the token type based upon its name +func (b *BaseRecognizer) GetTokenType(_ string) int { panic("Method not defined!") // var ttype = b.GetTokenTypeMap()[tokenName] // if (ttype !=nil) { @@ -162,26 +184,27 @@ func (b *BaseRecognizer) GetTokenType(tokenName string) int { // } //} -// What is the error header, normally line/character position information?// +// GetErrorHeader returns the error header, normally line/character position information. +// +// Can be overridden in sub structs embedding BaseRecognizer. func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { line := e.GetOffendingToken().GetLine() column := e.GetOffendingToken().GetColumn() return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) } -// How should a token be displayed in an error message? The default +// GetTokenErrorDisplay shows how a token should be displayed in an error message. // -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. // -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. +// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific +// implementations of [ANTLRErrorStrategy] may provide a similar +// feature when necessary. 
For example, see [DefaultErrorStrategy].GetTokenErrorDisplay() func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { if t == nil { return "" @@ -205,12 +228,14 @@ func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { return NewProxyErrorListener(b.listeners) } -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { +// Sempred embedding structs need to override this if there are sempreds or actions +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool { return true } -func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { +// Precpred embedding structs need to override this if there are preceding predicates +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool { return true } diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go new file mode 100644 index 00000000000..f2ad04793e1 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go @@ -0,0 +1,40 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// RuleContext is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. +// +// We actually carry no information about the rule associated with this context (except +// when parsing). We keep only the state number of the invoking state from +// the [ATN] submachine that invoked this. Contrast this with the s +// pointer inside [ParserRuleContext] that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the struct +// [ParserRuleContext], which embeds a RuleContext. +// +// @see ParserRuleContext +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go similarity index 92% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go rename to vendor/github.com/antlr4-go/antlr/v4/semantic_context.go index a702e99def7..68cb9061eb6 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go @@ -9,14 +9,13 @@ import ( "strconv" ) -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +// SemanticContext is a tree structure used to record the semantic context in which // -//
<p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-// {@link SemanticContext} within the scope of this outer class.</p>
+// an ATN configuration is valid. It's either a single predicate, +// a conjunction p1 && p2, or a sum of products p1 || p2. // - +// I have scoped the AND, OR, and Predicate subclasses of +// [SemanticContext] within the scope of this outer ``class'' type SemanticContext interface { Equals(other Collectable[SemanticContext]) bool Hash() int @@ -80,7 +79,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { var SemanticContextNone = NewPredicate(-1, -1, false) -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { +func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext { return p } @@ -198,7 +197,7 @@ type AND struct { func NewAND(a, b SemanticContext) *AND { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands") if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { operands.Put(o) @@ -230,9 +229,7 @@ func NewAND(a, b SemanticContext) *AND { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) and := new(AND) and.opnds = opnds @@ -316,12 +313,12 @@ func (a *AND) Hash() int { return murmurFinish(h, len(a.opnds)) } -func (a *OR) Hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { +func (o *OR) Hash() int { + h := murmurInit(41) // Init with o value different from AND + for _, op := range o.opnds { h = murmurUpdate(h, op.Hash()) } - return murmurFinish(h, len(a.opnds)) + return murmurFinish(h, len(o.opnds)) } func (a *AND) String() string { @@ -349,7 +346,7 @@ type OR struct { func NewOR(a, b SemanticContext) *OR { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands") if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { operands.Put(o) @@ -382,9 +379,7 @@ func NewOR(a, b SemanticContext) *OR { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) o := new(OR) o.opnds = opnds diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go new file mode 100644 index 00000000000..70c0673a0f6 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -0,0 +1,281 @@ +//go:build antlr.stats + +package antlr + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "sync" +) + +// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default +// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag. +// + +// Tells various components to collect statistics - because it is only true when this file is included, it will +// allow the compiler to completely eliminate all the code that is only used when collecting statistics. +const collectStats = true + +// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run. +// It is exported so that it can be used by others to look for things that are not already looked for in the +// runtime statistics. 
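+//
+// A hedged usage sketch (only meaningful when the program is built with
+// -tags antlr.stats; the report directory and prefix are assumptions):
+//
+//	antlr.Statistics.Analyze()
+//	if err := antlr.Statistics.Report("./stats", "myparser"); err != nil {
+//		log.Fatal(err)
+//	}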
+type goRunStats struct { + + // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created + // during a run. It is exported so that it can be used by others to look for things that are not already looked for + // within this package. + // + jStats []*JStatRec + jStatsLock sync.RWMutex + topN int + topNByMax []*JStatRec + topNByUsed []*JStatRec + unusedCollections map[CollectionSource]int + counts map[CollectionSource]int +} + +const ( + collectionsFile = "collections" +) + +var ( + Statistics = &goRunStats{ + topN: 10, + } +) + +type statsOption func(*goRunStats) error + +// Configure allows the statistics system to be configured as the user wants and override the defaults +func (s *goRunStats) Configure(options ...statsOption) error { + for _, option := range options { + err := option(s) + if err != nil { + return err + } + } + return nil +} + +// WithTopN sets the number of things to list in the report when we are concerned with the top N things. +// +// For example, if you want to see the top 20 collections by size, you can do: +// +// antlr.Statistics.Configure(antlr.WithTopN(20)) +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + s.topN = topN + return nil + } +} + +// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user. +// +// The function gathers and analyzes a number of statistics about any particular run of +// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only +// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be +// especially useful in tracking down bugs or performance problems when an ANTLR user could +// supply the output from this package, but cannot supply the grammar file(s) they are using, even +// privately to the maintainers. +// +// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user +// must call this function themselves to analyze the statistics. This is because none of the infrastructure is +// extant unless the calling program is built with the antlr.stats tag like so: +// +// go build -tags antlr.stats . +// +// When a program is built with the antlr.stats tag, the Statistics object is created and available outside +// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the +// [Statistics.Report] function to report the statistics. +// +// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send them to +// me [Jim Idle] directly at jimi@idle.ws +// +// [Jim Idle]: https://github.com/jim-idle +func (s *goRunStats) Analyze() { + + // Look for anything that looks strange and record it in our local maps etc for the report to present it + // + s.CollectionAnomalies() + s.TopNCollections() +} + +// TopNCollections looks through all the statistical records and gathers the top N collections by size.
+func (s *goRunStats) TopNCollections() { + + // Let's sort the stat records by MaxSize + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].MaxSize > s.jStats[j].MaxSize + }) + + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByMax = append(s.topNByMax, s.jStats[i]) + } + + // Sort by the number of times used + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts + }) + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByUsed = append(s.topNByUsed, s.jStats[i]) + } +} + +// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output +// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be +// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix. +func (s *goRunStats) Report(dir string, prefix string) error { + + isDir, err := isDirectory(dir) + switch { + case err != nil: + return err + case !isDir: + return fmt.Errorf("output directory `%s` is not a directory", dir) + } + s.reportCollections(dir, prefix) + + // Clean out any old data in case the user forgets + // + s.Reset() + return nil +} + +func (s *goRunStats) Reset() { + s.jStats = nil + s.topNByUsed = nil + s.topNByMax = nil +} + +func (s *goRunStats) reportCollections(dir, prefix string) { + cname := filepath.Join(dir, ".asciidoctor") + // If the file doesn't exist, create it, or append to the file + f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + _, _ = f.WriteString(`// .asciidoctorconfig +++++ + +++++`) + _ = f.Close() + + fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc") + // If the file doesn't exist, create it, or append to the file + f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + defer func(f *os.File) { + err := f.Close() + if err != nil { + log.Fatal(err) + } + }(f) + _, _ = f.WriteString("= Collections for " + prefix + "\n\n") + + _, _ = f.WriteString("== Summary\n") + + if s.unusedCollections != nil { + _, _ = f.WriteString("=== Unused Collections\n") + _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n") + _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n") + _, _ = f.WriteString(" consider removing it. 
If you are using a collection that is used, but not very often,\n") + _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n") + _, _ = f.WriteString(" actually needed.\n\n") + + _, _ = f.WriteString("\n.Unused collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + + for k, v := range s.unusedCollections { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + f.WriteString("|===\n\n") + } + + _, _ = f.WriteString("\n.Summary of Collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + for k, v := range s.counts { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n") + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n") + for _, c := range s.topNByMax { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n") + for _, c := range s.topNByUsed { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") +} + +// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled. +func (s *goRunStats) AddJStatRec(rec *JStatRec) { + s.jStatsLock.Lock() + defer s.jStatsLock.Unlock() + s.jStats = append(s.jStats, rec) +} + +// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found. 
+func (s *goRunStats) CollectionAnomalies() { + s.jStatsLock.RLock() + defer s.jStatsLock.RUnlock() + s.counts = make(map[CollectionSource]int, len(s.jStats)) + for _, c := range s.jStats { + + // Accumulate raw counts + // + s.counts[c.Source]++ + + // Look for allocated but unused collections and count them + if c.MaxSize == 0 && c.Puts == 0 { + if s.unusedCollections == nil { + s.unusedCollections = make(map[CollectionSource]int) + } + s.unusedCollections[c.Source]++ + } + if c.MaxSize > 6000 { + fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar") + } + } + +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go new file mode 100644 index 00000000000..4d9eb94e5fa --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go @@ -0,0 +1,23 @@ +package antlr + +// A JStatRec is a record of a particular use of a [JStore], [JMap] or [JPCMap] collection. Typically, it will be +// used to look for unused collections that were allocated anyway, problems with hash bucket clashes, and anomalies +// such as huge numbers of Gets with no entries found (GetNoEnt). You can refer to the CollectionAnomalies() function +// for ideas on what can be gleaned from these statistics about collections. +type JStatRec struct { + Source CollectionSource + MaxSize int + CurSize int + Gets int + GetHits int + GetMisses int + GetHashConflicts int + GetNoEnt int + Puts int + PutHits int + PutMisses int + PutHashConflicts int + MaxSlotSize int + Description string + CreateStack []byte +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go similarity index 86% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go rename to vendor/github.com/antlr4-go/antlr/v4/token.go index f73b06bc6a0..9670efb829e 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go +++ b/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -35,6 +35,8 @@ type Token interface { GetTokenSource() TokenSource GetInputStream() CharStream + + String() string } type BaseToken struct { @@ -53,7 +55,7 @@ type BaseToken struct { const ( TokenInvalidType = 0 - // During lookahead operations, this "token" signifies we hit rule end ATN state + // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state // and did not follow it despite needing to. TokenEpsilon = -2 @@ -61,15 +63,16 @@ const ( TokenEOF = -1 - // All tokens go to the parser (unless Skip() is called in that rule) + // TokenDefaultChannel is the default channel upon which tokens are sent to the parser. + // + // All tokens go to the parser (unless [Skip] is called in the lexer rule) // on a particular "channel". The parser tunes to a particular channel // so that whitespace etc... can go to the parser on a "hidden" channel. - TokenDefaultChannel = 0 - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. - + // TokenHiddenChannel defines the normal hidden channel - the parser will not see tokens that are not on [TokenDefaultChannel]. + // + // Anything on a different channel than TokenDefaultChannel is not parsed by parser.
TokenHiddenChannel = 1 ) @@ -118,21 +121,22 @@ func (b *BaseToken) GetInputStream() CharStream { } type CommonToken struct { - *BaseToken + BaseToken } func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - t := new(CommonToken) - - t.BaseToken = new(BaseToken) + t := &CommonToken{ + BaseToken: BaseToken{ + source: source, + tokenType: tokenType, + channel: channel, + start: start, + stop: stop, + tokenIndex: -1, + }, + } - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 if t.source.tokenSource != nil { t.line = source.tokenSource.GetLine() t.column = source.tokenSource.GetCharPositionInLine() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go b/vendor/github.com/antlr4-go/antlr/v4/token_source.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go rename to vendor/github.com/antlr4-go/antlr/v4/token_source.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go similarity index 90% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/token_stream.go index 1527d43f608..bf4ff6633e8 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go @@ -8,13 +8,14 @@ type TokenStream interface { IntStream LT(k int) Token + Reset() Get(index int) Token GetTokenSource() TokenSource SetTokenSource(TokenSource) GetAllText() string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string GetTextFromRuleContext(RuleContext) string GetTextFromTokens(Token, Token) string } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go similarity index 73% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go rename to vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go index b3e38af3445..ccf59b465c5 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go +++ b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go @@ -86,14 +86,15 @@ import ( // first example shows.

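A hedged usage sketch of the rewriter API as renamed in this hunk (the token stream setup is assumed; GetTextDefault is defined further down this file):

	rewriter := antlr.NewTokenStreamRewriter(tokens)
	rewriter.InsertBeforeDefault(0, "// header\n") // queue an insert in the "default" program
	rewriter.ReplaceDefault(3, 5, "x")             // replace tokens 3..5 inclusive
	text := rewriter.GetTextDefault()              // render the rewritten stream
	_ = text
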
const ( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 + DefaultProgramName = "default" + ProgramInitSize = 100 + MinTokenIndex = 0 ) // Define the rewrite operation hierarchy type RewriteOperation interface { + // Execute the rewrite operation by possibly adding to the buffer. // Return the index of the next token to operate on. Execute(buffer *bytes.Buffer) int @@ -112,19 +113,19 @@ type RewriteOperation interface { type BaseRewriteOperation struct { //Current index of rewrites list - instruction_index int + instructionIndex int //Token buffer index index int //Substitution text text string //Actual operation name - op_name string + opName string //Pointer to token steam tokens TokenStream } func (op *BaseRewriteOperation) GetInstructionIndex() int { - return op.instruction_index + return op.instructionIndex } func (op *BaseRewriteOperation) GetIndex() int { @@ -136,7 +137,7 @@ func (op *BaseRewriteOperation) GetText() string { } func (op *BaseRewriteOperation) GetOpName() string { - return op.op_name + return op.opName } func (op *BaseRewriteOperation) GetTokens() TokenStream { @@ -144,7 +145,7 @@ func (op *BaseRewriteOperation) GetTokens() TokenStream { } func (op *BaseRewriteOperation) SetInstructionIndex(val int) { - op.instruction_index = val + op.instructionIndex = val } func (op *BaseRewriteOperation) SetIndex(val int) { @@ -156,20 +157,20 @@ func (op *BaseRewriteOperation) SetText(val string) { } func (op *BaseRewriteOperation) SetOpName(val string) { - op.op_name = val + op.opName = val } func (op *BaseRewriteOperation) SetTokens(val TokenStream) { op.tokens = val } -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { +func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int { return op.index } func (op *BaseRewriteOperation) String() string { return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, + op.opName, op.tokens.Get(op.GetIndex()), op.text, ) @@ -182,10 +183,10 @@ type InsertBeforeOp struct { func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index, - text: text, - op_name: "InsertBeforeOp", - tokens: stream, + index: index, + text: text, + opName: "InsertBeforeOp", + tokens: stream, }} } @@ -201,20 +202,21 @@ func (op *InsertBeforeOp) String() string { return op.BaseRewriteOperation.String() } -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - +// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions +// first and then the "insert before" instructions at same index. Implementation +// of "insert after" is "insert before index+1". type InsertAfterOp struct { BaseRewriteOperation } func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { - return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index + 1, - text: text, - tokens: stream, - }} + return &InsertAfterOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }, + } } func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { @@ -229,7 +231,7 @@ func (op *InsertAfterOp) String() string { return op.BaseRewriteOperation.String() } -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp +// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp // instructions. 
type ReplaceOp struct { BaseRewriteOperation @@ -239,10 +241,10 @@ type ReplaceOp struct { func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { return &ReplaceOp{ BaseRewriteOperation: BaseRewriteOperation{ - index: from, - text: text, - op_name: "ReplaceOp", - tokens: stream, + index: from, + text: text, + opName: "ReplaceOp", + tokens: stream, }, LastIndex: to, } @@ -270,17 +272,17 @@ type TokenStreamRewriter struct { // You may have multiple, named streams of rewrite operations. // I'm calling these things "programs." // Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int + programs map[string][]RewriteOperation + lastRewriteTokenIndexes map[string]int } func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { return &TokenStreamRewriter{ tokens: tokens, programs: map[string][]RewriteOperation{ - Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size), + DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize), }, - last_rewrite_token_indexes: map[string]int{}, + lastRewriteTokenIndexes: map[string]int{}, } } @@ -291,110 +293,110 @@ func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { // Rollback the instruction stream for a program so that // the indicated instruction (via instructionIndex) is no // longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) { - is, ok := tsr.programs[program_name] +func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) { + is, ok := tsr.programs[programName] if ok { - tsr.programs[program_name] = is[Min_Token_Index:instruction_index] + tsr.programs[programName] = is[MinTokenIndex:instructionIndex] } } -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) { - tsr.Rollback(Default_Program_Name, instruction_index) +func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) { + tsr.Rollback(DefaultProgramName, instructionIndex) } -// Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) { - tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included +// DeleteProgram Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(programName string) { + tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included } func (tsr *TokenStreamRewriter) DeleteProgramDefault() { - tsr.DeleteProgram(Default_Program_Name) + tsr.DeleteProgram(DefaultProgramName) } -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) { +func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) { // to insert after, just insert before next index (even if past end) var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) + rewrites := tsr.GetProgram(programName) op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) + tsr.AddToProgram(programName, op) } func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { - tsr.InsertAfter(Default_Program_Name, index, text) + tsr.InsertAfter(DefaultProgramName, index, text) } -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) { - tsr.InsertAfter(program_name, token.GetTokenIndex(), text) +func (tsr 
*TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) { + tsr.InsertAfter(programName, token.GetTokenIndex(), text) } -func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) { +func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) { var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) + rewrites := tsr.GetProgram(programName) op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) + tsr.AddToProgram(programName, op) } func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { - tsr.InsertBefore(Default_Program_Name, index, text) + tsr.InsertBefore(DefaultProgramName, index, text) } -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) { - tsr.InsertBefore(program_name, token.GetTokenIndex(), text) +func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) { + tsr.InsertBefore(programName, token.GetTokenIndex(), text) } -func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) { +func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) { if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", from, to, tsr.tokens.Size())) } var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) + rewrites := tsr.GetProgram(programName) op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) + tsr.AddToProgram(programName, op) } func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { - tsr.Replace(Default_Program_Name, from, to, text) + tsr.Replace(DefaultProgramName, from, to, text) } func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { tsr.ReplaceDefault(index, index, text) } -func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) { - tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) +func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) { + tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text) } func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { - tsr.ReplaceToken(Default_Program_Name, from, to, text) + tsr.ReplaceToken(DefaultProgramName, from, to, text) } func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { tsr.ReplaceTokenDefault(index, index, text) } -func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) { - tsr.Replace(program_name, from, to, "") +func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) { + tsr.Replace(programName, from, to, "") } func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { - tsr.Delete(Default_Program_Name, from, to) + tsr.Delete(DefaultProgramName, from, to) } func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { tsr.DeleteDefault(index, index) } -func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) { - tsr.ReplaceToken(program_name, from, to, "") +func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) { + tsr.ReplaceToken(programName, from, to, "") } func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { - tsr.DeleteToken(Default_Program_Name, from, to) + 
tsr.DeleteToken(DefaultProgramName, from, to) } -func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int { - i, ok := tsr.last_rewrite_token_indexes[program_name] +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int { + i, ok := tsr.lastRewriteTokenIndexes[programName] if !ok { return -1 } @@ -402,15 +404,15 @@ func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) in } func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { - return tsr.GetLastRewriteTokenIndex(Default_Program_Name) + return tsr.GetLastRewriteTokenIndex(DefaultProgramName) } -func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) { - tsr.last_rewrite_token_indexes[program_name] = i +func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) { + tsr.lastRewriteTokenIndexes[programName] = i } func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { - is := make([]RewriteOperation, 0, Program_Init_Size) + is := make([]RewriteOperation, 0, ProgramInitSize) tsr.programs[name] = is return is } @@ -429,24 +431,24 @@ func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { return is } -// Return the text from the original tokens altered per the +// GetTextDefault returns the text from the original tokens altered per the // instructions given to this rewriter. func (tsr *TokenStreamRewriter) GetTextDefault() string { return tsr.GetText( - Default_Program_Name, + DefaultProgramName, NewInterval(0, tsr.tokens.Size()-1)) } -// Return the text from the original tokens altered per the +// GetText returns the text from the original tokens altered per the // instructions given to this rewriter. -func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string { - rewrites := tsr.programs[program_name] +func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string { + rewrites := tsr.programs[programName] start := interval.Start stop := interval.Stop // ensure start/end are in range stop = min(stop, tsr.tokens.Size()-1) start = max(start, 0) - if rewrites == nil || len(rewrites) == 0 { + if len(rewrites) == 0 { return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute } buf := bytes.Buffer{} @@ -482,11 +484,13 @@ func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) return buf.String() } -// We need to combine operations and report invalid operations (like -// overlapping replaces that are not completed nested). Inserts to -// same index need to be combined etc... Here are the cases: +// reduceToSingleOperationPerIndex combines operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... 
+// +// Here are the cases: // -// I.i.u I.j.v leave alone, nonoverlapping +// I.i.u I.j.v leave alone, non-overlapping // I.i.u I.i.v combine: Iivu // // R.i-j.u R.x-y.v | i-j in x-y delete first R @@ -498,38 +502,38 @@ func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) // // I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before -// we're not deleting i) -// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping // R.x-y.v I.i.u | i in x-y ERROR // R.x-y.v I.x.u R.x-y.uv (combine, delete I) -// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping +// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping // // I.i.u = insert u before op @ index i // R.x-y.u = replace x-y indexed tokens with u // -// First we need to examine replaces. For any replace op: +// First we need to examine replaces. For any replace op: // -// 1. wipe out any insertions before op within that range. -// 2. Drop any replace op before that is contained completely within -// that range. -// 3. Throw exception upon boundary overlap with any previous replace. +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. // -// Then we can deal with inserts: +// Then we can deal with inserts: // -// 1. for any inserts to same index, combine even if not adjacent. -// 2. for any prior replace with same left boundary, combine this -// insert with replace and delete this replace. -// 3. throw exception if index in same range as previous replace +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this 'replace'. +// 3. throw exception if index in same range as previous replace // -// Don't actually delete; make op null in list. Easier to walk list. -// Later we can throw as we add to index → op map. +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. // -// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the -// inserted stuff would be before the replace range. But, if you -// add tokens in front of a method body '{' and then delete the method -// body, I think the stuff before the '{' you added should disappear too. +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the 'replace' range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. // -// Return a map from token index to operation. +// The func returns a map from token index to operation. 
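Concretely, per the case table above, an insert-before and a replace at the same index collapse into one replace whose text is the insert text followed by the replace text (sketch, assuming a rewriter tsr over some token stream):

func exampleCombine(tsr *TokenStreamRewriter) {
	tsr.InsertBeforeDefault(2, "X") // I.2.X
	tsr.ReplaceDefault(2, 2, "Y")   // R.2-2.Y
	// reduceToSingleOperationPerIndex folds these into a single R.2-2.XY,
	// so GetTextDefault renders token 2 as "XY".
}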
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { // WALK REPLACES for i := 0; i < len(rewrites); i++ { @@ -547,7 +551,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit if iop.index == rop.index { // E.g., insert before 2, delete 2..2; update replace // text to include insert before, kill insert - rewrites[iop.instruction_index] = nil + rewrites[iop.instructionIndex] = nil if rop.text != "" { rop.text = iop.text + rop.text } else { @@ -555,7 +559,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit } } else if iop.index > rop.index && iop.index <= rop.LastIndex { // delete insert as it's a no-op. - rewrites[iop.instruction_index] = nil + rewrites[iop.instructionIndex] = nil } } } @@ -564,7 +568,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit if prevop, ok := rewrites[j].(*ReplaceOp); ok { if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { // delete replace as it's a no-op. - rewrites[prevop.instruction_index] = nil + rewrites[prevop.instructionIndex] = nil continue } // throw exception unless disjoint or identical @@ -572,10 +576,9 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit // Delete special case of replace (text==null): // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) if prevop.text == "" && rop.text == "" && !disjoint { - rewrites[prevop.instruction_index] = nil + rewrites[prevop.instructionIndex] = nil rop.index = min(prevop.index, rop.index) rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) - println("new rop" + rop.String()) //TODO: remove console write, taken from Java version } else if !disjoint { panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) } @@ -607,7 +610,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { if prevIop.index == iop.GetIndex() { iop.SetText(iop.GetText() + prevIop.text) - rewrites[prevIop.instruction_index] = nil + rewrites[prevIop.instructionIndex] = nil } } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go rename to vendor/github.com/antlr4-go/antlr/v4/trace_listener.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go b/vendor/github.com/antlr4-go/antlr/v4/transition.go similarity index 67% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go rename to vendor/github.com/antlr4-go/antlr/v4/transition.go index 36be4f73310..313b0fc127f 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go +++ b/vendor/github.com/antlr4-go/antlr/v4/transition.go @@ -72,7 +72,7 @@ func (t *BaseTransition) getSerializationType() int { return t.serializationType } -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *BaseTransition) Matches(_, _, _ int) bool { panic("Not implemented") } @@ -89,6 +89,7 @@ const ( TransitionPRECEDENCE = 10 ) +//goland:noinspection GoUnusedGlobalVariable var TransitionserializationNames = []string{ "INVALID", "EPSILON", @@ -127,19 +128,22 @@ var TransitionserializationNames = []string{ // TransitionPRECEDENCE //} +// AtomTransition // TODO: make all transitions sets? 
no, should remove set edges type AtomTransition struct { - *BaseTransition + BaseTransition } func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. + t := &AtomTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionATOM, + label: intervalSet, + isEpsilon: false, + }, + } t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM return t } @@ -150,7 +154,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet { return s } -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *AtomTransition) Matches(symbol, _, _ int) bool { return t.label == symbol } @@ -159,48 +163,45 @@ func (t *AtomTransition) String() string { } type RuleTransition struct { - *BaseTransition - + BaseTransition followState ATNState ruleIndex, precedence int } func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t + return &RuleTransition{ + BaseTransition: BaseTransition{ + target: ruleStart, + isEpsilon: true, + serializationType: TransitionRULE, + }, + ruleIndex: ruleIndex, + precedence: precedence, + followState: followState, + } } -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *RuleTransition) Matches(_, _, _ int) bool { return false } type EpsilonTransition struct { - *BaseTransition - + BaseTransition outermostPrecedenceReturn int } func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t + return &EpsilonTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionEPSILON, + isEpsilon: true, + }, + outermostPrecedenceReturn: outermostPrecedenceReturn, + } } -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *EpsilonTransition) Matches(_, _, _ int) bool { return false } @@ -209,19 +210,20 @@ func (t *EpsilonTransition) String() string { } type RangeTransition struct { - *BaseTransition - + BaseTransition start, stop int } func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop + t := &RangeTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionRANGE, + isEpsilon: false, + }, + start: start, + stop: stop, + } t.intervalSet = t.makeLabel() return t } @@ -232,7 +234,7 @@ func (t *RangeTransition) makeLabel() *IntervalSet { return s } -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *RangeTransition) Matches(symbol, _, _ int) bool { return symbol >= t.start && symbol <= t.stop } @@ -252,40 +254,41 @@ type AbstractPredicateTransition interface { } type BaseAbstractPredicateTransition struct { - *BaseTransition + BaseTransition 
} func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t + return &BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + }, + } } func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} type PredicateTransition struct { - *BaseAbstractPredicateTransition - + BaseAbstractPredicateTransition isCtxDependent bool ruleIndex, predIndex int } func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t + return &PredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPREDICATE, + isEpsilon: true, + }, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + predIndex: predIndex, + } } -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *PredicateTransition) Matches(_, _, _ int) bool { return false } @@ -298,26 +301,25 @@ func (t *PredicateTransition) String() string { } type ActionTransition struct { - *BaseTransition - + BaseTransition isCtxDependent bool ruleIndex, actionIndex, predIndex int } func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t + return &ActionTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionACTION, + isEpsilon: true, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + actionIndex: actionIndex, + } } -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *ActionTransition) Matches(_, _, _ int) bool { return false } @@ -326,26 +328,27 @@ func (t *ActionTransition) String() string { } type SetTransition struct { - *BaseTransition + BaseTransition } func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + t := &SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionSET, + }, + } - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET if set != nil { t.intervalSet = set } else { t.intervalSet = NewIntervalSet() t.intervalSet.addOne(TokenInvalidType) } - return t } -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *SetTransition) Matches(symbol, _, _ int) bool { return t.intervalSet.contains(symbol) } @@ -354,16 +357,24 @@ func (t *SetTransition) String() string { } type NotSetTransition struct { - *SetTransition + SetTransition } func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET + t := &NotSetTransition{ + SetTransition: SetTransition{ + 
BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionNOTSET, + }, + }, + } + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } return t } @@ -377,16 +388,16 @@ func (t *NotSetTransition) String() string { } type WildcardTransition struct { - *BaseTransition + BaseTransition } func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t + return &WildcardTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionWILDCARD, + }, + } } func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { @@ -398,24 +409,24 @@ func (t *WildcardTransition) String() string { } type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - + BaseAbstractPredicateTransition precedence int } func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t + return &PrecedencePredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPRECEDENCE, + isEpsilon: true, + }, + }, + precedence: precedence, + } } -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { +func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool { return false } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/vendor/github.com/antlr4-go/antlr/v4/tree.go similarity index 62% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go rename to vendor/github.com/antlr4-go/antlr/v4/tree.go index 85b4f137b5a..c288420fb20 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go +++ b/vendor/github.com/antlr4-go/antlr/v4/tree.go @@ -21,29 +21,23 @@ type Tree interface { type SyntaxTree interface { Tree - - GetSourceInterval() *Interval + GetSourceInterval() Interval } type ParseTree interface { SyntaxTree - Accept(Visitor ParseTreeVisitor) interface{} GetText() string - ToStringTree([]string, Recognizer) string } type RuleNode interface { ParseTree - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext } type TerminalNode interface { ParseTree - GetSymbol() Token } @@ -64,12 +58,12 @@ type BaseParseTreeVisitor struct{} var _ ParseTreeVisitor = &BaseParseTreeVisitor{} -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil } -// TODO +// TODO: Implement this? 
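A recurring pattern in this rewrite: the transition types earlier in the patch switch from pointer embedding (*BaseTransition) to value embedding, so each constructor becomes a single composite literal instead of new() followed by field assignments. Schematically, with hypothetical names:

type base struct{ kind int }

type leaf struct {
	base // embedded by value: stored inline, one allocation for the whole leaf
}

func newLeaf() *leaf {
	// one literal initializes the outer and embedded fields together
	return &leaf{base: base{kind: 1}}
}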
//func (this ParseTreeVisitor) Visit(ctx) { // if (Utils.isArray(ctx)) { // self := this @@ -101,15 +95,14 @@ type BaseParseTreeListener struct{} var _ ParseTreeListener = &BaseParseTreeListener{} -func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {} -func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} -func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} -func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} +func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {} +func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {} +func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {} +func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {} type TerminalNodeImpl struct { parentCtx RuleContext - - symbol Token + symbol Token } var _ TerminalNode = &TerminalNodeImpl{} @@ -123,7 +116,7 @@ func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { return tn } -func (t *TerminalNodeImpl) GetChild(i int) Tree { +func (t *TerminalNodeImpl) GetChild(_ int) Tree { return nil } @@ -131,7 +124,7 @@ func (t *TerminalNodeImpl) GetChildren() []Tree { return nil } -func (t *TerminalNodeImpl) SetChildren(tree []Tree) { +func (t *TerminalNodeImpl) SetChildren(_ []Tree) { panic("Cannot set children on terminal node") } @@ -151,7 +144,7 @@ func (t *TerminalNodeImpl) GetPayload() interface{} { return t.symbol } -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { +func (t *TerminalNodeImpl) GetSourceInterval() Interval { if t.symbol == nil { return TreeInvalidInterval } @@ -179,7 +172,7 @@ func (t *TerminalNodeImpl) String() string { return t.symbol.GetText() } -func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { +func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string { return t.String() } @@ -214,10 +207,9 @@ func NewParseTreeWalker() *ParseTreeWalker { return new(ParseTreeWalker) } -// Performs a walk on the given parse tree starting at the root and going down recursively -// with depth-first search. On each node, EnterRule is called before -// recursively walking down into child nodes, then -// ExitRule is called after the recursive call to wind up. +// Walk performs a walk on the given parse tree starting at the root and going down recursively +// with depth-first search. On each node, [EnterRule] is called before +// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up. 
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { switch tt := t.(type) { case ErrorNode: @@ -234,7 +226,7 @@ func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { } } -// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} +// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule] // then by triggering the event specific to the given parse tree node func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { ctx := r.GetRuleContext().(ParserRuleContext) @@ -242,12 +234,71 @@ func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { ctx.EnterRule(listener) } -// Exits a grammar rule by first triggering the event specific to the given parse tree node -// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} +// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event [ParseTreeListener].ExitEveryRule func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { ctx := r.GetRuleContext().(ParserRuleContext) ctx.ExitRule(listener) listener.ExitEveryRule(ctx) } +//goland:noinspection GoUnusedGlobalVariable var ParseTreeWalkerDefault = NewParseTreeWalker() + +type IterativeParseTreeWalker struct { + *ParseTreeWalker +} + +//goland:noinspection GoUnusedExportedFunction +func NewIterativeParseTreeWalker() *IterativeParseTreeWalker { + return new(IterativeParseTreeWalker) +} + +func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + var stack []Tree + var indexStack []int + currentNode := t + currentIndex := 0 + + for currentNode != nil { + // pre-order visit + switch tt := currentNode.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + i.EnterRule(listener, currentNode.(RuleNode)) + } + // Move down to first child, if exists + if currentNode.GetChildCount() > 0 { + stack = append(stack, currentNode) + indexStack = append(indexStack, currentIndex) + currentIndex = 0 + currentNode = currentNode.GetChild(0) + continue + } + + for { + // post-order visit + if ruleNode, ok := currentNode.(RuleNode); ok { + i.ExitRule(listener, ruleNode) + } + // No parent, so no siblings + if len(stack) == 0 { + currentNode = nil + currentIndex = 0 + break + } + // Move to next sibling if possible + currentIndex++ + if stack[len(stack)-1].GetChildCount() > currentIndex { + currentNode = stack[len(stack)-1].GetChild(currentIndex) + break + } + // No next, sibling, so move up + currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1] + currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1] + } + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go b/vendor/github.com/antlr4-go/antlr/v4/trees.go similarity index 81% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go rename to vendor/github.com/antlr4-go/antlr/v4/trees.go index d7dbb032282..f44c05d8111 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go +++ b/vendor/github.com/antlr4-go/antlr/v4/trees.go @@ -8,10 +8,8 @@ import "fmt" /** A set of utility routines useful for all kinds of ANTLR trees. */ -// Print out a whole tree in LISP form. {@link //getNodeText} is used on the -// -// node payloads to get the text for the nodes. Detect -// parse trees and extract data appropriately. 
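The IterativeParseTreeWalker added above replaces the recursive walk with explicit node and child-index stacks, avoiding deep call stacks on pathologically deep parse trees while preserving the enter/exit ordering. A minimal sketch of swapping it in (listener and tree are assumed to exist):

func walkIteratively(listener ParseTreeListener, tree Tree) {
	walker := NewIterativeParseTreeWalker()
	walker.Walk(listener, tree) // same observable ordering as ParseTreeWalkerDefault.Walk
}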
+// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the +// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately. func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { if recog != nil { @@ -32,7 +30,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { } for i := 1; i < c; i++ { s = TreesStringTree(tree.GetChild(i), ruleNames, nil) - res += (" " + s) + res += " " + s } res += ")" return res @@ -62,7 +60,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { } } - // no recog for rule names + // no recognition for rule names payload := t.GetPayload() if p2, ok := payload.(Token); ok { return p2.GetText() @@ -71,7 +69,9 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { return fmt.Sprint(t.GetPayload()) } -// Return ordered list of all children of this node +// TreesGetChildren returns am ordered list of all children of this node +// +//goland:noinspection GoUnusedExportedFunction func TreesGetChildren(t Tree) []Tree { list := make([]Tree, 0) for i := 0; i < t.GetChildCount(); i++ { @@ -80,9 +80,10 @@ func TreesGetChildren(t Tree) []Tree { return list } -// Return a list of all ancestors of this node. The first node of +// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root +// and the last node is the parent of this node. // -// list is the root and the last is the parent of this node. +//goland:noinspection GoUnusedExportedFunction func TreesgetAncestors(t Tree) []Tree { ancestors := make([]Tree, 0) t = t.GetParent() @@ -94,10 +95,12 @@ func TreesgetAncestors(t Tree) []Tree { return ancestors } +//goland:noinspection GoUnusedExportedFunction func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { return TreesfindAllNodes(t, ttype, true) } +//goland:noinspection GoUnusedExportedFunction func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { return TreesfindAllNodes(t, ruleIndex, false) } @@ -129,6 +132,7 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr } } +//goland:noinspection GoUnusedExportedFunction func TreesDescendants(t ParseTree) []ParseTree { nodes := []ParseTree{t} for i := 0; i < t.GetChildCount(); i++ { diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go similarity index 85% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go rename to vendor/github.com/antlr4-go/antlr/v4/utils.go index 9fad5d916bc..733d7df9dc7 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go +++ b/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ -9,8 +9,10 @@ import ( "errors" "fmt" "math/bits" + "os" "strconv" "strings" + "syscall" ) func intMin(a, b int) int { @@ -31,7 +33,7 @@ func intMax(a, b int) int { type IntStack []int -var ErrEmptyStack = errors.New("Stack is empty") +var ErrEmptyStack = errors.New("stack is empty") func (s *IntStack) Pop() (int, error) { l := len(*s) - 1 @@ -47,33 +49,13 @@ func (s *IntStack) Push(e int) { *s = append(*s, e) } -type comparable interface { - Equals(other Collectable[any]) bool -} - -func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool { - - return a.Equals(b) -} - -func standardHashFunction(a interface{}) int { - if h, ok := a.(hasher); ok { - return h.Hash() - } - - panic("Not Hasher") -} - -type hasher interface { - Hash() int -} - const bitsPerWord = 64 
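With 64-bit words, a bit index splits into a word index and an in-word offset; for example, bit 130 lives in word 130/64 = 2 at offset 130%64 = 2. A sketch of the mapping (hypothetical helper):

func bitPosition(bit int) (word int, mask uint64) {
	// e.g. bit 130 -> word 2, mask 1<<2
	return bit / bitsPerWord, uint64(1) << (bit % bitsPerWord)
}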
func indexForBit(bit int) int { return bit / bitsPerWord } +//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction func wordForBit(data []uint64, bit int) uint64 { idx := indexForBit(bit) if idx >= len(data) { @@ -94,6 +76,8 @@ type BitSet struct { data []uint64 } +// NewBitSet creates a new bitwise set +// TODO: See if we can replace with the standard library's BitSet func NewBitSet() *BitSet { return &BitSet{} } @@ -123,7 +107,7 @@ func (b *BitSet) or(set *BitSet) { setLen := set.minLen() maxLen := intMax(bLen, setLen) if maxLen > len(b.data) { - // Increase the size of len(b.data) to repesent the bits in both sets. + // Increase the size of len(b.data) to represent the bits in both sets. data := make([]uint64, maxLen) copy(data, b.data) b.data = data @@ -246,37 +230,6 @@ func (a *AltDict) values() []interface{} { return vs } -type DoubleDict struct { - data map[int]map[int]interface{} -} - -func NewDoubleDict() *DoubleDict { - dd := new(DoubleDict) - dd.data = make(map[int]map[int]interface{}) - return dd -} - -func (d *DoubleDict) Get(a, b int) interface{} { - data := d.data[a] - - if data == nil { - return nil - } - - return data[b] -} - -func (d *DoubleDict) set(a, b int, o interface{}) { - data := d.data[a] - - if data == nil { - data = make(map[int]interface{}) - d.data[a] = data - } - - data[b] = o -} - func EscapeWhitespace(s string, escapeSpaces bool) string { s = strings.Replace(s, "\t", "\\t", -1) @@ -288,6 +241,7 @@ func EscapeWhitespace(s string, escapeSpaces bool) string { return s } +//goland:noinspection GoUnusedExportedFunction func TerminalNodeToStringArray(sa []TerminalNode) []string { st := make([]string, len(sa)) @@ -298,6 +252,7 @@ func TerminalNodeToStringArray(sa []TerminalNode) []string { return st } +//goland:noinspection GoUnusedExportedFunction func PrintArrayJavaStyle(sa []string) string { var buffer bytes.Buffer @@ -350,3 +305,24 @@ func murmurFinish(h int, numberOfWords int) int { return int(hash) } + +func isDirectory(dir string) (bool, error) { + fileInfo, err := os.Stat(dir) + if err != nil { + switch { + case errors.Is(err, syscall.ENOENT): + // The given directory does not exist, so we will try to create it + // + err = os.MkdirAll(dir, 0755) + if err != nil { + return false, err + } + + return true, nil + case err != nil: + return false, err + default: + } + } + return fileInfo.IsDir(), err +} diff --git a/vendor/github.com/cilium/cilium/AUTHORS b/vendor/github.com/cilium/cilium/AUTHORS index 1f94a30b8ea..f02ba5c2791 100644 --- a/vendor/github.com/cilium/cilium/AUTHORS +++ b/vendor/github.com/cilium/cilium/AUTHORS @@ -69,6 +69,7 @@ ArthurChiao arthurchiao@hotmail.com Arthur Evstifeev mail@ap4y.me Arthur Outhenin-Chalandre arthur@cri.epita.fr Arvind Soni arvind@covalent.io +Ashley Reese ashley.reese@firma.seznam.cz Ashray Jain ashrayj@palantir.com Ashwin Paranjpe ashwin@covalent.io Assiya Khuzyakhmetova assiya.khuzyakhmetova@nu.edu.kz @@ -109,6 +110,7 @@ Chance Zibolski chance.zibolski@gmail.com Changyu Wang changyuwang@tencent.com Charles-Edouard Brétéché charled.breteche@gmail.com Charles-Henri Guérin charles-henri.guerin@zenika.com +chaunceyjiang chaunceyjiang@gmail.com Chen Kang kongchen28@gmail.com chentanjun tanjunchen20@gmail.com chenyahui chenyahui9@jd.com @@ -375,6 +377,7 @@ Manuel Stößel manuel.stoessel@t-systems.com Marcel Zieba marcel.zieba@isovalent.com Marcin Skarbek git@skarbek.name Marcin Swiderski forgems@gmail.com +Marco Aurelio Caldas Miranda 17923899+macmiranda@users.noreply.github.com Marco Hofstetter 
marco.hofstetter@isovalent.com Marco Iorio marco.iorio@isovalent.com Marco Kilchhofer mkilchhofer@users.noreply.github.com @@ -456,6 +459,7 @@ Nishant Burte nburte@google.com Nitish Malhotra nitishm@microsoft.com Noel Georgi git@frezbo.dev nrnrk noriki6t@gmail.com +nxyt lolnoxy@gmail.com Odin Ugedal ougedal@palantir.com Oilbeater mengxin@alauda.io Oksana Baranova oksana.baranova@intel.com @@ -539,6 +543,7 @@ Saim Safdar 59512053+Saim-Safdar@users.noreply.githu Saiyam Pathak saiyam@civo.com Salvatore Mazzarino salvatore@accuknox.com Sami Yessou fnzv@users.noreply.github.com +Samuel Lang gh@lang-sam.de Samuel Torres samuelpirestorres@gmail.com Sander Timmerman stimmerman@schubergphilis.com Sandipan Panda samparksandipan@gmail.com @@ -574,6 +579,7 @@ Stevo Slavić sslavic@gmail.com Stijn Smits stijn@stijn98s.nl Strukov Anton anstrukov@luxoft.com Stuart Preston mail@stuartpreston.net +Su Fei sofat1989@126.com Sugang Li sugangli@google.com Sven Haardiek sven.haardiek@uni-muenster.de Swaminathan Vasudevan svasudevan@suse.com @@ -592,6 +598,7 @@ Thomas Bachman tbachman@yahoo.com Thomas Balthazar thomas@balthazar.info Thomas Gosteli thomas.gosteli@protonmail.com Thomas Graf thomas@cilium.io +Thorben von Hacht tvonhacht@apple.com tigerK yanru.lv@daocloud.io Tim Horner timothy.horner@isovalent.com Timo Beckers timo@isovalent.com diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go b/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go index e479d2ee9b0..4c3f586fa5d 100644 --- a/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go +++ b/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go @@ -202,11 +202,19 @@ type Backend interface { // error in that case is not expected to be fatal. The actual ID is obtained // by Allocator from the local idPool, which is updated with used-IDs as the // Backend makes calls to the handler in ListAndWatch. - AllocateID(ctx context.Context, id idpool.ID, key AllocatorKey) error + // The implementation of the backend might return an AllocatorKey that is + // a copy of 'key' with an internal reference of the backend key or, if it + // doesn't use the internal reference of the backend key it simply returns + // 'key'. In case of an error the returned 'AllocatorKey' should be nil. + AllocateID(ctx context.Context, id idpool.ID, key AllocatorKey) (AllocatorKey, error) // AllocateIDIfLocked behaves like AllocateID but when lock is non-nil the // operation proceeds only if it is still valid. - AllocateIDIfLocked(ctx context.Context, id idpool.ID, key AllocatorKey, lock kvstore.KVLocker) error + // The implementation of the backend might return an AllocatorKey that is + // a copy of 'key' with an internal reference of the backend key or, if it + // doesn't use the internal reference of the backend key it simply returns + // 'key'. In case of an error the returned 'AllocatorKey' should be nil. + AllocateIDIfLocked(ctx context.Context, id idpool.ID, key AllocatorKey, lock kvstore.KVLocker) (AllocatorKey, error) // AcquireReference records that this node is using this key->ID mapping. // This is distinct from any reference counting within this agent; only one @@ -464,6 +472,13 @@ type AllocatorKey interface { // PutKeyFromMap stores the labels in v into the key to be used later. This // is the inverse operation to GetAsMap. PutKeyFromMap(v map[string]string) AllocatorKey + + // PutValue puts metadata inside the global identity for the given 'key' with + // the given 'value'. 
+ PutValue(key any, value any) AllocatorKey + + // Value returns the value stored in the metadata map. + Value(key any) any } func (a *Allocator) encodeKey(key AllocatorKey) string { @@ -530,7 +545,7 @@ func (a *Allocator) lockedAllocate(ctx context.Context, key AllocatorKey) (idpoo if err = a.backend.AcquireReference(ctx, value, key, lock); err != nil { a.localKeys.release(k) - return 0, false, false, fmt.Errorf("unable to create slave key '%s': %s", k, err) + return 0, false, false, fmt.Errorf("unable to create secondary key '%s': %s", k, err) } // mark the key as verified in the local cache @@ -579,12 +594,15 @@ func (a *Allocator) lockedAllocate(ctx context.Context, key AllocatorKey) (idpoo return 0, false, false, fmt.Errorf("Found master key after proceeding with new allocation for %s", k) } - err = a.backend.AllocateIDIfLocked(ctx, id, key, lock) + // Assigned to 'key' from 'key2' since in case of an error, we don't replace + // the original 'key' variable with 'nil'. + key2 := key + key, err = a.backend.AllocateIDIfLocked(ctx, id, key2, lock) if err != nil { // Creation failed. Another agent most likely beat us to allocting this // ID, retry. releaseKeyAndID() - return 0, false, false, fmt.Errorf("unable to allocate ID %s for key %s: %s", strID, key, err) + return 0, false, false, fmt.Errorf("unable to allocate ID %s for key %s: %s", strID, key2, err) } // Notify pool that leased ID is now in-use. @@ -595,7 +613,7 @@ func (a *Allocator) lockedAllocate(ctx context.Context, key AllocatorKey) (idpoo // exposed and may be in use by other nodes. The garbage // collector will release it again. releaseKeyAndID() - return 0, false, false, fmt.Errorf("slave key creation failed '%s': %s", k, err) + return 0, false, false, fmt.Errorf("secondary key creation failed '%s': %s", k, err) } // mark the key as verified in the local cache @@ -906,7 +924,7 @@ type RemoteCache struct { allocator *Allocator cache *cache - watchFunc func(ctx context.Context, remote *RemoteCache) + watchFunc func(ctx context.Context, remote *RemoteCache, onSync func(context.Context)) } func (a *Allocator) NewRemoteCache(remoteName string, remoteAlloc *Allocator) *RemoteCache { @@ -924,7 +942,7 @@ func (a *Allocator) NewRemoteCache(remoteName string, remoteAlloc *Allocator) *R // kvstore will be maintained in the RemoteCache structure returned and will // start being reported in the identities returned by the ForeachCache() // function. RemoteName should be unique per logical "remote". -func (a *Allocator) WatchRemoteKVStore(ctx context.Context, rc *RemoteCache) { +func (a *Allocator) WatchRemoteKVStore(ctx context.Context, rc *RemoteCache, onSync func(context.Context)) { scopedLog := log.WithField(logfields.ClusterName, rc.name) scopedLog.Info("Starting remote kvstore watcher") @@ -981,6 +999,9 @@ func (a *Allocator) WatchRemoteKVStore(ctx context.Context, rc *RemoteCache) { rc.cache.mutex.RUnlock() } + // Execute the on-sync callback handler. + onSync(ctx) + <-ctx.Done() rc.close() scopedLog.Info("Stopped remote kvstore watcher") @@ -1002,8 +1023,8 @@ func (a *Allocator) RemoveRemoteKVStore(remoteName string) { // Watch starts watching the remote kvstore and synchronize the identities in // the local cache. It blocks until the context is closed. 
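WatchRemoteKVStore (and hence RemoteCache.Watch below) now takes an onSync callback that fires once the initial listing from the remote kvstore has been processed, so a caller can gate readiness on it. A sketch, with hypothetical wiring:

func watchUntilSynced(ctx context.Context, rc *RemoteCache) {
	synced := make(chan struct{})
	go rc.Watch(ctx, func(context.Context) { close(synced) }) // Watch blocks until ctx is done
	<-synced // remote identities are now reflected in the local cache
}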
-func (rc *RemoteCache) Watch(ctx context.Context) { - rc.watchFunc(ctx, rc) +func (rc *RemoteCache) Watch(ctx context.Context, onSync func(context.Context)) { + rc.watchFunc(ctx, rc, onSync) } // NumEntries returns the number of entries in the remote cache diff --git a/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go b/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go new file mode 100644 index 00000000000..959e9037303 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package exec provides useful wrappers around the standard "exec" library. +package exec diff --git a/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go b/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go new file mode 100644 index 00000000000..f5bd6400eeb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package exec + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "os/exec" + "time" + + "github.com/sirupsen/logrus" +) + +func warnToLog(cmd *exec.Cmd, out []byte, scopedLog *logrus.Entry, err error) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + scopedLog.Warn(scanner.Text()) + } +} + +// combinedOutput is the core implementation of catching deadline exceeded +// options and logging errors. +func combinedOutput(ctx context.Context, cmd *exec.Cmd, scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := cmd.CombinedOutput() + if ctx.Err() != nil { + if !errors.Is(ctx.Err(), context.Canceled) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + } + return nil, fmt.Errorf("Command execution failed for %s: %w", cmd.Args, ctx.Err()) + } + if err != nil && verbose { + warnToLog(cmd, out, scopedLog, err) + } + return out, err +} + +// output is the equivalent to combinedOutput with only capturing stdout +func output(ctx context.Context, cmd *exec.Cmd, scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := cmd.Output() + if ctx.Err() != nil { + if !errors.Is(ctx.Err(), context.Canceled) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + } + return nil, fmt.Errorf("Command execution failed for %s: %w", cmd.Args, ctx.Err()) + } + if err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + err = fmt.Errorf("%w stderr=%q", exitErr, exitErr.Stderr) + } + if verbose { + warnToLog(cmd, out, scopedLog, err) + } + } + return out, err +} + +// Cmd wraps exec.Cmd with a context to provide convenient execution of a +// command with nice checking of the context timeout in the form: +// +// err := exec.Prog().WithTimeout(5*time.Second, myprog, myargs...).CombinedOutput(log, verbose) +type Cmd struct { + *exec.Cmd + ctx context.Context + cancelFn func() +} + +// CommandContext wraps exec.CommandContext to allow this package to be used as +// a drop-in replacement for the standard exec library. +func CommandContext(ctx context.Context, prog string, args ...string) *Cmd { + return &Cmd{ + Cmd: exec.CommandContext(ctx, prog, args...), + ctx: ctx, + } +} + +// WithTimeout creates a Cmd with a context that times out after the specified +// duration. 
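Taken together, the helpers in this file support call sites like the following (program, arguments, and logger are placeholders):

func runWithDeadline(scopedLog *logrus.Entry) ([]byte, error) {
	// The 5s budget covers process start-up and output collection; on
	// expiry, CombinedOutput reports a context deadline error.
	cmd := WithTimeout(5*time.Second, "iptables", "-L")
	return cmd.CombinedOutput(scopedLog, true)
}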
+func WithTimeout(timeout time.Duration, prog string, args ...string) *Cmd { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + cmd := CommandContext(ctx, prog, args...) + cmd.cancelFn = cancel + return cmd +} + +// WithCancel creates a Cmd with a context that can be cancelled by calling the +// resulting Cancel() function. +func WithCancel(ctx context.Context, prog string, args ...string) (*Cmd, context.CancelFunc) { + newCtx, cancel := context.WithCancel(ctx) + cmd := CommandContext(newCtx, prog, args...) + return cmd, cancel +} + +// CombinedOutput runs the command and returns its combined standard output and +// standard error. Unlike the standard library, if the context is exceeded, it +// will return an error indicating so. +// +// Logs any errors that occur to the specified logger. +func (c *Cmd) CombinedOutput(scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := combinedOutput(c.ctx, c.Cmd, scopedLog, verbose) + if c.cancelFn != nil { + c.cancelFn() + } + return out, err +} + +// Output runs the command and returns only standard output, but not the +// standard error. Unlike the standard library, if the context is exceeded, +// it will return an error indicating so. +// +// Logs any errors that occur to the specified logger. +func (c *Cmd) Output(scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := output(c.ctx, c.Cmd, scopedLog, verbose) + if c.cancelFn != nil { + c.cancelFn() + } + return out, err +} diff --git a/vendor/github.com/cilium/cilium/pkg/controller/manager.go b/vendor/github.com/cilium/cilium/pkg/controller/manager.go index 3c11c89a9ab..675601bfa84 100644 --- a/vendor/github.com/cilium/cilium/pkg/controller/manager.go +++ b/vendor/github.com/cilium/cilium/pkg/controller/manager.go @@ -55,6 +55,7 @@ func (m *Manager) updateController(name string, params ControllerParams) *manage start := time.Now() m.mutex.Lock() + defer m.mutex.Unlock() if m.controllers == nil { m.controllers = controllerMap{} @@ -70,36 +71,57 @@ func (m *Manager) updateController(name string, params ControllerParams) *manage case ctrl.update <- ctrl.params: default: } - m.mutex.Unlock() ctrl.getLogger().Debug("Controller update time: ", time.Since(start)) } else { - ctrl = &managedController{ - controller: controller{ - name: name, - uuid: uuid.New().String(), - stop: make(chan struct{}), - update: make(chan ControllerParams, 1), - trigger: make(chan struct{}, 1), - terminated: make(chan struct{}), - }, - } - ctrl.updateParamsLocked(params) - ctrl.getLogger().Debug("Starting new controller") + return m.createControllerLocked(name, params) + } + return ctrl +} - m.controllers[ctrl.name] = ctrl - m.mutex.Unlock() +func (m *Manager) createControllerLocked(name string, params ControllerParams) *managedController { + ctrl := &managedController{ + controller: controller{ + name: name, + uuid: uuid.New().String(), + stop: make(chan struct{}), + update: make(chan ControllerParams, 1), + trigger: make(chan struct{}, 1), + terminated: make(chan struct{}), + }, + } + ctrl.updateParamsLocked(params) + ctrl.getLogger().Debug("Starting new controller") - globalStatus.mutex.Lock() - globalStatus.controllers[ctrl.uuid] = ctrl - globalStatus.mutex.Unlock() + m.controllers[ctrl.name] = ctrl - go ctrl.runController(ctrl.params) - } + globalStatus.mutex.Lock() + globalStatus.controllers[ctrl.uuid] = ctrl + globalStatus.mutex.Unlock() + go ctrl.runController(ctrl.params) return ctrl } +// CreateController installs a new controller in the +// manager. 
If a controller with the name already exists +// this method returns false without triggering, otherwise +// creates the controller and runs it immediately. +func (m *Manager) CreateController(name string, params ControllerParams) bool { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.controllers != nil { + if _, exists := m.controllers[name]; exists { + return false + } + } else { + m.controllers = controllerMap{} + } + m.createControllerLocked(name, params) + return true +} + func (m *Manager) removeController(ctrl *managedController) { ctrl.stopController() delete(m.controllers, ctrl.name) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go new file mode 100644 index 00000000000..33ca8842645 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + "fmt" + "sync" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/link" + "golang.org/x/sys/unix" +) + +// HaveAttachCgroup returns nil if the kernel is compiled with +// CONFIG_CGROUP_BPF. +// +// It's only an approximation and doesn't execute a successful cgroup attachment +// under the hood. If any unexpected errors are encountered, the original error +// is returned. +func HaveAttachCgroup() error { + attachCgroupOnce.Do(func() { + attachCgroupResult = haveAttachCgroup() + }) + + return attachCgroupResult +} + +func haveAttachCgroup() error { + // Load known-good program supported by the earliest kernels with cgroup + // support. + spec := &ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + AttachType: ebpf.AttachCGroupInetIngress, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + } + + p, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err != nil { + return fmt.Errorf("create cgroup program: %w: %w", err, ebpf.ErrNotSupported) + } + defer p.Close() + + // Attaching to a non-cgroup node should result in EBADF when creating the + // link, compared to EINVAL if the kernel does not support or was compiled + // without CONFIG_CGROUP_BPF. + _, err = link.AttachCgroup(link.CgroupOptions{Path: "/dev/null", Program: p, Attach: spec.AttachType}) + if errors.Is(err, unix.EBADF) { + // The kernel checked the given file descriptor from within the cgroup prog + // attach handler. Assume it supports attaching cgroup progs. + return nil + } + if err != nil { + // Preserve the original error in the error string. Needs Go 1.20. 
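	// Go 1.20's fmt.Errorf accepts multiple %w verbs, so both wrapped errors
	// below stay matchable; e.g. a caller can still branch on the sentinel:
	//
	//	if errors.Is(err, ebpf.ErrNotSupported) { /* kernel lacks cgroup BPF */ }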
+ return fmt.Errorf("link cgroup program to /dev/null: %w: %w", err, ebpf.ErrNotSupported) + } + + return errors.New("attaching prog to /dev/null did not result in error") +} + +var attachCgroupOnce sync.Once +var attachCgroupResult error diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go new file mode 100644 index 00000000000..5ce0c0aa9a8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + + "golang.org/x/sys/unix" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + + "github.com/cilium/cilium/pkg/lock" +) + +// HaveAttachType returns nil if the given program/attach type combination is +// supported by the underlying kernel. Returns ebpf.ErrNotSupported if loading a +// program with the given Program/AttachType fails. If the probe is inconclusive +// due to an unrecognized return code, the original error is returned. +// +// Note that program types that don't use attach types will silently succeed if +// an attach type is specified. +// +// Probe results are cached by the package and shouldn't be memoized by the +// caller. +func HaveAttachType(pt ebpf.ProgramType, at ebpf.AttachType) (err error) { + if err := features.HaveProgramType(pt); err != nil { + return err + } + + attachProbesMu.Lock() + defer attachProbesMu.Unlock() + if err, ok := attachProbes[attachProbe{pt, at}]; ok { + return err + } + + defer func() { + // Closes over named return variable err to cache any returned errors. + attachProbes[attachProbe{pt, at}] = err + }() + + spec := &ebpf.ProgramSpec{ + Type: pt, + AttachType: at, + Instructions: asm.Instructions{ + // recvmsg and peername require a return value of 1, use it for all probes. + asm.LoadImm(asm.R0, 1, asm.DWord), + asm.Return(), + }, + } + + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + // EINVAL occurs when attempting to create a program with an unknown type. + // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given prog type. + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.E2BIG) { + err = ebpf.ErrNotSupported + } + if err != nil { + return err + } + + return nil +} + +type attachProbe struct { + pt ebpf.ProgramType + at ebpf.AttachType +} + +var attachProbesMu lock.Mutex +var attachProbes map[attachProbe]error = make(map[attachProbe]error) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go new file mode 100644 index 00000000000..285c8851d52 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package probes provides BPF features checks based on bpftool. 
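For reference, a typical use of the HaveAttachType probe above (sketch; the cgroup constants come from the cilium/ebpf package):

func cgroupEgressSupported() bool {
	// Results are cached per (program type, attach type) pair, so repeated
	// calls are cheap.
	return HaveAttachType(ebpf.CGroupSKB, ebpf.AttachCGroupInetEgress) == nil
}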
+package probes diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go new file mode 100644 index 00000000000..c815eb729e4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "time" +) + +// Available CONFIG_HZ values, sorted from highest to lowest. +var hzValues = []uint16{1000, 300, 250, 100} + +// KernelHZ attempts to estimate the kernel's CONFIG_HZ compile-time value by +// making snapshots of the kernel timestamp with a time interval in between. +// +// Blocks for at least 100ms while the measurement is in progress. Can block +// significantly longer under some hypervisors like VirtualBox due to buggy +// clocks, interrupt coalescing and low timer resolution. +func KernelHZ() (uint16, error) { + f, err := os.Open("/proc/schedstat") + if err != nil { + return 0, err + } + defer f.Close() + + // Measure the kernel timestamp at least 100ms apart, giving kernel timer and + // wall clock ample opportunity to advance for adequate sample size. + j1, err := readSchedstat(f) + if err != nil { + return 0, err + } + + // On some platforms, this can put the goroutine to sleep for significantly + // longer than 100ms. Do not rely on readings being anywhere near 100ms apart. + time.Sleep(time.Millisecond * 100) + + j2, err := readSchedstat(f) + if err != nil { + return 0, err + } + + hz, err := j1.interpolate(j2) + if err != nil { + return 0, fmt.Errorf("interpolating hz value: %w", err) + } + + return nearest(hz, hzValues) +} + +// Jiffies returns the kernel's internal timestamp in jiffies read from +// /proc/schedstat. +func Jiffies() (uint64, error) { + f, err := os.Open("/proc/schedstat") + if err != nil { + return 0, err + } + defer f.Close() + + k, err := readSchedstat(f) + if err != nil { + return 0, err + } + + return k.k, nil +} + +// readSchedstat expects to read /proc/schedstat and returns the first line +// matching 'timestamp %d'. Upon return, f is rewound to allow reuse. +// +// Should not be called concurrently. +func readSchedstat(f io.ReadSeeker) (ktime, error) { + // Rewind the file when done so the next call gets fresh data. + defer func() { _, _ = f.Seek(0, 0) }() + + var j uint64 + var t = time.Now() + + s := bufio.NewScanner(f) + for s.Scan() { + if _, err := fmt.Sscanf(s.Text(), "timestamp %d", &j); err == nil { + return ktime{j, t}, nil + } + } + + return ktime{}, errors.New("no kernel timestamp found") +} + +type ktime struct { + k uint64 + t time.Time +} + +// interpolate returns the amount of jiffies (ktime) that would have elapsed if +// both ktimes were measured exactly 1 second apart. Using linear interpolation, +// the delta between both kernel timestamps is adjusted based on the elapsed +// wall time between both measurements. +func (old ktime) interpolate(new ktime) (uint16, error) { + if old.t.After(new.t) { + return 0, fmt.Errorf("old wall time %v is more recent than %v", old.t, new.t) + } + if old.k > new.k { + return 0, fmt.Errorf("old kernel timer %d is higher than %d", old.k, new.k) + } + + // Jiffy and duration delta. + kd := new.k - old.k + td := new.t.Sub(old.t) + + // Linear interpolation to represent elapsed jiffies as a per-second value. 
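+	// Worked example with illustrative numbers: kd = 25 jiffies measured over
+	// td = 250ms of wall time gives 25 / 0.25 = 100 jiffies per second, which
+	// nearest() below snaps to CONFIG_HZ=100.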
+ hz := float64(kd) / td.Seconds() + hz = math.Round(hz) + if hz > math.MaxUint16 { + return 0, fmt.Errorf("interpolated hz value would overflow uint16: %f", hz) + } + + return uint16(hz), nil +} + +// nearest returns the entry from values that's closest to in. If in has an +// equal distance to multiple values, the value that appears the earliest in +// values wins. Returns error if values is empty. +func nearest(in uint16, values []uint16) (uint16, error) { + if len(values) == 0 { + return 0, errors.New("values cannot be empty") + } + + var out uint16 + min := ^uint16(0) + for _, v := range values { + // Get absolute distance between in and v. + d := uint16(in - v) + if in < v { + d = v - in + } + + // Check if the distance to the current number is smaller than to the + // previous number. + if d < min { + min = d + out = v + } + } + + return out, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go new file mode 100644 index 00000000000..f260c1e2ffb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + "fmt" + "net" + "runtime" + "sync" + + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" +) + +var ( + managedNeighborOnce sync.Once + managedNeighborResult error +) + +// HaveManagedNeighbors returns nil if the host supports managed neighbor entries (NTF_EXT_MANAGED). +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveManagedNeighbors() error { + managedNeighborOnce.Do(func() { + ch := make(chan struct{}) + + // In order to call haveManagedNeighbors safely, it has to be started + // in a goroutine, so we can make sure the goroutine ends when the function exits. + // This makes sure the underlying OS thread exits if we fail to restore it to the original netns. + go func() { + managedNeighborResult = haveManagedNeighbors() + close(ch) + }() + <-ch // wait for probe to finish + + // if we encounter a different error than ErrNotSupported, terminate the agent. + if managedNeighborResult != nil && !errors.Is(managedNeighborResult, ErrNotSupported) { + log.WithError(managedNeighborResult).Fatal("failed to probe managed neighbor support") + } + }) + + return managedNeighborResult +} + +func haveManagedNeighbors() (outer error) { + runtime.LockOSThread() + oldns, err := netns.Get() + if err != nil { + return fmt.Errorf("failed to get current netns: %w", err) + } + defer oldns.Close() + + newns, err := netns.New() + if err != nil { + return fmt.Errorf("failed to create new netns: %w", err) + } + defer newns.Close() + defer func() { + // defer closes over named return variable err + if nerr := netns.Set(oldns); nerr != nil { + // The current goroutine is locked to an OS thread and we've failed + // to undo state modifications to the thread. Returning without unlocking + // the goroutine will make sure the underlying OS thread dies. + outer = fmt.Errorf("error setting thread back to its original netns: %w (original error: %s)", nerr, outer) + return + } + // only now that we have successfully changed the thread back to its + // original state (netns) we can safely unlock the goroutine from its OS thread. 
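+		// (runtime.LockOSThread documents that if the goroutine exits without
+		// unlocking, the underlying OS thread is terminated; a thread stuck in
+		// the wrong netns is thus destroyed instead of being reused.)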
+ runtime.UnlockOSThread() + }() + + // Use a veth device instead of a dummy to avoid the kernel having to modprobe + // the dummy kmod, which could potentially be compiled out. veth is currently + // a hard dependency for Cilium, so safe to assume the module is available if + // not already loaded. + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "veth0"}, + PeerName: "veth1", + } + + if err := netlink.LinkAdd(veth); err != nil { + return fmt.Errorf("failed to add dummy veth: %w", err) + } + + neigh := netlink.Neigh{ + LinkIndex: veth.Index, + IP: net.IPv4(0, 0, 0, 1), + Flags: NTF_EXT_LEARNED, + FlagsExt: NTF_EXT_MANAGED, + } + + if err := netlink.NeighAdd(&neigh); err != nil { + return fmt.Errorf("failed to add neighbor: %w", err) + } + + nl, err := netlink.NeighList(veth.Index, 0) + if err != nil { + return fmt.Errorf("failed to list neighbors: %w", err) + } + + for _, n := range nl { + if !n.IP.Equal(neigh.IP) { + continue + } + if n.Flags != NTF_EXT_LEARNED { + continue + } + if n.FlagsExt != NTF_EXT_MANAGED { + continue + } + + return nil + } + + return ErrNotSupported +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go new file mode 100644 index 00000000000..7f0831e7cf1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go @@ -0,0 +1,662 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "text/template" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + "github.com/cilium/ebpf/rlimit" + "golang.org/x/sys/unix" + + "github.com/cilium/cilium/pkg/command/exec" + "github.com/cilium/cilium/pkg/defaults" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "probes") + once sync.Once + probeManager *ProbeManager + tpl = template.New("headerfile") +) + +func init() { + const content = ` +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright Authors of Cilium */ + +/* THIS FILE WAS GENERATED DURING AGENT STARTUP. */ + +#pragma once + +{{- if not .Common}} +#include "features.h" +{{- end}} + +{{- range $key, $value := .Features}} +{{- if $value}} +#define {{$key}} 1 +{{end}} +{{- end}} +` + var err error + tpl, err = tpl.Parse(content) + if err != nil { + log.WithError(err).Fatal("could not parse headerfile template") + } +} + +// ErrNotSupported indicates that a feature is not supported by the current kernel. +var ErrNotSupported = errors.New("not supported") + +// KernelParam is a type based on string which represents CONFIG_* kernel +// parameters which usually have values "y", "n" or "m". +type KernelParam string + +// Enabled checks whether the kernel parameter is enabled. +func (kp KernelParam) Enabled() bool { + return kp == "y" +} + +// Module checks whether the kernel parameter is enabled as a module. +func (kp KernelParam) Module() bool { + return kp == "m" +} + +// kernelOption holds information about kernel parameters to probe. 
+type kernelOption struct { + Description string + Enabled bool + CanBeModule bool +} + +type ProgramHelper struct { + Program ebpf.ProgramType + Helper asm.BuiltinFunc +} + +type miscFeatures struct { + HaveLargeInsnLimit bool + HaveFibIfindex bool +} + +type FeatureProbes struct { + ProgramHelpers map[ProgramHelper]bool + Misc miscFeatures +} + +// SystemConfig contains kernel configuration and sysctl parameters related to +// BPF functionality. +type SystemConfig struct { + UnprivilegedBpfDisabled int `json:"unprivileged_bpf_disabled"` + BpfJitEnable int `json:"bpf_jit_enable"` + BpfJitHarden int `json:"bpf_jit_harden"` + BpfJitKallsyms int `json:"bpf_jit_kallsyms"` + BpfJitLimit int `json:"bpf_jit_limit"` + ConfigBpf KernelParam `json:"CONFIG_BPF"` + ConfigBpfSyscall KernelParam `json:"CONFIG_BPF_SYSCALL"` + ConfigHaveEbpfJit KernelParam `json:"CONFIG_HAVE_EBPF_JIT"` + ConfigBpfJit KernelParam `json:"CONFIG_BPF_JIT"` + ConfigBpfJitAlwaysOn KernelParam `json:"CONFIG_BPF_JIT_ALWAYS_ON"` + ConfigCgroups KernelParam `json:"CONFIG_CGROUPS"` + ConfigCgroupBpf KernelParam `json:"CONFIG_CGROUP_BPF"` + ConfigCgroupNetClassID KernelParam `json:"CONFIG_CGROUP_NET_CLASSID"` + ConfigSockCgroupData KernelParam `json:"CONFIG_SOCK_CGROUP_DATA"` + ConfigBpfEvents KernelParam `json:"CONFIG_BPF_EVENTS"` + ConfigKprobeEvents KernelParam `json:"CONFIG_KPROBE_EVENTS"` + ConfigUprobeEvents KernelParam `json:"CONFIG_UPROBE_EVENTS"` + ConfigTracing KernelParam `json:"CONFIG_TRACING"` + ConfigFtraceSyscalls KernelParam `json:"CONFIG_FTRACE_SYSCALLS"` + ConfigFunctionErrorInjection KernelParam `json:"CONFIG_FUNCTION_ERROR_INJECTION"` + ConfigBpfKprobeOverride KernelParam `json:"CONFIG_BPF_KPROBE_OVERRIDE"` + ConfigNet KernelParam `json:"CONFIG_NET"` + ConfigXdpSockets KernelParam `json:"CONFIG_XDP_SOCKETS"` + ConfigLwtunnelBpf KernelParam `json:"CONFIG_LWTUNNEL_BPF"` + ConfigNetActBpf KernelParam `json:"CONFIG_NET_ACT_BPF"` + ConfigNetClsBpf KernelParam `json:"CONFIG_NET_CLS_BPF"` + ConfigNetClsAct KernelParam `json:"CONFIG_NET_CLS_ACT"` + ConfigNetSchIngress KernelParam `json:"CONFIG_NET_SCH_INGRESS"` + ConfigXfrm KernelParam `json:"CONFIG_XFRM"` + ConfigIPRouteClassID KernelParam `json:"CONFIG_IP_ROUTE_CLASSID"` + ConfigIPv6Seg6Bpf KernelParam `json:"CONFIG_IPV6_SEG6_BPF"` + ConfigBpfLircMode2 KernelParam `json:"CONFIG_BPF_LIRC_MODE2"` + ConfigBpfStreamParser KernelParam `json:"CONFIG_BPF_STREAM_PARSER"` + ConfigNetfilterXtMatchBpf KernelParam `json:"CONFIG_NETFILTER_XT_MATCH_BPF"` + ConfigBpfilter KernelParam `json:"CONFIG_BPFILTER"` + ConfigBpfilterUmh KernelParam `json:"CONFIG_BPFILTER_UMH"` + ConfigTestBpf KernelParam `json:"CONFIG_TEST_BPF"` + ConfigKernelHz KernelParam `json:"CONFIG_HZ"` +} + +// MapTypes contains bools indicating which types of BPF maps the currently +// running kernel supports. 
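+//
+// The JSON tags mirror the key names in `bpftool -j feature probe` output,
+// e.g. (abridged, illustrative):
+//
+//	{"map_types": {"have_hash_map_type": true, "have_array_map_type": true}}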
+type MapTypes struct { + HaveHashMapType bool `json:"have_hash_map_type"` + HaveArrayMapType bool `json:"have_array_map_type"` + HaveProgArrayMapType bool `json:"have_prog_array_map_type"` + HavePerfEventArrayMapType bool `json:"have_perf_event_array_map_type"` + HavePercpuHashMapType bool `json:"have_percpu_hash_map_type"` + HavePercpuArrayMapType bool `json:"have_percpu_array_map_type"` + HaveStackTraceMapType bool `json:"have_stack_trace_map_type"` + HaveCgroupArrayMapType bool `json:"have_cgroup_array_map_type"` + HaveLruHashMapType bool `json:"have_lru_hash_map_type"` + HaveLruPercpuHashMapType bool `json:"have_lru_percpu_hash_map_type"` + HaveLpmTrieMapType bool `json:"have_lpm_trie_map_type"` + HaveArrayOfMapsMapType bool `json:"have_array_of_maps_map_type"` + HaveHashOfMapsMapType bool `json:"have_hash_of_maps_map_type"` + HaveDevmapMapType bool `json:"have_devmap_map_type"` + HaveSockmapMapType bool `json:"have_sockmap_map_type"` + HaveCpumapMapType bool `json:"have_cpumap_map_type"` + HaveXskmapMapType bool `json:"have_xskmap_map_type"` + HaveSockhashMapType bool `json:"have_sockhash_map_type"` + HaveCgroupStorageMapType bool `json:"have_cgroup_storage_map_type"` + HaveReuseportSockarrayMapType bool `json:"have_reuseport_sockarray_map_type"` + HavePercpuCgroupStorageMapType bool `json:"have_percpu_cgroup_storage_map_type"` + HaveQueueMapType bool `json:"have_queue_map_type"` + HaveStackMapType bool `json:"have_stack_map_type"` +} + +// Features contains BPF feature checks returned by bpftool. +type Features struct { + SystemConfig `json:"system_config"` + MapTypes `json:"map_types"` +} + +// ProbeManager is a manager of BPF feature checks. +type ProbeManager struct { + features Features +} + +// NewProbeManager returns a new instance of ProbeManager - a manager of BPF +// feature checks. +func NewProbeManager() *ProbeManager { + newProbeManager := func() { + probeManager = &ProbeManager{} + probeManager.features = probeManager.Probe() + } + once.Do(newProbeManager) + return probeManager +} + +// Probe probes the underlying kernel for features. +func (*ProbeManager) Probe() Features { + var features Features + out, err := exec.WithTimeout( + defaults.ExecTimeout, + "bpftool", "-j", "feature", "probe", + ).CombinedOutput(log, true) + if err != nil { + log.WithError(err).Fatal("could not run bpftool") + } + if err := json.Unmarshal(out, &features); err != nil { + log.WithError(err).Fatal("could not parse bpftool output") + } + return features +} + +// SystemConfigProbes performs a check of kernel configuration parameters. It +// returns an error when parameters required by Cilium are not enabled. It logs +// warnings when optional parameters are not enabled. 
+//
+// When the kernel config file cannot be found, bpftool is unable to probe the
+// actual kernel configuration, so an error is only returned when the config
+// file exists and a required parameter is disabled.
+func (p *ProbeManager) SystemConfigProbes() error {
+	var notFound bool
+	if !p.KernelConfigAvailable() {
+		notFound = true
+		log.Info("Kernel config file not found: if the agent fails to start, check the system requirements at https://docs.cilium.io/en/stable/operations/system_requirements")
+	}
+	requiredParams := p.GetRequiredConfig()
+	for param, kernelOption := range requiredParams {
+		if !kernelOption.Enabled && !notFound {
+			module := ""
+			if kernelOption.CanBeModule {
+				module = " or module"
+			}
+			return fmt.Errorf("%s kernel parameter%s is required (needed for: %s)", param, module, kernelOption.Description)
+		}
+	}
+	optionalParams := p.GetOptionalConfig()
+	for param, kernelOption := range optionalParams {
+		if !kernelOption.Enabled && !notFound {
+			module := ""
+			if kernelOption.CanBeModule {
+				module = " or module"
+			}
+			log.Warningf("%s optional kernel parameter%s is not in kernel (needed for: %s)", param, module, kernelOption.Description)
+		}
+	}
+	return nil
+}
+
+// GetRequiredConfig performs a check of mandatory kernel configuration options. It
+// returns a map indicating which required kernel parameters are enabled and which
+// are not. GetRequiredConfig is used by the CLI "cilium kernel-check".
+func (p *ProbeManager) GetRequiredConfig() map[KernelParam]kernelOption {
+	config := p.features.SystemConfig
+	coreInfraDescription := "Essential eBPF infrastructure"
+	kernelParams := make(map[KernelParam]kernelOption)
+
+	kernelParams["CONFIG_BPF"] = kernelOption{
+		Enabled:     config.ConfigBpf.Enabled(),
+		Description: coreInfraDescription,
+		CanBeModule: false,
+	}
+	kernelParams["CONFIG_BPF_SYSCALL"] = kernelOption{
+		Enabled:     config.ConfigBpfSyscall.Enabled(),
+		Description: coreInfraDescription,
+		CanBeModule: false,
+	}
+	kernelParams["CONFIG_NET_SCH_INGRESS"] = kernelOption{
+		Enabled:     config.ConfigNetSchIngress.Enabled() || config.ConfigNetSchIngress.Module(),
+		Description: coreInfraDescription,
+		CanBeModule: true,
+	}
+	kernelParams["CONFIG_NET_CLS_BPF"] = kernelOption{
+		Enabled:     config.ConfigNetClsBpf.Enabled() || config.ConfigNetClsBpf.Module(),
+		Description: coreInfraDescription,
+		CanBeModule: true,
+	}
+	kernelParams["CONFIG_NET_CLS_ACT"] = kernelOption{
+		Enabled:     config.ConfigNetClsAct.Enabled(),
+		Description: coreInfraDescription,
+		CanBeModule: false,
+	}
+	kernelParams["CONFIG_BPF_JIT"] = kernelOption{
+		Enabled:     config.ConfigBpfJit.Enabled(),
+		Description: coreInfraDescription,
+		CanBeModule: false,
+	}
+	kernelParams["CONFIG_HAVE_EBPF_JIT"] = kernelOption{
+		Enabled:     config.ConfigHaveEbpfJit.Enabled(),
+		Description: coreInfraDescription,
+		CanBeModule: false,
+	}
+
+	return kernelParams
+}
+
+// GetOptionalConfig performs a check of *optional* kernel configuration options. It
+// returns a map indicating which optional/non-mandatory kernel parameters are enabled.
+// GetOptionalConfig is used by the CLI "cilium kernel-check".
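+//
+// Sketch of intended use (hypothetical caller; pm is a *ProbeManager):
+//
+//	for param, opt := range pm.GetOptionalConfig() {
+//		if !opt.Enabled {
+//			fmt.Printf("%s disabled (needed for: %s)\n", param, opt.Description)
+//		}
+//	}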
+func (p *ProbeManager) GetOptionalConfig() map[KernelParam]kernelOption {
+	config := p.features.SystemConfig
+	kernelParams := make(map[KernelParam]kernelOption)
+
+	kernelParams["CONFIG_CGROUP_BPF"] = kernelOption{
+		Enabled:     config.ConfigCgroupBpf.Enabled(),
+		Description: "Host Reachable Services and Sockmap optimization",
+		CanBeModule: false,
+	}
+	kernelParams["CONFIG_LWTUNNEL_BPF"] = kernelOption{
+		Enabled:     config.ConfigLwtunnelBpf.Enabled(),
+		Description: "Lightweight Tunnel hook for IP-in-IP encapsulation",
+		CanBeModule: false,
+	}
+	kernelParams["CONFIG_BPF_EVENTS"] = kernelOption{
+		Enabled:     config.ConfigBpfEvents.Enabled(),
+		Description: "Visibility and congestion management with datapath",
+		CanBeModule: false,
+	}
+
+	return kernelParams
+}
+
+// KernelConfigAvailable checks whether the kernel config is available on the
+// system.
+func (p *ProbeManager) KernelConfigAvailable() bool {
+	// Check whether the kernel config is available. We replicate bpftool's
+	// logic for locating it:
+	// https://elixir.bootlin.com/linux/v5.7/source/tools/bpf/bpftool/feature.c#L390
+	info := unix.Utsname{}
+	err := unix.Uname(&info)
+	if err != nil {
+		return false
+	}
+	release := strings.TrimSpace(string(bytes.Trim(info.Release[:], "\x00")))
+
+	// Any error checking these files is treated as the kernel config not
+	// being found.
+	if _, err := os.Stat(fmt.Sprintf("/boot/config-%s", release)); err != nil {
+		if _, err = os.Stat("/proc/config.gz"); err != nil {
+			return false
+		}
+	}
+
+	return true
+}
+
+// HaveMapType is a wrapper around features.HaveMapType() to check if a certain
+// BPF map type is supported by the kernel.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveMapType(mt ebpf.MapType) error {
+	err := features.HaveMapType(mt)
+	if errors.Is(err, ebpf.ErrNotSupported) {
+		return err
+	}
+	if err != nil {
+		log.WithError(err).WithField("maptype", mt).Fatal("failed to probe MapType")
+	}
+	return nil
+}
+
+// HaveProgramType is a wrapper around features.HaveProgramType() to check
+// if a certain BPF program type is supported by the kernel.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveProgramType(pt ebpf.ProgramType) error {
+	err := features.HaveProgramType(pt)
+	if errors.Is(err, ebpf.ErrNotSupported) {
+		return err
+	}
+	if err != nil {
+		log.WithError(err).WithField("programtype", pt).Fatal("failed to probe ProgramType")
+	}
+	return nil
+}
+
+// HaveProgramHelper is a wrapper around features.HaveProgramHelper() to
+// check if a certain BPF program/helper combination is supported by the kernel.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error {
+	err := features.HaveProgramHelper(pt, helper)
+	if errors.Is(err, ebpf.ErrNotSupported) {
+		return err
+	}
+	if err != nil {
+		log.WithError(err).WithField("programtype", pt).WithField("helper", helper).Fatal("failed to probe helper")
+	}
+	return nil
+}
+
+// HaveLargeInstructionLimit is a wrapper around features.HaveLargeInstructions()
+// to check if the kernel supports the 1 million instruction limit.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveLargeInstructionLimit() error { + err := features.HaveLargeInstructions() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe large instruction limit") + } + return nil +} + +// HaveBoundedLoops is a wrapper around features.HaveBoundedLoops() +// to check if the kernel supports bounded loops in BPF programs. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveBoundedLoops() error { + err := features.HaveBoundedLoops() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe bounded loops") + } + return nil +} + +// HaveFibIfindex checks if kernel has d1c362e1dd68 ("bpf: Always return target +// ifindex in bpf_fib_lookup") which is 5.10+. This got merged in the same kernel +// as the new redirect helpers. +func HaveFibIfindex() error { + return features.HaveProgramHelper(ebpf.SchedCLS, asm.FnRedirectPeer) +} + +// HaveV2ISA is a wrapper around features.HaveV2ISA() to check if the kernel +// supports the V2 ISA. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveV2ISA() error { + err := features.HaveV2ISA() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe V2 ISA") + } + return nil +} + +// HaveV3ISA is a wrapper around features.HaveV3ISA() to check if the kernel +// supports the V3 ISA. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveV3ISA() error { + err := features.HaveV3ISA() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe V3 ISA") + } + return nil +} + +// HaveOuterSourceIPSupport tests whether the kernel support setting the outer +// source IP address via the bpf_skb_set_tunnel_key BPF helper. We can't rely +// on the verifier to reject a program using the new support because the +// verifier just accepts any argument size for that helper; non-supported +// fields will simply not be used. Instead, we set the outer source IP and +// retrieve it with bpf_skb_get_tunnel_key right after. If the retrieved value +// equals the value set, we have a confirmation the kernel supports it. +func HaveOuterSourceIPSupport() (err error) { + defer func() { + if err != nil && !errors.Is(err, ebpf.ErrNotSupported) { + log.WithError(err).Fatal("failed to probe for outer source IP support") + } + }() + + if err := rlimit.RemoveMemlock(); err != nil { + return err + } + + progSpec := &ebpf.ProgramSpec{ + Name: "set_tunnel_key_probe", + Type: ebpf.SchedACT, + License: "GPL", + } + progSpec.Instructions = asm.Instructions{ + asm.Mov.Reg(asm.R8, asm.R1), + + asm.Mov.Imm(asm.R2, 0), + asm.StoreMem(asm.RFP, -8, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -16, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -24, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -32, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -40, asm.R2, asm.DWord), + asm.Mov.Imm(asm.R2, 42), + asm.StoreMem(asm.RFP, -44, asm.R2, asm.Word), + asm.Mov.Reg(asm.R2, asm.RFP), + asm.Add.Imm(asm.R2, -44), + asm.Mov.Imm(asm.R3, 44), // sizeof(struct bpf_tunnel_key) when setting the outer source IP is supported. 
+ asm.Mov.Imm(asm.R4, 0), + asm.FnSkbSetTunnelKey.Call(), + + asm.Mov.Reg(asm.R1, asm.R8), + asm.Mov.Reg(asm.R2, asm.RFP), + asm.Add.Imm(asm.R2, -44), + asm.Mov.Imm(asm.R3, 44), + asm.Mov.Imm(asm.R4, 0), + asm.FnSkbGetTunnelKey.Call(), + + asm.LoadMem(asm.R0, asm.RFP, -44, asm.Word), + asm.Return(), + } + prog, err := ebpf.NewProgram(progSpec) + if err != nil { + return err + } + defer prog.Close() + + pkt := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ret, _, err := prog.Test(pkt) + if err != nil { + return err + } + if ret != 42 { + return ebpf.ErrNotSupported + } + return nil +} + +// HaveIPv6Support tests whether kernel can open an IPv6 socket. This will +// also implicitly auto-load IPv6 kernel module if available and not yet +// loaded. +func HaveIPv6Support() error { + fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_STREAM, 0) + if errors.Is(err, unix.EAFNOSUPPORT) || errors.Is(err, unix.EPROTONOSUPPORT) { + return ErrNotSupported + } + unix.Close(fd) + return nil +} + +// CreateHeaderFiles creates C header files with macros indicating which BPF +// features are available in the kernel. +func CreateHeaderFiles(headerDir string, probes *FeatureProbes) error { + common, err := os.Create(filepath.Join(headerDir, "features.h")) + if err != nil { + return fmt.Errorf("could not create common features header file: %w", err) + } + defer common.Close() + if err := writeCommonHeader(common, probes); err != nil { + return fmt.Errorf("could not write common features header file: %w", err) + } + + skb, err := os.Create(filepath.Join(headerDir, "features_skb.h")) + if err != nil { + return fmt.Errorf("could not create skb related features header file: %w", err) + } + defer skb.Close() + if err := writeSkbHeader(skb, probes); err != nil { + return fmt.Errorf("could not write skb related features header file: %w", err) + } + + xdp, err := os.Create(filepath.Join(headerDir, "features_xdp.h")) + if err != nil { + return fmt.Errorf("could not create xdp related features header file: %w", err) + } + defer xdp.Close() + if err := writeXdpHeader(xdp, probes); err != nil { + return fmt.Errorf("could not write xdp related features header file: %w", err) + } + + return nil +} + +// ExecuteHeaderProbes probes the kernel for a specific set of BPF features +// which are currently used to generate various feature macros for the datapath. +// The probe results returned in FeatureProbes are then used in the respective +// function that writes the actual C macro definitions. +// Further needed probes should be added here, while new macro strings need to +// be added in the correct `write*Header()` function. 
+func ExecuteHeaderProbes() *FeatureProbes {
+	probes := FeatureProbes{
+		ProgramHelpers: make(map[ProgramHelper]bool),
+		Misc:           miscFeatures{},
+	}
+
+	progHelpers := []ProgramHelper{
+		// common probes
+		{ebpf.CGroupSock, asm.FnGetNetnsCookie},
+		{ebpf.CGroupSockAddr, asm.FnGetNetnsCookie},
+		{ebpf.CGroupSockAddr, asm.FnGetSocketCookie},
+		{ebpf.CGroupSock, asm.FnJiffies64},
+		{ebpf.CGroupSockAddr, asm.FnJiffies64},
+		{ebpf.SchedCLS, asm.FnJiffies64},
+		{ebpf.XDP, asm.FnJiffies64},
+		{ebpf.CGroupSockAddr, asm.FnSkLookupTcp},
+		{ebpf.CGroupSockAddr, asm.FnSkLookupUdp},
+		{ebpf.CGroupSockAddr, asm.FnGetCurrentCgroupId},
+		{ebpf.CGroupSock, asm.FnSetRetval},
+		{ebpf.SchedCLS, asm.FnRedirectNeigh},
+		{ebpf.SchedCLS, asm.FnRedirectPeer},
+
+		// skb related probes
+		{ebpf.SchedCLS, asm.FnSkbChangeTail},
+		{ebpf.SchedCLS, asm.FnFibLookup},
+		{ebpf.SchedCLS, asm.FnCsumLevel},
+
+		// xdp related probes
+		{ebpf.XDP, asm.FnFibLookup},
+	}
+	for _, ph := range progHelpers {
+		probes.ProgramHelpers[ph] = (HaveProgramHelper(ph.Program, ph.Helper) == nil)
+	}
+
+	probes.Misc.HaveLargeInsnLimit = (HaveLargeInstructionLimit() == nil)
+	probes.Misc.HaveFibIfindex = (HaveFibIfindex() == nil)
+
+	return &probes
+}
+
+// writeCommonHeader defines macros for bpf/include/bpf/features.h
+func writeCommonHeader(writer io.Writer, probes *FeatureProbes) error {
+	features := map[string]bool{
+		"HAVE_NETNS_COOKIE": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnGetNetnsCookie}] &&
+			probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetNetnsCookie}],
+		"HAVE_SOCKET_COOKIE": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetSocketCookie}],
+		"HAVE_JIFFIES": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnJiffies64}] &&
+			probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnJiffies64}] &&
+			probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnJiffies64}] &&
+			probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnJiffies64}],
+		"HAVE_SOCKET_LOOKUP": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnSkLookupTcp}] &&
+			probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnSkLookupUdp}],
+		"HAVE_CGROUP_ID":        probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetCurrentCgroupId}],
+		"HAVE_LARGE_INSN_LIMIT": probes.Misc.HaveLargeInsnLimit,
+		"HAVE_SET_RETVAL":       probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnSetRetval}],
+		"HAVE_FIB_NEIGH":        probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnRedirectNeigh}],
+		"HAVE_FIB_IFINDEX":      probes.Misc.HaveFibIfindex,
+	}
+
+	return writeFeatureHeader(writer, features, true)
+}
+
+// writeSkbHeader defines macros for bpf/include/bpf/features_skb.h
+func writeSkbHeader(writer io.Writer, probes *FeatureProbes) error {
+	featuresSkb := map[string]bool{
+		"HAVE_CHANGE_TAIL": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnSkbChangeTail}],
+		"HAVE_FIB_LOOKUP":  probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnFibLookup}],
+		"HAVE_CSUM_LEVEL":  probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnCsumLevel}],
+	}
+
+	return writeFeatureHeader(writer, featuresSkb, false)
+}
+
+// writeXdpHeader defines macros for bpf/include/bpf/features_xdp.h
+func writeXdpHeader(writer io.Writer, probes *FeatureProbes) error {
+	featuresXdp := map[string]bool{
+		"HAVE_FIB_LOOKUP": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnFibLookup}],
+	}
+
+	return writeFeatureHeader(writer, featuresXdp, false)
+}
+
+func writeFeatureHeader(writer io.Writer, features map[string]bool,
common bool) error { + input := struct { + Common bool + Features map[string]bool + }{ + Common: common, + Features: features, + } + + if err := tpl.Execute(writer, input); err != nil { + return fmt.Errorf("could not write template: %w", err) + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go new file mode 100644 index 00000000000..846e9c28e0e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import "github.com/vishvananda/netlink" + +// Family type definitions +const ( + NTF_EXT_LEARNED = netlink.NTF_EXT_LEARNED + NTF_EXT_MANAGED = netlink.NTF_EXT_MANAGED +) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go new file mode 100644 index 00000000000..f92efd49902 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +package probes + +// Dummy values on non-linux platform +const ( + NTF_EXT_LEARNED = iota + NTF_EXT_MANAGED +) diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go index 98388d8f233..eb6f644e83e 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go @@ -6,13 +6,13 @@ package cell import ( "context" "fmt" + "sort" "sync/atomic" "time" "github.com/cilium/cilium/pkg/lock" "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) // Level denotes what kind an update is. @@ -191,8 +191,8 @@ func (p *healthProvider) All() []Status { p.mu.RLock() all := maps.Values(p.moduleStatuses) p.mu.RUnlock() - slices.SortFunc(all, func(a, b Status) bool { - return a.ModuleID < b.ModuleID + sort.Slice(all, func(i, j int) bool { + return all[i].ModuleID < all[j].ModuleID }) return all } diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go index 449c2748a31..9275ec1a602 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go +++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go @@ -10,7 +10,6 @@ import ( ) type invoker struct { - cont container funcs []namedFunc } @@ -23,11 +22,11 @@ type InvokerList interface { AppendInvoke(func() error) } -func (i *invoker) invoke() error { +func (i *invoker) invoke(cont container) error { for _, afn := range i.funcs { log.WithField("function", afn.name).Debug("Invoking") t0 := time.Now() - if err := i.cont.Invoke(afn.fn); err != nil { + if err := cont.Invoke(afn.fn); err != nil { log.WithError(err).WithField("", afn.name).Error("Invoke failed") return err } @@ -39,7 +38,7 @@ func (i *invoker) invoke() error { func (i *invoker) Apply(c container) error { // Remember the scope in which we need to invoke. - i.cont = c + invoker := func() error { return i.invoke(c) } // Append the invoker to the list of invoke functions. These are invoked // prior to start to build up the objects. They are not invoked directly @@ -48,7 +47,7 @@ func (i *invoker) Apply(c container) error { // we don't yet know which command to run, but we still need to register // all the flags. 
return c.Invoke(func(l InvokerList) { - l.AppendInvoke(i.invoke) + l.AppendInvoke(invoker) }) } diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/flow.go b/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/flow.go index 3dbb7388546..b1d5507a7df 100644 --- a/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/flow.go +++ b/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/flow.go @@ -11,7 +11,7 @@ import ( // FlowProtocol returns the protocol best describing the flow. If available, // this is the L7 protocol name, then the L4 protocol name. func FlowProtocol(flow *pb.Flow) string { - switch flow.GetEventType().Type { + switch flow.GetEventType().GetType() { case monitorAPI.MessageTypeAccessLog: if l7 := flow.GetL7(); l7 != nil { switch { diff --git a/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go b/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go index fcbb8ed62f9..d0641f576b7 100644 --- a/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go +++ b/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go @@ -6,13 +6,23 @@ package key import ( "strings" + "golang.org/x/exp/maps" + "github.com/cilium/cilium/pkg/allocator" "github.com/cilium/cilium/pkg/labels" ) +const ( + // MetadataKeyBackendKey is the key used to store the backend key. + MetadataKeyBackendKey = iota +) + // GlobalIdentity is the structure used to store an identity type GlobalIdentity struct { labels.LabelArray + + // metadata contains metadata that are stored for example by the backends. + metadata map[any]any } // GetKey encodes an Identity as string @@ -32,7 +42,7 @@ func (gi *GlobalIdentity) GetAsMap() map[string]string { // PutKey decodes an Identity from its string representation func (gi *GlobalIdentity) PutKey(v string) allocator.AllocatorKey { - return &GlobalIdentity{labels.NewLabelArrayFromSortedList(v)} + return &GlobalIdentity{LabelArray: labels.NewLabelArrayFromSortedList(v)} } // PutKeyFromMap decodes an Identity from a map of key to value. Output @@ -40,5 +50,24 @@ func (gi *GlobalIdentity) PutKey(v string) allocator.AllocatorKey { // Note: NewLabelArrayFromMap will parse the ':' separated label source from // the keys because the source parameter is "" func (gi *GlobalIdentity) PutKeyFromMap(v map[string]string) allocator.AllocatorKey { - return &GlobalIdentity{labels.Map2Labels(v, "").LabelArray()} + return &GlobalIdentity{LabelArray: labels.Map2Labels(v, "").LabelArray()} +} + +// PutValue puts metadata inside the global identity for the given 'key' with +// the given 'value'. +func (gi *GlobalIdentity) PutValue(key, value any) allocator.AllocatorKey { + newMap := map[any]any{} + if gi.metadata != nil { + newMap = maps.Clone(gi.metadata) + } + newMap[key] = value + return &GlobalIdentity{ + LabelArray: gi.LabelArray, + metadata: newMap, + } +} + +// Value returns the value stored in the metadata map. +func (gi *GlobalIdentity) Value(key any) any { + return gi.metadata[key] } diff --git a/vendor/github.com/cilium/cilium/pkg/identity/reserved.go b/vendor/github.com/cilium/cilium/pkg/identity/reserved.go index 1af9522589a..ce56e918dcb 100644 --- a/vendor/github.com/cilium/cilium/pkg/identity/reserved.go +++ b/vendor/github.com/cilium/cilium/pkg/identity/reserved.go @@ -17,25 +17,30 @@ var ( ) // AddReservedIdentity adds the reserved numeric identity with the respective -// label into the map of reserved identity cache. 
-func AddReservedIdentity(ni NumericIdentity, lbl string) {
+// label into the map of reserved identity cache, and returns the resulting Identity.
+// This identity must not be mutated!
+func AddReservedIdentity(ni NumericIdentity, lbl string) *Identity {
 	identity := NewIdentity(ni, labels.Labels{lbl: labels.NewLabel(lbl, "", labels.LabelSourceReserved)})
 	cacheMU.Lock()
 	reservedIdentityCache[ni] = identity
 	cacheMU.Unlock()
+	return identity
 }
 
 // AddReservedIdentityWithLabels is the same as AddReservedIdentity but accepts
-// multiple labels.
-func AddReservedIdentityWithLabels(ni NumericIdentity, lbls labels.Labels) {
+// multiple labels. Returns the resulting Identity.
+// This identity must not be mutated!
+func AddReservedIdentityWithLabels(ni NumericIdentity, lbls labels.Labels) *Identity {
 	identity := NewIdentity(ni, lbls)
 	cacheMU.Lock()
 	reservedIdentityCache[ni] = identity
 	cacheMU.Unlock()
+	return identity
 }
 
 // LookupReservedIdentity looks up a reserved identity by its NumericIdentity
 // and returns it if found. Returns nil if not found.
+// This identity must not be mutated!
 func LookupReservedIdentity(ni NumericIdentity) *Identity {
 	cacheMU.RLock()
 	defer cacheMU.RUnlock()
@@ -43,7 +48,9 @@ func LookupReservedIdentity(ni NumericIdentity) *Identity {
 }
 
 func init() {
-	iterateReservedIdentityLabels(AddReservedIdentityWithLabels)
+	iterateReservedIdentityLabels(func(ni NumericIdentity, lbls labels.Labels) {
+		AddReservedIdentityWithLabels(ni, lbls)
+	})
 }
 
 // IterateReservedIdentities iterates over all reserved identities and
diff --git a/vendor/github.com/cilium/cilium/pkg/ipcache/kvstore.go b/vendor/github.com/cilium/cilium/pkg/ipcache/kvstore.go
index 46c8f1737cf..c77a71ad9b7 100644
--- a/vendor/github.com/cilium/cilium/pkg/ipcache/kvstore.go
+++ b/vendor/github.com/cilium/cilium/pkg/ipcache/kvstore.go
@@ -177,7 +177,7 @@ type IPCacher interface {
 }
 
 // NewIPIdentityWatcher creates a new IPIdentityWatcher for the given cluster.
-func NewIPIdentityWatcher(clusterName string, ipc IPCacher) *IPIdentityWatcher {
+func NewIPIdentityWatcher(clusterName string, ipc IPCacher, opts ...storepkg.RWSOpt) *IPIdentityWatcher {
 	watcher := IPIdentityWatcher{
 		ipcache:     ipc,
 		clusterName: clusterName,
@@ -189,7 +189,7 @@ func NewIPIdentityWatcher(clusterName string, ipc IPCacher) *IPIdentityWatcher {
 		clusterName,
 		func() storepkg.Key { return &identity.IPIdentityPair{} },
 		&watcher,
-		storepkg.RWSWithOnSyncCallback(watcher.onSync),
+		append(opts, storepkg.RWSWithOnSyncCallback(watcher.onSync))...,
 	)
 	return &watcher
 }
diff --git a/vendor/github.com/cilium/cilium/pkg/ipcache/metadata.go b/vendor/github.com/cilium/cilium/pkg/ipcache/metadata.go
index 0afd64f2ac1..486ca817ea0 100644
--- a/vendor/github.com/cilium/cilium/pkg/ipcache/metadata.go
+++ b/vendor/github.com/cilium/cilium/pkg/ipcache/metadata.go
@@ -79,12 +79,22 @@ type metadata struct {
 	// generate updates into the ipcache, policy engine and datapath.
 	queuedChangesMU lock.Mutex
 	queuedPrefixes  map[netip.Prefix]struct{}
+
+	// reservedHostLock protects the reservedHostLabels map. Holders must
+	// always take the metadata read lock first.
+	reservedHostLock lock.Mutex
+
+	// reservedHostLabels collects all labels that apply to the host identity.
+	// See updateReservedHostLabels() for more info.
+	reservedHostLabels map[netip.Prefix]labels.Labels
 }
 
 func newMetadata() *metadata {
 	return &metadata{
 		m:              make(map[netip.Prefix]PrefixInfo),
 		queuedPrefixes: make(map[netip.Prefix]struct{}),
+
+		reservedHostLabels: make(map[netip.Prefix]labels.Labels),
 	}
 }
 
@@ -204,14 +214,13 @@ func (ipc *IPCache) InjectLabels(ctx context.Context, modifiedPrefixes []netip.P
 		oldID, entryExists := ipc.LookupByIP(pstr)
 		oldTunnelIP, oldEncryptionKey := ipc.GetHostIPCache(pstr)
 		prefixInfo := ipc.metadata.getLocked(prefix)
+		var newID *identity.Identity
 		if prefixInfo == nil {
 			if !entryExists {
 				// Already deleted, no new metadata to associate
 				continue
 			} // else continue below to remove the old entry
 		} else {
-			var newID *identity.Identity
-
 			// Insert to propagate the updated set of labels after removal.
 			newID, _, err = ipc.resolveIdentity(ctx, prefix, prefixInfo, identity.InvalidIdentity)
 			if err != nil {
@@ -303,6 +312,25 @@ func (ipc *IPCache) InjectLabels(ctx context.Context, modifiedPrefixes []netip.P
 				entriesToDelete[prefix] = oldID
 			}
 		}
+
+		// The reserved:host identity is special: the numeric ID is fixed,
+		// and the set of labels is mutable. Thus, whenever it changes,
+		// we must always update the SelectorCache (normally, this is elided
+		// when no changes are present).
+		if newID != nil && newID.ID == identity.ReservedIdentityHost {
+			idsToAdd[newID.ID] = newID.Labels.LabelArray()
+		}
+
+		// Again, more reserved:host bookkeeping: if this prefix is no longer ID 1 (because
+		// it is being deleted or changing IDs), we need to recompute the labels
+		// for reserved:host and push that to the SelectorCache
+		if entryExists && oldID.ID == identity.ReservedIdentityHost &&
+			(newID == nil || newID.ID != identity.ReservedIdentityHost) {
+
+			i := ipc.updateReservedHostLabels(prefix, nil)
+			idsToAdd[i.ID] = i.Labels.LabelArray()
+		}
+
 	}
 	// Don't hold lock while calling UpdateIdentities, as it will otherwise run into a deadlock
 	ipc.metadata.RUnlock()
@@ -452,8 +480,12 @@ func (ipc *IPCache) resolveIdentity(ctx context.Context, prefix netip.Prefix, in
 		// for itself). For all other identities, we avoid modifying
 		// the labels at runtime and instead opt to allocate new
 		// identities below.
-		identity.AddReservedIdentityWithLabels(identity.ReservedIdentityHost, lbls)
-		return identity.LookupReservedIdentity(identity.ReservedIdentityHost), false, nil
+		//
+		// As an extra gotcha, we need to merge all labels for all IPs
+		// that resolve to the reserved:host identity, otherwise the identity's
+		// labels can flap depending on which prefix writes first. See GH-28259.
+		i := ipc.updateReservedHostLabels(prefix, lbls)
+		return i, false, nil
 	}
 
 	// If no other labels are associated with this IP, we assume that it's
@@ -483,6 +515,36 @@ func (ipc *IPCache) resolveIdentity(ctx context.Context, prefix netip.Prefix, in
 	return id, isNew, err
 }
 
+// updateReservedHostLabels adds or removes labels that apply to the local host.
+// The `reserved:host` identity is special: the numeric identity is fixed
+// and the set of labels is mutable. (The datapath requires this.) So,
+// we need to determine all prefixes that have the `reserved:host` label and
+// capture their labels. Then, we must aggregate *all* labels from all prefixes and
+// update the labels that correspond to the `reserved:host` identity.
+//
+// This could be termed a meta-ipcache. The ipcache metadata layer aggregates
+// an arbitrary set of resources and labels to a prefix. Here, we are aggregating an arbitrary
+// set of prefixes and labels to an identity.
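+//
+// Aggregation sketch (hypothetical prefixes and labels):
+//
+//	prefixA -> {reserved:host}
+//	prefixB -> {reserved:host, extraLabel}
+//	=> the reserved:host identity carries {reserved:host, extraLabel}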
+func (ipc *IPCache) updateReservedHostLabels(prefix netip.Prefix, lbls labels.Labels) *identity.Identity { + ipc.metadata.reservedHostLock.Lock() + defer ipc.metadata.reservedHostLock.Unlock() + if lbls == nil { + delete(ipc.metadata.reservedHostLabels, prefix) + } else { + ipc.metadata.reservedHostLabels[prefix] = lbls + } + + // aggregate all labels and update static identity + newLabels := labels.NewFrom(labels.LabelHost) + for _, l := range ipc.metadata.reservedHostLabels { + newLabels.MergeLabels(l) + } + + log.WithField(logfields.Labels, newLabels).Debug("Merged labels for reserved:host identity") + + return identity.AddReservedIdentityWithLabels(identity.ReservedIdentityHost, newLabels) +} + // RemoveLabelsExcluded removes the given labels from all IPs inside the IDMD // except for the IPs / prefixes inside the given excluded set. // diff --git a/vendor/github.com/cilium/cilium/pkg/ipcache/types.go b/vendor/github.com/cilium/cilium/pkg/ipcache/types.go index 6b7bf02444c..0ac030d043d 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipcache/types.go +++ b/vendor/github.com/cilium/cilium/pkg/ipcache/types.go @@ -9,7 +9,6 @@ import ( "github.com/sirupsen/logrus" "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ipcachetypes "github.com/cilium/cilium/pkg/ipcache/types" "github.com/cilium/cilium/pkg/labels" @@ -137,7 +136,9 @@ func (s PrefixInfo) isValid() bool { func (s PrefixInfo) sortedBySourceThenResourceID() []ipcachetypes.ResourceID { resourceIDs := maps.Keys(s) - slices.SortFunc(resourceIDs, func(a, b ipcachetypes.ResourceID) bool { + sort.Slice(resourceIDs, func(i, j int) bool { + a := resourceIDs[i] + b := resourceIDs[j] if s[a].source != s[b].source { return !source.AllowOverwrite(s[a].source, s[b].source) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go b/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go index 2fd33a18659..12184076363 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go @@ -16,6 +16,7 @@ import ( "k8s.io/client-go/tools/cache" "github.com/cilium/cilium/pkg/allocator" + cacheKey "github.com/cilium/cilium/pkg/identity/key" "github.com/cilium/cilium/pkg/idpool" k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" @@ -88,7 +89,8 @@ func sanitizeK8sLabels(old map[string]string) (selected, skipped map[string]stri // AllocateID will create an identity CRD, thus creating the identity for this // key-> ID mapping. // Note: the lock field is not supported with the k8s CRD allocator. -func (c *crdBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) error { +// Returns an allocator key with the cilium identity stored in it. +func (c *crdBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (allocator.AllocatorKey, error) { selectedLabels, skippedLabels := sanitizeK8sLabels(key.GetAsMap()) log.WithField(logfields.Labels, skippedLabels).Info("Skipped non-kubernetes labels when labelling ciliumidentity. 
All labels will still be used in identity determination") @@ -100,11 +102,14 @@ func (c *crdBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator SecurityLabels: key.GetAsMap(), } - _, err := c.Client.CiliumV2().CiliumIdentities().Create(ctx, identity, metav1.CreateOptions{}) - return err + ci, err := c.Client.CiliumV2().CiliumIdentities().Create(ctx, identity, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + return key.PutValue(cacheKey.MetadataKeyBackendKey, ci), nil } -func (c *crdBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error { +func (c *crdBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) (allocator.AllocatorKey, error) { return c.AllocateID(ctx, id, key) } @@ -136,13 +141,18 @@ func (c *crdBackend) AcquireReference(ctx context.Context, id idpool.ID, key all return err } if !exists { - return fmt.Errorf("identity (id:%q,key:%q) does not exist", id, key) + // fall back to the key stored in the allocator key. If it's not present + // then return the error. + ci, ok = key.Value(cacheKey.MetadataKeyBackendKey).(*v2.CiliumIdentity) + if !ok { + return fmt.Errorf("identity (id:%q,key:%q) does not exist", id, key) + } } - ci = ci.DeepCopy() ts, ok = ci.Annotations[HeartBeatAnnotation] if ok { log.WithField(logfields.Identity, ci).Infof("Identity marked for deletion (at %s); attempting to unmark it", ts) + ci = ci.DeepCopy() delete(ci.Annotations, HeartBeatAnnotation) _, err = c.Client.CiliumV2().CiliumIdentities().Update(ctx, ci, metav1.UpdateOptions{}) if err != nil { @@ -183,7 +193,7 @@ func (c *crdBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator. if reliablyMissing { // Recreate a missing master key - if err = c.AllocateID(ctx, id, key); err != nil { + if _, err = c.AllocateID(ctx, id, key); err != nil { return fmt.Errorf("Unable recreate missing CRD identity %q->%q: %s", key, id, err) } return nil diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/network_policy.go b/vendor/github.com/cilium/cilium/pkg/k8s/network_policy.go index c2bf3a9eea2..93134bc5701 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/network_policy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/network_policy.go @@ -254,6 +254,26 @@ func ParseNetworkPolicy(np *slim_networkingv1.NetworkPolicy) (api.Rules, error) return api.Rules{rule}, nil } +// NetworkPolicyHasEndPort returns true if the network policy has an +// EndPort. 
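+//
+// Example (sketch): an ingress rule with ports [{port: 8080, endPort: 8090}]
+// declares a port range, so this function returns true for such a policy.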
+func NetworkPolicyHasEndPort(np *slim_networkingv1.NetworkPolicy) bool { + for _, iRule := range np.Spec.Ingress { + for _, port := range iRule.Ports { + if port.EndPort != nil && *port.EndPort > 0 { + return true + } + } + } + for _, eRule := range np.Spec.Egress { + for _, port := range eRule.Ports { + if port.EndPort != nil && *port.EndPort > 0 { + return true + } + } + } + return false +} + func ipBlockToCIDRRule(block *slim_networkingv1.IPBlock) api.CIDRRule { cidrRule := api.CIDRRule{} cidrRule.Cidr = api.CIDR(block.CIDR) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource/resource.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource/resource.go index d7aff77af04..8dd8febcf66 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/resource/resource.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource/resource.go @@ -11,12 +11,15 @@ import ( "sync" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" k8sRuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "github.com/cilium/cilium/pkg/hive" k8smetrics "github.com/cilium/cilium/pkg/k8s/metrics" + "github.com/cilium/cilium/pkg/k8s/watchers/resources" "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/metrics" "github.com/cilium/cilium/pkg/promise" @@ -44,14 +47,42 @@ import ( // The resource is lazy, e.g. it will not start the informer until a call // has been made to Events() or Store(). type Resource[T k8sRuntime.Object] interface { + // Resource can be observed either via Observe() or via Events(). The observable + // is implemented in terms of Events() and same semantics apply. stream.Observable[Event[T]] // Events returns a channel of events. Each event must be marked as handled - // with a call to Done(), otherwise no new events for this key will be emitted. + // with a call to Done() which marks the key processed. No new events for this key + // will be emitted before Done() is called. + // + // A missing Done() will lead to an eventual panic (via finalizer on Event[T]). + // Panic on this situation is needed as otherwise no new events would be emitted + // and thus this needs to be enforced. + // + // A stream of Upsert events are emitted first to replay the current state of the + // store after which incremental upserts and deletes follow until the underlying + // store is synchronized after which a Sync event is emitted and further incremental + // updates: + // + // (start observing), Upsert, Upsert, Upsert, (done replaying store contents), Upsert, Upsert, + // (store synchronized with API server), Sync, Upsert, Delete, Upsert, ... + // + // The emitting of the Sync event does not depend on whether or not Upsert events have + // all been marked Done() without an error. The sync event solely signals that the underlying + // store has synchronized and that Upsert events for objects in a synchronized store have been + // sent to the observer. // // When Done() is called with non-nil error the error handler is invoked, which - // can ignore, requeue the event or close the channel. The default error handler + // can ignore, requeue the event (by key) or close the channel. The default error handler // will requeue. + // + // If an Upsert is retried and the object has been deleted, a Delete event will be emitted instead. + // Conversely if a Delete event is retried and the object has been recreated with the same key, + // an Upsert will be emitted instead. 
+	// //
+	// If an object is created and immediately deleted, then a slow observer may not observe this at
+	// all. In all cases a Delete event is only emitted if the observer has seen an Upsert. Whether or
+	// not it had been successfully handled (via Done(nil)) does not affect this property.
 	Events(ctx context.Context, opts ...EventsOpt) <-chan Event[T]
 
 	// Store retrieves the read-only store for the resource. Blocks until
@@ -103,9 +134,9 @@ type Resource[T k8sRuntime.Object] interface {
 // See also pkg/k8s/resource/example/main.go for a runnable example.
 func New[T k8sRuntime.Object](lc hive.Lifecycle, lw cache.ListerWatcher, opts ...ResourceOption) Resource[T] {
 	r := &resource[T]{
-		queues: make(map[uint64]*keyQueue),
-		needed: make(chan struct{}, 1),
-		lw:     lw,
+		subscribers: make(map[uint64]*subscriber[T]),
+		needed:      make(chan struct{}, 1),
+		lw:          lw,
 	}
 	r.opts.sourceObj = func() k8sRuntime.Object {
 		var obj T
@@ -172,8 +203,8 @@ type resource[T k8sRuntime.Object] struct {
 
 	needed chan struct{}
 
-	queues map[uint64]*keyQueue
-	subId  uint64
+	subscribers map[uint64]*subscriber[T]
+	subId       uint64
 
 	lw           cache.ListerWatcher
 	synchronized bool // flipped to true when informer has synced.
@@ -237,28 +268,6 @@ func (r *resource[T]) metricEventReceived(action string, valid, equal bool) {
 	metrics.KubernetesEventReceived.WithLabelValues(r.opts.metricScope, action, validStr, equalStr).Inc()
 }
 
-func (r *resource[T]) pushUpdate(key Key) {
-	r.metricEventReceived("update", true, false)
-	r.mu.RLock()
-	for _, queue := range r.queues {
-		queue.AddUpsert(key)
-	}
-	r.mu.RUnlock()
-}
-
-func (r *resource[T]) pushDelete(lastState any) {
-	key := NewKey(lastState)
-	obj := lastState
-	if d, ok := lastState.(cache.DeletedFinalStateUnknown); ok {
-		obj = d.Obj
-	}
-	r.mu.RLock()
-	for _, queue := range r.queues {
-		queue.AddDelete(key, obj)
-	}
-	r.mu.RUnlock()
-}
-
 func (r *resource[T]) Start(hive.HookContext) error {
 	r.wg.Add(1)
 	go r.startWhenNeeded()
@@ -287,15 +296,7 @@ func (r *resource[T]) startWhenNeeded() {
 		return
 	}
 
-	// Construct the informer and run it.
-	handlerFuncs :=
-		cache.ResourceEventHandlerFuncs{
-			AddFunc:    func(obj any) { r.pushUpdate(NewKey(obj)) },
-			UpdateFunc: func(old any, new any) { r.pushUpdate(NewKey(new)) },
-			DeleteFunc: func(obj any) { r.pushDelete(obj) },
-		}
-
-	store, informer := cache.NewTransformingInformer(r.lw, r.opts.sourceObj(), 0, handlerFuncs, r.opts.transform)
+	store, informer := r.newInformer()
 	r.storeResolver.Resolve(&typedStore[T]{store})
 
 	r.wg.Add(1)
@@ -304,14 +305,14 @@
 		informer.Run(r.ctx.Done())
 	}()
 
-	// Wait for cache to be synced before emitting the sync events.
+	// Wait for cache to be synced before emitting the sync event.
 	if cache.WaitForCacheSync(r.ctx.Done(), informer.HasSynced) {
 		// Emit the sync event for all subscribers. Subscribers
 		// that subscribe afterwards will emit it by checking
 		// r.synchronized.
 		r.mu.Lock()
-		for _, queue := range r.queues {
-			queue.AddSync()
+		for _, sub := range r.subscribers {
+			sub.enqueueSync()
 		}
 		r.synchronized = true
 		r.mu.Unlock()
@@ -357,8 +358,8 @@ func (r *resource[T]) Observe(ctx context.Context, next func(Event[T]), complete
 // before the subscriber can handle the event only the latest state of object
 // is emitted.
 //
-// The 'ctx' is used to cancel the subscription. If cancelled, the subscriber
-// must drain the event channel.
+// The 'ctx' is used to cancel the subscription. The returned channel will be
+// closed when the context is cancelled.
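+//
+// A typical consumer loop looks like this (sketch; res is a hypothetical
+// Resource[T] and error handling is elided):
+//
+//	for ev := range res.Events(ctx) {
+//		// ... process ev.Kind, ev.Key, ev.Object ...
+//		ev.Done(nil)
+//	}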
// // Options are supported to configure rate limiting of retries // (WithRateLimiter), error handling strategy (WithErrorHandler). @@ -380,30 +381,20 @@ func (r *resource[T]) Events(ctx context.Context, opts ...EventsOpt) <-chan Even // Mark the resource as needed. This will start the informer if it was not already. r.markNeeded() + out := make(chan Event[T]) ctx, subCancel := context.WithCancel(ctx) - // Create a queue for receiving the events from the informer. - queue := &keyQueue{ - RateLimitingInterface: workqueue.NewRateLimitingQueue(options.rateLimiter), - errorHandler: options.errorHandler, + sub := &subscriber[T]{ + r: r, + options: options, + debugInfo: debugInfo, + wq: workqueue.NewRateLimitingQueue(options.rateLimiter), } - r.mu.Lock() - subId := r.subId - r.subId++ - r.mu.Unlock() - - out := make(chan Event[T]) - // Fork a goroutine to pop elements from the queue and pass them to the subscriber. + // Fork a goroutine to process the queued keys and pass them to the subscriber. r.wg.Add(1) go func() { defer r.wg.Done() - - // Make sure to call ShutDown() in the end. Calling ShutDownWithDrain is not - // enough as DelayingQueue does not implement it, so without ShutDown() we'd - // leak the (*delayingType).waitingLoop. - defer queue.ShutDown() - defer close(out) // Grab a handle to the store. Asynchronous as informer is started in the background. @@ -414,92 +405,32 @@ func (r *resource[T]) Events(ctx context.Context, opts ...EventsOpt) <-chan Even } r.mu.Lock() - r.queues[subId] = queue - - // Append the current set of keys to the queue. - keyIter := store.IterKeys() - for keyIter.Next() { - queue.AddUpsert(keyIter.Key()) + subId := r.subId + r.subId++ + r.subscribers[subId] = sub + + // Populate the queue with the initial set of keys that are already + // in the store. Done under the resource lock to synchronize with delta + // processing to make sure we don't end up queuing the key as initial key, + // processing it and then requeuing it again. + initialKeys := store.IterKeys() + for initialKeys.Next() { + sub.enqueueKey(initialKeys.Key()) } // If the informer is already synchronized, then the above set of keys is a consistent // snapshot and we can queue the sync entry. If we're not yet synchronized the sync will // be queued from startWhenNeeded() after the informer has synchronized. if r.synchronized { - queue.AddSync() + sub.enqueueSync() } r.mu.Unlock() - doneFinalizer := func(done *bool) { - // If you get here it is because an Event[T] was handed to a subscriber - // that forgot to call Event[T].Done(). - // - // Calling Done() is needed to mark the event as handled. This allows - // the next event for the same key to be handled and is used to clear - // rate limiting and retry counts of prior failures. - panic(fmt.Sprintf( - "%s has a broken event handler that did not call Done() "+ - "before event was garbage collected", - debugInfo)) - } + sub.processLoop(ctx, out, store) - loop: - for { - // Retrieve an item from the subscribers queue and then fetch the object - // from the store. - raw, shutdown := queue.Get() - if shutdown { - break - } - entry := raw.(queueEntry) - - var ( - // eventDoneSentinel is a heap allocated object referenced by Done(). - // If Done() is not called, a finalizer set on this object will be invoked - // which panics. If Done() is called, the finalizer is unset. 
- eventDoneSentinel = new(bool) - event Event[T] - ) - event.Done = func(err error) { - runtime.SetFinalizer(eventDoneSentinel, nil) - queue.eventDone(entry, err) - r.metricEventProcessed(event.Kind, err == nil) - } - - // Add a finalizer to catch forgotten calls to Done(). - runtime.SetFinalizer(eventDoneSentinel, doneFinalizer) - - switch entry := entry.(type) { - case syncEntry: - event.Kind = Sync - case deleteEntry: - event.Kind = Delete - event.Key = entry.key - event.Object = entry.obj.(T) - case upsertEntry: - obj, exists, err := store.GetByKey(entry.key) - // If the item didn't exist, then it's been deleted and a delete event will - // follow soon. - if err != nil || !exists { - event.Done(nil) - continue loop - } - event.Kind = Upsert - event.Key = entry.key - event.Object = obj - default: - panic(fmt.Sprintf("%T: unknown entry type %T", r, entry)) - } - - select { - case out <- event: - case <-ctx.Done(): - // Subscriber cancelled or resource is shutting down. We're not requiring - // the subscriber to drain the channel, so we're marking the event done here - // and not sending it. Will keep going until queue has been drained. - event.Done(nil) - } - } + r.mu.Lock() + delete(r.subscribers, subId) + r.mu.Unlock() }() // Fork a goroutine to wait for either the subscriber cancelling or the resource @@ -511,100 +442,324 @@ func (r *resource[T]) Events(ctx context.Context, opts ...EventsOpt) <-chan Even case <-r.ctx.Done(): case <-ctx.Done(): } - r.mu.Lock() - delete(r.queues, subId) - r.mu.Unlock() subCancel() - queue.ShutDownWithDrain() + sub.wq.ShutDownWithDrain() }() return out } -// keyQueue wraps the workqueue to implement the error retry logic for a single subscriber, -// e.g. it implements the eventDone() method called by Event[T].Done(). -type keyQueue struct { - workqueue.RateLimitingInterface - errorHandler ErrorHandler +type subscriber[T k8sRuntime.Object] struct { + r *resource[T] + debugInfo string + wq workqueue.RateLimitingInterface + options eventsOpts +} + +func (s *subscriber[T]) processLoop(ctx context.Context, out chan Event[T], store Store[T]) { + // Make sure to call ShutDown() in the end. Calling ShutDownWithDrain is not + // enough as DelayingQueue does not implement it, so without ShutDown() we'd + // leak the (*delayingType).waitingLoop. + defer s.wq.ShutDown() + + doneFinalizer := func(done *bool) { + // If you get here it is because an Event[T] was handed to a subscriber + // that forgot to call Event[T].Done(). + // + // Calling Done() is needed to mark the event as handled. This allows + // the next event for the same key to be handled and is used to clear + // rate limiting and retry counts of prior failures. + panic(fmt.Sprintf( + "%s has a broken event handler that did not call Done() "+ + "before event was garbage collected", + s.debugInfo)) + } + + // To synthesize delete events to the subscriber we keep track of the last known state + // of the object given to the subscriber. Objects are cleaned from this map when delete + // events are successfully processed. + var lastKnownObjects lastKnownObjects[T] + +loop: + for { + // Retrieve an item from the subscriber's queue and then fetch the object + // from the store.
+ workItem, shutdown := s.getWorkItem() + if shutdown { + break + } + + var event Event[T] + + switch workItem := workItem.(type) { + case syncWorkItem: + event.Kind = Sync + case keyWorkItem: + obj, exists, err := store.GetByKey(workItem.key) + if !exists || err != nil { + // The object no longer exists in the store and thus has been deleted. + deletedObject, ok := lastKnownObjects.Load(workItem.key) + if !ok { + // Object was never seen by the subscriber. Ignore the event. + s.wq.Done(workItem) + continue loop + } + event.Kind = Delete + event.Key = workItem.key + event.Object = deletedObject + } else { + lastKnownObjects.Store(workItem.key, obj) + event.Kind = Upsert + event.Key = workItem.key + event.Object = obj + } + default: + panic(fmt.Sprintf("%T: unknown work item %T", s.r, workItem)) + } + + // eventDoneSentinel is a heap allocated object referenced by Done(). + // If Done() is not called, a finalizer set on this object will be invoked + // which panics. If Done() is called, the finalizer is unset. + var eventDoneSentinel = new(bool) + event.Done = func(err error) { + runtime.SetFinalizer(eventDoneSentinel, nil) + + if err == nil && event.Kind == Delete { + // Deletion processed successfully. Remove it from the set of + // deleted objects unless it was replaced by an upsert or newer + // deletion. + lastKnownObjects.DeleteByUID(event.Key, event.Object) + } + + s.eventDone(workItem, err) + + s.r.metricEventProcessed(event.Kind, err == nil) + } + + // Add a finalizer to catch forgotten calls to Done(). + runtime.SetFinalizer(eventDoneSentinel, doneFinalizer) + + select { + case out <- event: + case <-ctx.Done(): + // Subscriber cancelled or resource is shutting down. We're not requiring + // the subscriber to drain the channel, so we're marking the event done here + // and not sending it. + event.Done(nil) + + // Drain the queue without further processing. + for { + _, shutdown := s.getWorkItem() + if shutdown { + return + } + } + } + } } -func (kq *keyQueue) AddSync() { - kq.Add(syncEntry{}) +func (s *subscriber[T]) getWorkItem() (e workItem, shutdown bool) { + var raw any + raw, shutdown = s.wq.Get() + if shutdown { + return + } + return raw.(workItem), false } -func (kq *keyQueue) AddUpsert(key Key) { - // The entries must be added by value and not by pointer in order for - // them to be compared by value and not by pointer. - kq.Add(upsertEntry{key}) +func (s *subscriber[T]) enqueueSync() { + s.wq.Add(syncWorkItem{}) } -func (kq *keyQueue) AddDelete(key Key, obj any) { - kq.Add(deleteEntry{key, obj}) +func (s *subscriber[T]) enqueueKey(key Key) { + s.wq.Add(keyWorkItem{key}) } -func (kq *keyQueue) eventDone(entry queueEntry, err error) { - // This is based on the example found in k8s.io/client-go/examples/workqueue/main.go. +func (s *subscriber[T]) eventDone(entry workItem, err error) { + // This is based on the example found in k8s.io/client-go/examples/workqueue/main.go. // Mark the object as done being processed. If it was marked dirty // during processing, it'll be processed again.
- defer kq.Done(entry) + defer s.wq.Done(entry) if err != nil { - numRequeues := kq.NumRequeues(entry) + numRequeues := s.wq.NumRequeues(entry) var action ErrorAction switch entry := entry.(type) { - case syncEntry: - action = kq.errorHandler(Key{}, numRequeues, err) - case upsertEntry: - action = kq.errorHandler(entry.key, numRequeues, err) - case deleteEntry: - action = kq.errorHandler(entry.key, numRequeues, err) + case syncWorkItem: + action = s.options.errorHandler(Key{}, numRequeues, err) + case keyWorkItem: + action = s.options.errorHandler(entry.key, numRequeues, err) default: panic(fmt.Sprintf("keyQueue: unhandled entry %T", entry)) } switch action { case ErrorActionRetry: - kq.AddRateLimited(entry) + s.wq.AddRateLimited(entry) case ErrorActionStop: - kq.ShutDown() + s.wq.ShutDown() case ErrorActionIgnore: - kq.Forget(entry) + s.wq.Forget(entry) default: - panic(fmt.Sprintf("keyQueue: unknown action %q from error handler %v", action, kq.errorHandler)) + panic(fmt.Sprintf("keyQueue: unknown action %q from error handler %v", action, s.options.errorHandler)) } } else { // As the object was processed successfully we can "forget" it. // This clears any rate limiter state associated with this object, so // it won't be throttled based on previous failure history. - kq.Forget(entry) + s.wq.Forget(entry) + } +} + +// lastKnownObjects stores the last known state of an object from a subscriber's +// perspective. It is used to emit delete events with the last known state of +// the object. +type lastKnownObjects[T k8sRuntime.Object] struct { + mu lock.RWMutex + objs map[Key]T +} + +func (l *lastKnownObjects[T]) Load(key Key) (obj T, ok bool) { + l.mu.RLock() + defer l.mu.RUnlock() + obj, ok = l.objs[key] + return +} + +func (l *lastKnownObjects[T]) Store(key Key, obj T) { + l.mu.Lock() + defer l.mu.Unlock() + if l.objs == nil { + l.objs = map[Key]T{} + } + l.objs[key] = obj +} + +// DeleteByUID removes the object, but only if the UID matches. UID +// might not match if the object has been re-created with the same key +// after deletion and thus Store'd again here. Once that incarnation +// is deleted, we will be here again and the UID will match. +func (l *lastKnownObjects[T]) DeleteByUID(key Key, objToDelete T) { + l.mu.Lock() + defer l.mu.Unlock() + + if obj, ok := l.objs[key]; ok { + if getUID(obj) == getUID(objToDelete) { + delete(l.objs, key) + } } } -// queueEntry restricts the set of types we use when type-switching over the +// workItem restricts the set of types we use when type-switching over the // queue entries, so that we'll get a compiler error on impossible types. // // The queue entries must be kept comparable and not be pointers as we want -// to be able to coalesce multiple upsertEntry's into a single element in the +// to be able to coalesce multiple keyWorkItems into a single element in the // queue. -type queueEntry interface { - isQueueEntry() +type workItem interface { + isWorkItem() } -type syncEntry struct{} +// syncWorkItem marks the store as synchronized and thus a 'Sync' event can be +// emitted to the subscriber. +type syncWorkItem struct{} -func (syncEntry) isQueueEntry() {} +func (syncWorkItem) isWorkItem() {} -type upsertEntry struct { +// keyWorkItem marks work for a specific key. Whether this is an upsert or delete +// depends on the state of the store at the time this work item is processed.
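+// For example, several rapid updates to the same key coalesce into a single +// keyWorkItem in the queue, and the subscriber observes only the latest state +// of the object once the item is processed.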
+type keyWorkItem struct { key Key } -func (upsertEntry) isQueueEntry() {} +func (keyWorkItem) isWorkItem() {} + +func (r *resource[T]) newInformer() (cache.Store, cache.Controller) { + clientState := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + opts := cache.DeltaFIFOOptions{KeyFunction: cache.MetaNamespaceKeyFunc, KnownObjects: clientState} + fifo := cache.NewDeltaFIFOWithOptions(opts) + transformer := r.opts.transform + cacheMutationDetector := cache.NewCacheMutationDetector(fmt.Sprintf("%T", r)) + cfg := &cache.Config{ + Queue: fifo, + ListerWatcher: r.lw, + ObjectType: r.opts.sourceObj(), + FullResyncPeriod: 0, + RetryOnError: false, + Process: func(obj interface{}, isInInitialList bool) error { + // Processing of the deltas is done under the resource mutex. This + // avoids emitting double events for new subscribers that list the + // keys in the store. + r.mu.RLock() + defer r.mu.RUnlock() + + for _, d := range obj.(cache.Deltas) { + var obj interface{} + if transformer != nil { + var err error + if obj, err = transformer(d.Object); err != nil { + return err + } + } else { + obj = d.Object + } -type deleteEntry struct { - key Key - obj any + // In CI we detect if the objects were modified and panic + // (e.g. when KUBE_CACHE_MUTATION_DETECTOR is set); + // this is a no-op in production environments. + cacheMutationDetector.AddObject(obj) + + key := NewKey(obj) + + switch d.Type { + case cache.Sync, cache.Added, cache.Updated: + metric := resources.MetricCreate + if d.Type != cache.Added { + metric = resources.MetricUpdate + } + r.metricEventReceived(metric, true, false) + + if _, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + } else { + if err := clientState.Add(obj); err != nil { + return err + } + } + + for _, sub := range r.subscribers { + sub.enqueueKey(key) + } + case cache.Deleted: + r.metricEventReceived(resources.MetricDelete, true, false) + + if err := clientState.Delete(obj); err != nil { + return err + } + + for _, sub := range r.subscribers { + sub.enqueueKey(key) + } + } + } + return nil + }, + } + return clientState, cache.New(cfg) } -func (deleteEntry) isQueueEntry() {} +func getUID(obj k8sRuntime.Object) types.UID { + meta, err := meta.Accessor(obj) + if err != nil { + // If we get here, it means the object does not implement ObjectMeta, and thus + // the Resource[T] has been instantiated with an unsuitable type T. + // As this would be caught immediately during development, panicking is the + // way to handle it. + panic(fmt.Sprintf("BUG: meta.Accessor() failed on %T: %s", obj, err)) + } + return meta.GetUID() +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/resource_ctors.go b/vendor/github.com/cilium/cilium/pkg/k8s/resource_ctors.go index 56bb6a2ebda..c8cde783b07 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/resource_ctors.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/resource_ctors.go @@ -21,6 +21,7 @@ import ( slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" slim_discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1" slim_discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1" + slim_networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1" "github.com/cilium/cilium/pkg/k8s/types" "github.com/cilium/cilium/pkg/k8s/utils" "github.com/cilium/cilium/pkg/option" @@ -255,3 +256,13 @@ func CiliumSlimEndpointResource(lc hive.Lifecycle, cs client.Clientset, opts ...
}, TransformToCiliumEndpoint), ), nil } + +func IngressClassResource(lc hive.Lifecycle, cs client.Clientset, opts ...func(*metav1.ListOptions)) (resource.Resource[*slim_networkingv1.IngressClass], error) { + if !cs.IsEnabled() { + return nil, nil + } + lw := utils.ListerWatcherWithModifiers( + utils.ListerWatcherFromTyped[*slim_networkingv1.IngressClassList](cs.Slim().NetworkingV1().IngressClasses()), opts..., + ) + return resource.New[*slim_networkingv1.IngressClass](lc, lw, resource.WithMetric("IngressClass")), nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go b/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go index e18c288b2a6..8b2ed1aafef 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/service_cache.go @@ -10,6 +10,7 @@ import ( "github.com/sirupsen/logrus" "golang.org/x/exp/slices" core_v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/datapath/types" @@ -274,6 +275,23 @@ func (s *ServiceCache) DeleteService(k8sSvc *slim_corev1.Service, swg *lock.Stop } } +// LocalServices returns the list of known services that are not marked as +// global (i.e., whose backends are all in the local cluster only). +func (s *ServiceCache) LocalServices() sets.Set[ServiceID] { + ids := sets.New[ServiceID]() + + s.mutex.RLock() + defer s.mutex.RUnlock() + + for id, svc := range s.services { + if !svc.IncludeExternal { + ids.Insert(id) + } + } + + return ids +} + // UpdateEndpoints parses a Kubernetes endpoints and adds or updates it in the // ServiceCache. Returns the ServiceID unless the Kubernetes endpoints could not // be parsed and a bool to indicate whether the endpoints was changed in the diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/watchers/resources/resources.go b/vendor/github.com/cilium/cilium/pkg/k8s/watchers/resources/resources.go new file mode 100644 index 00000000000..95e4b1ca4af --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/watchers/resources/resources.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package resources contains exported resource identifiers and metric resource labels related to +// K8s watchers. +package resources + +const ( + // K8sAPIGroupServiceV1Core is the identifier for K8s resources of type core/v1/Service. + K8sAPIGroupServiceV1Core = "core/v1::Service" + // K8sAPIGroupPodV1Core is the identifier for K8s resources of type core/v1/Pod. + K8sAPIGroupPodV1Core = "core/v1::Pods" + // K8sAPIGroupSecretV1Core is the identifier for K8s resources of type core/v1/Secret. + K8sAPIGroupSecretV1Core = "core/v1::Secrets" + // K8sAPIGroupEndpointSliceOrEndpoint is the combined identifier for K8s EndpointSlice and + // Endpoint resources. + K8sAPIGroupEndpointSliceOrEndpoint = "EndpointSliceOrEndpoint" + + // MetricCNP is the scope label for CiliumNetworkPolicy event metrics. + MetricCNP = "CiliumNetworkPolicy" + // MetricCCNP is the scope label for CiliumClusterwideNetworkPolicy event metrics. + MetricCCNP = "CiliumClusterwideNetworkPolicy" + // MetricCCG is the scope label for CiliumCIDRGroup event metrics. + MetricCCG = "CiliumCIDRGroup" + // MetricService is the scope label for Kubernetes Service event metrics. + MetricService = "Service" + // MetricEndpoint is the scope label for Kubernetes Endpoint event metrics.
+ MetricEndpoint = "Endpoint" + // MetricEndpointSlice is the scope label for Kubernetes EndpointSlice event metrics. + MetricEndpointSlice = "EndpointSlice" + + // MetricCreate the label for watcher metrics related to create events. + MetricCreate = "create" + // MetricUpdate the label for watcher metrics related to update events. + MetricUpdate = "update" + // MetricDelete the label for watcher metrics related to delete events. + MetricDelete = "delete" +) diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go index 245973a678a..43e2f847843 100644 --- a/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go +++ b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go @@ -127,29 +127,29 @@ func (k *kvstoreBackend) DeleteAllKeys(ctx context.Context) { } // AllocateID allocates a key->ID mapping in the kvstore. -func (k *kvstoreBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) error { +func (k *kvstoreBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (allocator.AllocatorKey, error) { // create /id/ and fail if it already exists keyPath := path.Join(k.idPrefix, id.String()) keyEncoded := []byte(k.backend.Encode([]byte(key.GetKey()))) success, err := k.backend.CreateOnly(ctx, keyPath, keyEncoded, false) if err != nil || !success { - return fmt.Errorf("unable to create master key '%s': %s", keyPath, err) + return nil, fmt.Errorf("unable to create master key '%s': %s", keyPath, err) } - return nil + return key, nil } // AllocateID allocates a key->ID mapping in the kvstore. -func (k *kvstoreBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error { +func (k *kvstoreBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) (allocator.AllocatorKey, error) { // create /id/ and fail if it already exists keyPath := path.Join(k.idPrefix, id.String()) keyEncoded := []byte(k.backend.Encode([]byte(key.GetKey()))) success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, keyEncoded, false, lock) if err != nil || !success { - return fmt.Errorf("unable to create master key '%s': %s", keyPath, err) + return nil, fmt.Errorf("unable to create master key '%s': %s", keyPath, err) } - return nil + return key, nil } // AcquireReference marks that this node is using this key->ID mapping in the kvstore. diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go index c05cf985daf..3689e593879 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go @@ -8,8 +8,12 @@ import ( "net/netip" "strconv" "strings" + "sync" + + "github.com/hashicorp/golang-lru/v2/simplelru" "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/lock" ) // maskedIPToLabelString is the base method for serializing an IP + prefix into @@ -17,45 +21,47 @@ import ( // // For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't // support colons inside the name section of a label. 
-func maskedIPToLabelString(ip netip.Addr, prefix int) string { +func maskedIPToLabel(ip netip.Addr, prefix int) labels.Label { ipStr := ip.String() - ipNoColons := strings.Replace(ipStr, ":", "-", -1) - - // EndpointSelector keys can't start or end with a "-", so insert a - // zero at the start or end if it would otherwise have a "-" at that - // position. - preZero := "" - postZero := "" - if ipNoColons[0] == '-' { - preZero = "0" - } - if ipNoColons[len(ipNoColons)-1] == '-' { - postZero = "0" - } + var str strings.Builder str.Grow( - len(labels.LabelSourceCIDR) + - len(preZero) + - len(ipNoColons) + - len(postZero) + + 1 /* preZero */ + + len(ipStr) + + 1 /* postZero */ + 2 /*len of prefix*/ + - 2, /* ':' '/' */ + 1, /* '/' */ ) - str.WriteString(labels.LabelSourceCIDR) - str.WriteRune(':') - str.WriteString(preZero) - str.WriteString(ipNoColons) - str.WriteString(postZero) + + for i := 0; i < len(ipStr); i++ { + if ipStr[i] == ':' { + // EndpointSelector keys can't start or end with a "-", so insert a + // zero at the start or end if it would otherwise have a "-" at that + // position. + if i == 0 { + str.WriteByte('0') + str.WriteByte('-') + continue + } + if i == len(ipStr)-1 { + str.WriteByte('-') + str.WriteByte('0') + continue + } + str.WriteByte('-') + } else { + str.WriteByte(ipStr[i]) + } + } str.WriteRune('/') str.WriteString(strconv.Itoa(prefix)) - return str.String() + return labels.Label{Key: str.String(), Source: labels.LabelSourceCIDR} } // IPStringToLabel parses a string and returns it as a CIDR label. // // If ip is not a valid IP address or CIDR Prefix, returns an error. func IPStringToLabel(ip string) (labels.Label, error) { - var lblString string // factored out of netip.ParsePrefix to avoid allocating an empty netip.Prefix in case it's // an IP and not a CIDR. i := strings.LastIndexByte(ip, '/') @@ -64,15 +70,14 @@ func IPStringToLabel(ip string) (labels.Label, error) { if err != nil { return labels.Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err) } - lblString = maskedIPToLabelString(parsedIP, parsedIP.BitLen()) + return maskedIPToLabel(parsedIP, parsedIP.BitLen()), nil } else { parsedPrefix, err := netip.ParsePrefix(ip) if err != nil { return labels.Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err) } - lblString = maskedIPToLabelString(parsedPrefix.Masked().Addr(), parsedPrefix.Bits()) + return maskedIPToLabel(parsedPrefix.Masked().Addr(), parsedPrefix.Bits()), nil } - return labels.ParseLabel(lblString), nil } // GetCIDRLabels turns a CIDR into a set of labels representing the cidr itself @@ -85,23 +90,94 @@ func IPStringToLabel(ip string) (labels.Label, error) { // // The identity reserved:world is always added as it includes any CIDR. func GetCIDRLabels(prefix netip.Prefix) labels.Labels { + once.Do(func() { + // simplelru.NewLRU fails only when given a negative size, so we can skip the error check + cidrLabelsCache, _ = simplelru.NewLRU[netip.Prefix, []labels.Label](cidrLabelsCacheMaxSize, nil) + }) + + addr := prefix.Addr() ones := prefix.Bits() - result := make([]string, 0, ones+1) + lbls := make(labels.Labels, 1 /* this CIDR */ +ones /* the prefixes */ +1 /*world label*/) // If ones is zero, then it's the default CIDR prefix /0 which should // just be regarded as reserved:world. In all other cases, we need // to generate the set of prefixes starting from the /0 up to the // specified prefix length. 
- if ones > 0 { - ip := prefix.Addr() - for i := 0; i <= ones; i++ { - p := netip.PrefixFrom(ip, i) - label := maskedIPToLabelString(p.Masked().Addr(), i) - result = append(result, label) + if ones == 0 { + lbls[worldLabel.Key] = worldLabel + return lbls + } + + computeCIDRLabels( + cidrLabelsCache, + lbls, + nil, // avoid allocating space for the intermediate results until we need it + addr, + ones, + ) + lbls[worldLabel.Key] = worldLabel + + return lbls +} + +var ( + // cidrLabelsCache stores the partial computations for CIDR labels. + // This both avoids repeatedly computing the prefixes and makes sure the + // CIDR strings are reused to reduce memory usage. + // Stored in an LRU map to limit memory usage. + // + // Stores e.g. for prefix "10.0.0.0/8" the labels ["10.0.0.0/8", ..., "0.0.0.0/0"]. + cidrLabelsCache *simplelru.LRU[netip.Prefix, []labels.Label] + + // mutex to serialize concurrent accesses to the cidrLabelsCache. + mu lock.Mutex +) + +const cidrLabelsCacheMaxSize = 8192 + +var ( + once sync.Once + + worldLabel = labels.Label{Key: labels.IDNameWorld, Source: labels.LabelSourceReserved} +) + +func computeCIDRLabels(cache *simplelru.LRU[netip.Prefix, []labels.Label], lbls labels.Labels, results []labels.Label, addr netip.Addr, ones int) []labels.Label { + if ones < 0 { + return results + } + + prefix, _ := addr.Prefix(ones) + + mu.Lock() + cachedLbls, ok := cache.Get(prefix) + mu.Unlock() + if ok { + for _, lbl := range cachedLbls { + lbls[lbl.Key] = lbl + } + if results == nil { + return cachedLbls + } else { + return append(results, cachedLbls...) } } - result = append(result, labels.LabelSourceReserved+":"+labels.IDNameWorld) + // Compute the label for this prefix (e.g. "cidr:10.0.0.0/8") + prefixLabel := maskedIPToLabel(prefix.Addr(), ones) + lbls[prefixLabel.Key] = prefixLabel + + // Keep computing the rest (e.g. "cidr:10.0.0.0/7", ...). + results = computeCIDRLabels( + cache, + lbls, + append(results, prefixLabel), + prefix.Addr(), ones-1, + ) + + // Cache the resulting labels derived from this prefix, e.g. /8, /7, ... + mu.Lock() + cache.Add(prefix, results[len(results)-ones-1:]) + mu.Unlock() - return labels.NewLabelsFromModel(result) + return results } diff --git a/vendor/github.com/cilium/cilium/pkg/labels/labels.go b/vendor/github.com/cilium/cilium/pkg/labels/labels.go index 1eaf46e47ee..ad3f89fd843 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/labels.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/labels.go @@ -10,6 +10,8 @@ import ( "net" "sort" "strings" + + "golang.org/x/exp/slices" ) const ( @@ -395,6 +397,15 @@ func NewLabelsFromModel(base []string) Labels { return lbls } +// FromSlice creates labels from a slice of labels.
+func FromSlice(labels []Label) Labels { + lbls := make(Labels, len(labels)) + for _, lbl := range labels { + lbls[lbl.Key] = lbl + } + return lbls +} + // NewLabelsFromSortedList returns labels based on the output of SortedList() func NewLabelsFromSortedList(list string) Labels { return NewLabelsFromModel(strings.Split(list, ";")) @@ -487,7 +498,7 @@ func (l Labels) SortedList() []byte { for k := range l { keys = append(keys, k) } - sort.Strings(keys) + slices.Sort(keys) b := make([]byte, 0, len(keys)*2) buf := bytes.NewBuffer(b) diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go index f0bc6e1e61d..68f744f9b9e 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go @@ -658,6 +658,10 @@ const ( DestinationIP = "destinationIP" + LocalIP = "localIP" + + RemoteIP = "remoteIP" + SourceCIDR = "sourceCIDR" // DestinationCIDR is a destination CIDR diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go index 7e32c4c11a3..016f2bc58df 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go @@ -24,13 +24,14 @@ var ( NoOpMetric prometheus.Metric = &mockMetric{} NoOpCollector prometheus.Collector = &collector{} - NoOpCounter metricpkg.Counter = &counter{NoOpMetric, NoOpCollector} - NoOpCounterVec metricpkg.Vec[metricpkg.Counter] = &counterVec{NoOpCollector} - NoOpObserver metricpkg.Observer = &observer{} - NoOpHistogram metricpkg.Histogram = &histogram{NoOpCollector} - NoOpObserverVec metricpkg.Vec[metricpkg.Observer] = &observerVec{NoOpCollector} - NoOpGauge metricpkg.Gauge = &gauge{NoOpMetric, NoOpCollector} - NoOpGaugeVec metricpkg.Vec[metricpkg.Gauge] = &gaugeVec{NoOpCollector} + NoOpCounter metricpkg.Counter = &counter{NoOpMetric, NoOpCollector} + NoOpCounterVec metricpkg.Vec[metricpkg.Counter] = &counterVec{NoOpCollector} + NoOpObserver metricpkg.Observer = &observer{} + NoOpHistogram metricpkg.Histogram = &histogram{NoOpCollector} + NoOpObserverVec metricpkg.Vec[metricpkg.Observer] = &observerVec{NoOpCollector} + NoOpGauge metricpkg.Gauge = &gauge{NoOpMetric, NoOpCollector} + NoOpGaugeVec metricpkg.Vec[metricpkg.Gauge] = &gaugeVec{NoOpCollector} + NoOpGaugeDeletableVec metricpkg.DeletableVec[metricpkg.Gauge] = &gaugeDeletableVec{gaugeVec{NoOpCollector}} ) // Metric @@ -156,6 +157,24 @@ func (g *gauge) Opts() metricpkg.Opts { return metricpkg.Opts{} } // GaugeVec +type gaugeDeletableVec struct { + gaugeVec +} + +func (*gaugeDeletableVec) Delete(ll prometheus.Labels) bool { + return false +} + +func (*gaugeDeletableVec) DeleteLabelValues(lvs ...string) bool { + return false +} + +func (*gaugeDeletableVec) DeletePartialMatch(labels prometheus.Labels) int { + return 0 +} + +func (*gaugeDeletableVec) Reset() {} + type gaugeVec struct { prometheus.Collector } diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go index e75db20edb7..6ca42b5c9ee 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go @@ -19,6 +19,7 @@ import ( "github.com/sirupsen/logrus" "github.com/cilium/cilium/api/v1/models" + "github.com/cilium/cilium/pkg/datapath/linux/probes" "github.com/cilium/cilium/pkg/metrics/metric" 
"github.com/cilium/cilium/pkg/promise" "github.com/cilium/cilium/pkg/version" @@ -246,11 +247,11 @@ var ( // NodeConnectivityStatus is the connectivity status between local node to // other node intra or inter cluster. - NodeConnectivityStatus = NoOpGaugeVec + NodeConnectivityStatus = NoOpGaugeDeletableVec // NodeConnectivityLatency is the connectivity latency between local node to // other node intra or inter cluster. - NodeConnectivityLatency = NoOpGaugeVec + NodeConnectivityLatency = NoOpGaugeDeletableVec // Endpoint @@ -258,6 +259,9 @@ var ( // It must be thread-safe. Endpoint metric.GaugeFunc + // EndpointMaxIfindex is the maximum observed interface index for existing endpoints + EndpointMaxIfindex = NoOpGauge + // EndpointRegenerationTotal is a count of the number of times any endpoint // has been regenerated and success/fail outcome EndpointRegenerationTotal = NoOpCounterVec @@ -570,9 +574,10 @@ var ( type LegacyMetrics struct { BootstrapTimes metric.Vec[metric.Observer] APIInteractions metric.Vec[metric.Observer] - NodeConnectivityStatus metric.Vec[metric.Gauge] - NodeConnectivityLatency metric.Vec[metric.Gauge] + NodeConnectivityStatus metric.DeletableVec[metric.Gauge] + NodeConnectivityLatency metric.DeletableVec[metric.Gauge] Endpoint metric.GaugeFunc + EndpointMaxIfindex metric.Gauge EndpointRegenerationTotal metric.Vec[metric.Counter] EndpointStateCount metric.Vec[metric.Gauge] EndpointRegenerationTimeStats metric.Vec[metric.Observer] @@ -1293,6 +1298,23 @@ func NewLegacyMetrics() *LegacyMetrics { }), } + ifindexOpts := metric.GaugeOpts{ + ConfigName: Namespace + "_endpoint_max_ifindex", + Disabled: true, + Namespace: Namespace, + Name: "endpoint_max_ifindex", + Help: "Maximum interface index observed for existing endpoints", + } + // On kernels which do not provide ifindex via the FIB, Cilium needs + // to store it in the CT map, with a field limit of max(uint16). + // The EndpointMaxIfindex metric can be used to determine if that + // limit is approaching. However, it should only be enabled by + // default if we observe that the FIB is not providing the ifindex. 
+ if probes.HaveFibIfindex() != nil { + ifindexOpts.Disabled = false + } + lm.EndpointMaxIfindex = metric.NewGauge(ifindexOpts) + v := version.GetCiliumVersion() lm.VersionMetric.WithLabelValues(v.Version, v.Revision, v.Arch) @@ -1301,6 +1323,7 @@ func NewLegacyMetrics() *LegacyMetrics { NodeConnectivityStatus = lm.NodeConnectivityStatus NodeConnectivityLatency = lm.NodeConnectivityLatency Endpoint = lm.Endpoint + EndpointMaxIfindex = lm.EndpointMaxIfindex EndpointRegenerationTotal = lm.EndpointRegenerationTotal EndpointStateCount = lm.EndpointStateCount EndpointRegenerationTimeStats = lm.EndpointRegenerationTimeStats diff --git a/vendor/github.com/cilium/cilium/pkg/monitor/logrecord.go b/vendor/github.com/cilium/cilium/pkg/monitor/logrecord.go index fd92f3cef02..fbff892bb67 100644 --- a/vendor/github.com/cilium/cilium/pkg/monitor/logrecord.go +++ b/vendor/github.com/cilium/cilium/pkg/monitor/logrecord.go @@ -8,7 +8,7 @@ import ( "fmt" "strings" - "github.com/miekg/dns" + "github.com/cilium/dns" "github.com/cilium/cilium/pkg/proxy/accesslog" ) diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/node.go b/vendor/github.com/cilium/cilium/pkg/node/types/node.go index ace7f7c49e8..5d7b382083a 100644 --- a/vendor/github.com/cilium/cilium/pkg/node/types/node.go +++ b/vendor/github.com/cilium/cilium/pkg/node/types/node.go @@ -581,7 +581,7 @@ func (n *Node) GetIPv6AllocCIDRs() []*cidr.CIDR { if n.IPv6AllocCIDR != nil { result = append(result, n.IPv6AllocCIDR) } - if len(n.IPv4SecondaryAllocCIDRs) > 0 { + if len(n.IPv6SecondaryAllocCIDRs) > 0 { result = append(result, n.IPv6SecondaryAllocCIDRs...) } return result diff --git a/vendor/github.com/cilium/cilium/pkg/option/config.go b/vendor/github.com/cilium/cilium/pkg/option/config.go index 9619e04eafc..d0594423845 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/config.go +++ b/vendor/github.com/cilium/cilium/pkg/option/config.go @@ -107,6 +107,9 @@ const ( // ConntrackGCInterval is the name of the ConntrackGCInterval option ConntrackGCInterval = "conntrack-gc-interval" + // ConntrackGCMaxInterval is the name of the ConntrackGCMaxInterval option + ConntrackGCMaxInterval = "conntrack-gc-max-interval" + // DebugArg is the argument enables debugging mode DebugArg = "debug" @@ -652,6 +655,10 @@ const ( // PolicyMapEntriesName configures max entries for BPF policymap. PolicyMapEntriesName = "bpf-policy-map-max" + // PolicyMapFullReconciliationInterval sets the interval for performing the full + // reconciliation of the endpoint policy map. + PolicyMapFullReconciliationIntervalName = "bpf-policy-map-full-reconciliation-interval" + // SockRevNatEntriesName configures max entries for BPF sock reverse nat // entries. SockRevNatEntriesName = "bpf-sock-rev-map-max" @@ -759,6 +766,9 @@ const ( // EnableWireguard is the name of the option to enable wireguard EnableWireguard = "enable-wireguard" + // WireguardEncapsulate enables encapsulation before encryption + WireguardEncapsulate = "wireguard-encapsulate" + // EnableL2Announcements is the name of the option to enable l2 announcements EnableL2Announcements = "enable-l2-announcements" @@ -1547,6 +1557,10 @@ type DaemonConfig struct { // endpoint may allow traffic to exchange traffic with. PolicyMapEntries int + // PolicyMapFullReconciliationInterval is the interval at which to perform + // the full reconciliation of the endpoint policy map. 
+ PolicyMapFullReconciliationInterval time.Duration + // SockRevNatEntries is the maximum number of sock rev nat mappings // allowed in the BPF rev nat table SockRevNatEntries int @@ -1700,6 +1714,9 @@ type DaemonConfig struct { // EnableWireguardUserspaceFallback enables the fallback to the userspace implementation EnableWireguardUserspaceFallback bool + // WireguardEncapsulate encapsulates a packet (VXLAN/Geneve) before encrypting it + WireguardEncapsulate bool + // EnableL2Announcements enables L2 announcement of service IPs EnableL2Announcements bool @@ -1924,6 +1941,10 @@ type DaemonConfig struct { // interval ConntrackGCInterval time.Duration + // ConntrackGCMaxInterval if set limits the automatic GC interval calculation to + // the specified maximum value. + ConntrackGCMaxInterval time.Duration + // K8sEventHandover enables use of the kvstore to optimize Kubernetes // event handling by listening for k8s events in the operator and // mirroring it into the kvstore for reduced overhead in large @@ -2722,7 +2743,7 @@ func (c *DaemonConfig) DirectRoutingDeviceRequired() bool { return true } - return (c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard) && !c.TunnelingEnabled() + return c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard } func (c *DaemonConfig) validateIPv6ClusterAllocCIDR() error { @@ -2978,6 +2999,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.IPv6MCastDevice = vp.GetString(IPv6MCastDevice) c.EnableIPSec = vp.GetBool(EnableIPSecName) c.EnableWireguard = vp.GetBool(EnableWireguard) + c.WireguardEncapsulate = vp.GetBool(WireguardEncapsulate) c.EnableL2Announcements = vp.GetBool(EnableL2Announcements) c.L2AnnouncerLeaseDuration = vp.GetDuration(L2AnnouncerLeaseDuration) c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline) @@ -3323,6 +3345,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } c.ConntrackGCInterval = vp.GetDuration(ConntrackGCInterval) + c.ConntrackGCMaxInterval = vp.GetDuration(ConntrackGCMaxInterval) if m, err := command.GetStringMapStringE(vp, KVStoreOpt); err != nil { log.Fatalf("unable to parse %s: %s", KVStoreOpt, err) @@ -3717,6 +3740,7 @@ func (c *DaemonConfig) calculateBPFMapSizes(vp *viper.Viper) error { c.NATMapEntriesGlobal = vp.GetInt(NATMapEntriesGlobalName) c.NeighMapEntriesGlobal = vp.GetInt(NeighMapEntriesGlobalName) c.PolicyMapEntries = vp.GetInt(PolicyMapEntriesName) + c.PolicyMapFullReconciliationInterval = vp.GetDuration(PolicyMapFullReconciliationIntervalName) c.SockRevNatEntries = vp.GetInt(SockRevNatEntriesName) c.LBMapEntries = vp.GetInt(LBMapEntriesName) c.LBServiceMapEntries = vp.GetInt(LBServiceMapMaxEntries) diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go b/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go index d41adf1b635..a717f498a76 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go @@ -23,6 +23,11 @@ const ( PortProtocolAny = "0/ANY" ) +// IsAny returns true if an L4Proto represents ANY protocol +func (l4 L4Proto) IsAny() bool { + return l4 == ProtoAny || string(l4) == "" +} + // PortProtocol specifies an L4 port with an optional transport protocol type PortProtocol struct { // Port is an L4 port number. 
For now the string will be strictly @@ -56,7 +61,7 @@ func (p PortProtocol) Covers(other PortProtocol) bool { return false } if p.Protocol != other.Protocol { - return p.Protocol == "" || p.Protocol == ProtoAny + return p.Protocol.IsAny() } return true } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/l4.go b/vendor/github.com/cilium/cilium/pkg/policy/l4.go index ec56801aeaf..7b1a2b91dba 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/l4.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/l4.go @@ -488,7 +488,7 @@ type ChangeState struct { // 'p.PolicyMapState' using denyPreferredInsertWithChanges(). // Keys and old values of any added or deleted entries are added to 'changes'. // The implementation of 'identities' is also in a locked state. -func (l4Filter *L4Filter) toMapState(p *EndpointPolicy, identities Identities, features policyFeatures, entryCb entryCallback, changes ChangeState) { +func (l4Filter *L4Filter) toMapState(p *EndpointPolicy, features policyFeatures, entryCb entryCallback, changes ChangeState) { port := uint16(l4Filter.Port) proto := uint8(l4Filter.U8Proto) @@ -552,7 +552,7 @@ func (l4Filter *L4Filter) toMapState(p *EndpointPolicy, identities Identities, f if cs.IsWildcard() { keyToAdd.Identity = 0 if entryCb(keyToAdd, &entry) { - p.PolicyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, identities, features, changes) + p.PolicyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, p.SelectorCache, features, changes) if port == 0 { // Allow-all @@ -582,7 +582,7 @@ func (l4Filter *L4Filter) toMapState(p *EndpointPolicy, identities Identities, f for _, id := range idents { keyToAdd.Identity = id.Uint32() if entryCb(keyToAdd, &entry) { - p.PolicyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, identities, features, changes) + p.PolicyMapState.denyPreferredInsertWithChanges(keyToAdd, entry, p.SelectorCache, features, changes) } } } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go b/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go index 0803558cac7..f5efc942cb6 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go @@ -50,7 +50,7 @@ const ( type MapState map[Key]MapStateEntry type Identities interface { - GetLabelsLocked(identity.NumericIdentity) labels.LabelArray + GetNetsLocked(identity.NumericIdentity) []*net.IPNet } // Key is the userspace representation of a policy key in BPF. It is @@ -132,9 +132,6 @@ type MapStateEntry struct { // dependents contains the keys for entries create based on this entry. These entries // will be deleted once all of the owners are deleted. dependents Keys - - // cachedNets caches the subnets (if any) associated with this MapStateEntry. - cachedNets []*net.IPNet } // NewMapStateEntry creates a map state entry. If redirect is true, the @@ -189,48 +186,26 @@ func (e *MapStateEntry) HasDependent(key Key) bool { return ok } +var worldNets = []*net.IPNet{ + {IP: net.IPv4zero, Mask: net.CIDRMask(0, net.IPv4len*8)}, + {IP: net.IPv6zero, Mask: net.CIDRMask(0, net.IPv6len*8)}, +} + // getNets returns the most specific CIDR for an identity. For the "World" identity // it returns both IPv4 and IPv6. -func (e *MapStateEntry) getNets(identities Identities, ident uint32) []*net.IPNet { - // Caching results is not dangerous in this situation as the entry - // is ephemerally tied to the lifecycle of the MapState object that - // it will be in. 
- if e.cachedNets != nil { - return e.cachedNets - } +func getNets(identities Identities, ident uint32) []*net.IPNet { + // World identities are handled explicitly for two reasons: + // 1. 'identities' may be nil, but world identities are still expected to be considered + // 2. SelectorCache may not be informed of reserved/world identities in all test cases id := identity.NumericIdentity(ident) if id == identity.ReservedIdentityWorld { - e.cachedNets = []*net.IPNet{ - {IP: net.IPv4zero, Mask: net.CIDRMask(0, net.IPv4len*8)}, - {IP: net.IPv6zero, Mask: net.CIDRMask(0, net.IPv6len*8)}, - } - return e.cachedNets + return worldNets } // CIDR identities have a local scope, so we can skip the rest if id is not of local scope. if !id.HasLocalScope() || identities == nil { return nil } - lbls := identities.GetLabelsLocked(id) - var ( - maskSize int - mostSpecificCidr *net.IPNet - ) - for _, lbl := range lbls { - if lbl.Source == labels.LabelSourceCIDR { - _, netIP, err := net.ParseCIDR(lbl.Key) - if err == nil { - if ms, _ := netIP.Mask.Size(); ms > maskSize { - mostSpecificCidr = netIP - maskSize = ms - } - } - } - } - if mostSpecificCidr != nil { - e.cachedNets = []*net.IPNet{mostSpecificCidr} - return e.cachedNets - } - return nil + return identities.GetNetsLocked(id) } // AddDependent adds 'key' to the set of dependent keys. @@ -383,7 +358,6 @@ func (e MapStateEntry) String() string { func (keys MapState) denyPreferredInsert(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures) { // Enforce nil values from NewMapStateEntry newEntry.dependents = nil - newEntry.cachedNets = nil keys.denyPreferredInsertWithChanges(newKey, newEntry, identities, features, ChangeState{}) } @@ -485,18 +459,72 @@ func (keys MapState) deleteKeyWithChanges(key Key, owner MapStateOwner, changes } } -// entryIdentityIsSupersetOf compares two entries and keys to see if the primary identity contains +// identityIsSupersetOf compares two identities to see if the primary identity contains // the compared identity. This means that either that primary identity is 0 (i.e. it is a superset // of every other identity), or one of the subnets of the primary identity fully contains or is // equal to one of the subnets in the compared identity (note:this covers cases like "reserved:world"). -func entryIdentityIsSupersetOf(primaryKey Key, primaryEntry MapStateEntry, compareKey Key, compareEntry MapStateEntry, identities Identities) bool { +func identityIsSupersetOf(primaryIdentity, compareIdentity uint32, identities Identities) bool { // If the identities are equal then neither is a superset (for the purposes of our business logic). - if primaryKey.Identity == compareKey.Identity { + if primaryIdentity == compareIdentity { return false } - return primaryKey.Identity == 0 && compareKey.Identity != 0 || - ip.NetsContainsAny(primaryEntry.getNets(identities, primaryKey.Identity), - compareEntry.getNets(identities, compareKey.Identity)) + + // Consider an identity that selects a broader CIDR as a superset of + // an identity that selects a narrower CIDR. For instance, an identity + // corresponding to 192.0.0.0/16 is a superset of the identity that + // corresponds to 192.0.2.3/32. + // + // The reasons we need to do this are surprisingly complex, taking into + // consideration design decisions around the handling of ToFQDNs policy + // and how L4PolicyMap/L4Filter structures cache the policies with + // respect to specific CIDRs.
More specifically: + // - At the time of initial L4Filter creation, it is not known which + // specific CIDRs (or corresponding identities) are selected by a + // toFQDNs rule in the policy engine. + // - It is possible to have a CIDR deny rule that should deny peers + // that are allowed by a ToFQDNs statement. The precedence rules in + // the API for such policy conflicts define that the deny should take + // precedence. + // - Consider a case where there is a deny rule for 192.0.0.0/16 with + // an allow rule for cilium.io, and one of the IP addresses for + // cilium.io is 192.0.2.3. + // - If the IP for cilium.io was known at initial policy computation + // time, then we would calculate the MapState from the L4Filters and + // immediately determine that there is a conflict between the + // L4Filter that denies 192.0.0.0/16 vs. the allow for 192.0.2.3. + // From this we could immediately discard the "allow to 192.0.2.3" + // policymap entry during policy calculation. This would satisfy the + // API constraint that deny rules take precedence over allow rules. + // However, this is not the case for ToFQDNs -- the IPs are not known + // until DNS resolution time by the selected application / endpoint. + // - In order to make ToFQDNs policy implementation efficient, it uses + // a shorter incremental policy computation path that attempts to + // directly implement the ToFQDNs allow into a MapState entry without + // reaching back up to the L4Filter layer to iterate all selectors + // to determine traffic reachability for this newly learned IP. + // - As such, when the new ToFQDNs allow for the 192.0.2.3 IP address + // is implemented, we must iterate back through all existing MapState + // entries to determine whether any of the other map entries already + // denies this traffic by virtue of the IP prefix being a superset of + // this new allow. This allows us to ensure that the broader CIDR + // deny semantics are correctly applied when there is a combination + // of CIDR deny rules and ToFQDNs allow rules. + // + // An alternative to this approach might be to change the ToFQDNs + // policy calculation layer to reference back to the L4Filter layer, + // and perhaps introduce additional CIDR caching somewhere there so + // that this policy computation can be efficient while handling DNS + // responses. As of the writing of this message, there is no + // active proposal to implement this alternative. As a result, any time + // there is an incremental policy update for a new map entry, we must + // iterate through all entries in the map and re-evaluate superset + // relationships for deny entries to ensure that policy precedence is + // correctly implemented between the new and old entries, taking into + // account whether the identities may represent CIDRs that have a + // superset relationship. + return primaryIdentity == 0 && compareIdentity != 0 || + ip.NetsContainsAny(getNets(identities, primaryIdentity), + getNets(identities, compareIdentity)) } // protocolsMatch checks to see if two given keys match on protocol.
@@ -543,7 +571,7 @@ func (keys MapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStat continue } if !v.IsDeny { - if entryIdentityIsSupersetOf(k, v, newKey, newEntry, identities) { + if identityIsSupersetOf(k.Identity, newKey.Identity, identities) { if newKey.PortProtoIsBroader(k) { // If this iterated-allow-entry is a superset of the new-entry // and it has a more specific port-protocol than the new-entry @@ -560,7 +588,7 @@ func (keys MapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStat newEntry.AddDependent(newKeyCpy) } } else if (newKey.Identity == k.Identity || - entryIdentityIsSupersetOf(newKey, newEntry, k, v, identities)) && + identityIsSupersetOf(newKey.Identity, k.Identity, identities)) && (newKey.PortProtoIsBroader(k) || newKey.PortProtoIsEqual(k)) { // If the new-entry is a superset (or equal) of the iterated-allow-entry and // the new-entry has a broader (or equal) port-protocol then we @@ -568,7 +596,7 @@ func (keys MapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStat keys.deleteKeyWithChanges(k, nil, changes) } } else if (newKey.Identity == k.Identity || - entryIdentityIsSupersetOf(k, v, newKey, newEntry, identities)) && + identityIsSupersetOf(k.Identity, newKey.Identity, identities)) && k.DestPort == 0 && k.Nexthdr == 0 && !v.HasDependent(newKey) { // If this iterated-deny-entry is a supserset (or equal) of the new-entry and @@ -581,7 +609,7 @@ func (keys MapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStat // but there *may* be performance tradeoffs. return } else if (newKey.Identity == k.Identity || - entryIdentityIsSupersetOf(newKey, newEntry, k, v, identities)) && + identityIsSupersetOf(newKey.Identity, k.Identity, identities)) && newKey.DestPort == 0 && newKey.Nexthdr == 0 && !newEntry.HasDependent(k) { // If this iterated-deny-entry is a subset (or equal) of the new-entry and @@ -605,7 +633,7 @@ func (keys MapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStat } // NOTE: We do not delete redundant allow entries. if v.IsDeny { - if entryIdentityIsSupersetOf(newKey, newEntry, k, v, identities) { + if identityIsSupersetOf(newKey.Identity, k.Identity, identities) { if k.PortProtoIsBroader(newKey) { // If the new-entry is *only* superset of the iterated-deny-entry // and the new-entry has a more specific port-protocol than the @@ -623,7 +651,7 @@ func (keys MapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStat keys.addDependentOnEntry(k, v, denyKeyCpy, changes) } } else if (k.Identity == newKey.Identity || - entryIdentityIsSupersetOf(k, v, newKey, newEntry, identities)) && + identityIsSupersetOf(k.Identity, newKey.Identity, identities)) && (k.PortProtoIsBroader(newKey) || k.PortProtoIsEqual(newKey)) && !v.HasDependent(newKey) { // If the iterated-deny-entry is a superset (or equal) of the new-entry and has a diff --git a/vendor/github.com/cilium/cilium/pkg/policy/repository.go b/vendor/github.com/cilium/cilium/pkg/policy/repository.go index fd7212638cd..d3838470701 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/repository.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/repository.go @@ -111,6 +111,9 @@ type Repository struct { Mutex lock.RWMutex rules ruleSlice + // rulesIndexByK8sUID indexes the rules by k8s UID. + rulesIndexByK8sUID map[string]*rule + // revision is the revision of the policy repository. It will be // incremented whenever the policy repository is changed. // Always positive (>0). 
@@ -190,10 +193,11 @@ func NewStoppedPolicyRepository( ) *Repository { selectorCache := NewSelectorCache(idAllocator, idCache) repo := &Repository{ - revision: 1, - selectorCache: selectorCache, - certManager: certManager, - secretManager: secretManager, + rulesIndexByK8sUID: map[string]*rule{}, + revision: 1, + selectorCache: selectorCache, + certManager: certManager, + secretManager: secretManager, } repo.policyCache = NewPolicyCache(repo, true) return repo @@ -367,6 +371,13 @@ func (p *Repository) AllowsEgressRLocked(ctx *SearchContext) api.Decision { func (p *Repository) SearchRLocked(lbls labels.LabelArray) api.Rules { result := api.Rules{} + if uid := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" { + r, ok := p.rulesIndexByK8sUID[uid] + if ok { + result = append(result, &r.Rule) + } + return result + } for _, r := range p.rules { if r.Labels.Contains(lbls) { result = append(result, &r.Rule) @@ -406,6 +417,9 @@ func (p *Repository) AddListLocked(rules api.Rules) (ruleSlice, uint64) { metadata: newRuleMetadata(), } newList[i] = newRule + if uid := rules[i].Labels.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" { + p.rulesIndexByK8sUID[uid] = newRule + } } p.rules = append(p.rules, newList...) @@ -516,6 +530,9 @@ func (p *Repository) DeleteByLabelsLocked(lbls labels.LabelArray) (ruleSlice, ui if deleted > 0 { p.BumpRevision() p.rules = new + if uid := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" { + delete(p.rulesIndexByK8sUID, uid) + } metrics.Policy.Sub(float64(deleted)) } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/resolve.go b/vendor/github.com/cilium/cilium/pkg/policy/resolve.go index a193d97891e..2e3f52ff853 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/resolve.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/resolve.go @@ -160,7 +160,7 @@ func (l4policy L4DirectionPolicy) toMapState(p *EndpointPolicy) { for _, l4 := range l4policy.PortRules { lookupDone := false proxyport := uint16(0) - l4.toMapState(p, p.SelectorCache, l4policy.features, func(keyFromFilter Key, entry *MapStateEntry) bool { + l4.toMapState(p, l4policy.features, func(keyFromFilter Key, entry *MapStateEntry) bool { // Fix up the proxy port for entries that need proxy redirection if entry.IsRedirectEntry() { if !lookupDone { @@ -189,19 +189,20 @@ type getProxyPortFunc func(*L4Filter) (proxyPort uint16, ok bool) // UpdateRedirects updates redirects in the EndpointPolicy's PolicyMapState by using the provided // function to obtain a proxy port number to use. Changes to 'p.PolicyMapState' are collected in // 'adds' and 'updated' so that they can be reverted when needed. 
-func (p *EndpointPolicy) UpdateRedirects(ingress bool, identities Identities, getProxyPort getProxyPortFunc, changes ChangeState) { +func (p *EndpointPolicy) UpdateRedirects(ingress bool, getProxyPort getProxyPortFunc, changes ChangeState) { l4policy := &p.L4Policy.Ingress if ingress { l4policy = &p.L4Policy.Egress } + l4policy.updateRedirects(p, getProxyPort, changes) +} + +func (l4policy L4DirectionPolicy) updateRedirects(p *EndpointPolicy, getProxyPort getProxyPortFunc, changes ChangeState) { // Selectorcache needs to be locked for toMapState (GetLabels()) call p.SelectorCache.mutex.RLock() - l4policy.updateRedirects(p, identities, getProxyPort, changes) - p.SelectorCache.mutex.RUnlock() -} + defer p.SelectorCache.mutex.RUnlock() -func (l4policy L4DirectionPolicy) updateRedirects(p *EndpointPolicy, identities Identities, getProxyPort getProxyPortFunc, changes ChangeState) { for _, l4 := range l4policy.PortRules { if l4.IsRedirect() { // Check if we are denying this specific L4 first regardless the L3, if there are any deny policies @@ -215,7 +216,7 @@ func (l4policy L4DirectionPolicy) updateRedirects(p *EndpointPolicy, identities } // Set the proxy port in the policy map. - l4.toMapState(p, identities, l4policy.features, func(_ Key, entry *MapStateEntry) bool { + l4.toMapState(p, l4policy.features, func(_ Key, entry *MapStateEntry) bool { if entry.IsRedirectEntry() { entry.ProxyPort = redirectPort } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/rule.go b/vendor/github.com/cilium/cilium/pkg/policy/rule.go index dfcaf04d8da..9011e614079 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/rule.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/rule.go @@ -432,13 +432,7 @@ func mergeIngress(policyCtx PolicyContext, ctx *SearchContext, fromEndpoints api } for _, p := range r.GetPortProtocols() { - if p.Protocol != api.ProtoAny { - cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap) - if err != nil { - return err - } - found += cnt - } else { + if p.Protocol.IsAny() { cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoTCP, ruleLabels, resMap) if err != nil { return err @@ -456,6 +450,12 @@ func mergeIngress(policyCtx PolicyContext, ctx *SearchContext, fromEndpoints api return err } found += cnt + } else { + cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap) + if err != nil { + return err + } + found += cnt } } return nil @@ -651,13 +651,7 @@ func mergeEgress(policyCtx PolicyContext, ctx *SearchContext, toEndpoints api.En } for _, p := range r.GetPortProtocols() { - if p.Protocol != api.ProtoAny { - cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns) - if err != nil { - return err - } - found += cnt - } else { + if p.Protocol.IsAny() { cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoTCP, ruleLabels, resMap, fqdns) if err != nil { return err @@ -675,6 +669,12 @@ func mergeEgress(policyCtx PolicyContext, ctx *SearchContext, toEndpoints api.En return err } found += cnt + } else { + cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns) + if err != nil { + return err + } + found += cnt } } return nil diff --git a/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go 
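// The mergeIngress/mergeEgress hunks above reorder the branches so the p.Protocol.IsAny() case
// leads: an "ANY" port-protocol fans out into one merge per concrete protocol (TCP shown, with
// UDP and SCTP in the elided context), while a concrete protocol merges exactly once. A minimal
// model of that fan-out, with hypothetical names:

package main

import "fmt"

type Proto string

const (
	ProtoAny  Proto = "ANY"
	ProtoTCP  Proto = "TCP"
	ProtoUDP  Proto = "UDP"
	ProtoSCTP Proto = "SCTP"
)

func (p Proto) IsAny() bool { return p == ProtoAny || p == "" }

// mergePortProto stands in for mergeIngressPortProto / mergeEgressPortProto.
func mergePortProto(port string, proto Proto) (int, error) {
	fmt.Printf("merged %s/%s\n", port, proto)
	return 1, nil
}

func merge(port string, proto Proto) (found int, err error) {
	if proto.IsAny() {
		for _, p := range []Proto{ProtoTCP, ProtoUDP, ProtoSCTP} {
			cnt, err := mergePortProto(port, p)
			if err != nil {
				return found, err
			}
			found += cnt
		}
		return found, nil
	}
	cnt, err := mergePortProto(port, proto)
	return found + cnt, err
}

func main() {
	merge("53", ProtoAny) // merges 53/TCP, 53/UDP, 53/SCTP
}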
index 7b6f9a6b890..a90f7ccf992 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go @@ -161,7 +161,9 @@ type identitySelector interface { type scIdentity struct { NID identity.NumericIdentity lbls labels.LabelArray - namespace string // value of the namespace label, or "" + nets []*net.IPNet // Most specific CIDR for the identity, if any. + computed bool // nets has been computed + namespace string // value of the namespace label, or "" } // scIdentityCache is a cache of Identities keyed by the numeric identity @@ -171,10 +173,37 @@ func newIdentity(nid identity.NumericIdentity, lbls labels.LabelArray) scIdentit return scIdentity{ NID: nid, lbls: lbls, + nets: getLocalScopeNets(nid, lbls), namespace: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel), + computed: true, } } +// getLocalScopeNets returns the most specific CIDR for a local scope identity. +func getLocalScopeNets(id identity.NumericIdentity, lbls labels.LabelArray) []*net.IPNet { + if id.HasLocalScope() { + var ( + maskSize int + mostSpecificCidr *net.IPNet + ) + for _, lbl := range lbls { + if lbl.Source == labels.LabelSourceCIDR { + _, netIP, err := net.ParseCIDR(lbl.Key) + if err == nil { + if ms, _ := netIP.Mask.Size(); ms > maskSize { + mostSpecificCidr = netIP + maskSize = ms + } + } + } + } + if mostSpecificCidr != nil { + return []*net.IPNet{mostSpecificCidr} + } + } + return nil +} + func getIdentityCache(ids cache.IdentityCache) scIdentityCache { idCache := make(map[identity.NumericIdentity]scIdentity, len(ids)) for nid, lbls := range ids { @@ -340,7 +369,11 @@ func (s *selectorManager) Equal(b *selectorManager) bool { // of the selections. If the old version is returned, the user is // guaranteed to receive a notification including the update. func (s *selectorManager) GetSelections() []identity.NumericIdentity { - return *s.selections.Load() + selections := s.selections.Load() + if selections == nil { + return emptySelection + } + return *selections } // Selects return 'true' if the CachedSelector selects the given @@ -1093,11 +1126,18 @@ func (sc *SelectorCache) RemoveIdentitiesFQDNSelectors(fqdnSels []api.FQDNSelect sc.releaseIdentityMappings(identitiesToRelease) } -// GetLabels must be called while holding sc.mutex! -func (sc *SelectorCache) GetLabelsLocked(id identity.NumericIdentity) labels.LabelArray { +// GetNetsLocked returns the most specific CIDR for an identity. For the "World" identity +// it returns both IPv4 and IPv6. 
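// getLocalScopeNets above reduces a local-scope identity's CIDR labels to the single most
// specific network, i.e. the one with the longest prefix. A small standalone illustration of
// that selection:

package main

import (
	"fmt"
	"net"
)

// mostSpecific returns the parseable CIDR with the longest prefix.
func mostSpecific(cidrs []string) *net.IPNet {
	var (
		best     *net.IPNet
		bestOnes int
	)
	for _, c := range cidrs {
		_, n, err := net.ParseCIDR(c)
		if err != nil {
			continue // skip labels that are not valid CIDRs
		}
		if ones, _ := n.Mask.Size(); ones > bestOnes {
			best, bestOnes = n, ones
		}
	}
	return best
}

func main() {
	// The /32 wins: it is the longest, most specific prefix.
	fmt.Println(mostSpecific([]string{"10.0.0.0/16", "10.0.1.0/24", "10.0.1.7/32"}))
}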
+func (sc *SelectorCache) GetNetsLocked(id identity.NumericIdentity) []*net.IPNet { ident, ok := sc.idCache[id] if !ok { - return labels.LabelArray{} + return nil + } + if !ident.computed { + log.WithFields(logrus.Fields{ + logfields.Identity: id, + logfields.Labels: ident.lbls, + }).Warning("GetNetsLocked: Identity with missing nets!") } - return ident.lbls + return ident.nets } diff --git a/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go b/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go index e2258a223ea..1044fc67e0f 100644 --- a/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go +++ b/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go @@ -279,17 +279,17 @@ type LogRecordDNS struct { // RCode is the response code // defined as per https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6 - // Use github.com/miekg/dns.RcodeToString map to retrieve string representation + // Use github.com/cilium/dns.RcodeToString map to retrieve string representation RCode int `json:"RCode,omitempty"` // QTypes are question types in DNS message // https://www.ietf.org/rfc/rfc1035.txt - // Use github.com/miekg/dns.TypeToString map to retrieve string representation + // Use github.com/cilium/dns.TypeToString map to retrieve string representation QTypes []uint16 `json:"QTypes,omitempty"` // AnswerTypes are record types in the answer section // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4 - // Use github.com/miekg/dns.TypeToString map to retrieve string representation + // Use github.com/cilium/dns.TypeToString map to retrieve string representation AnswerTypes []uint16 `json:"AnswerTypes,omitempty"` } diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/cilium/dns/.codecov.yml similarity index 100% rename from vendor/github.com/miekg/dns/.codecov.yml rename to vendor/github.com/cilium/dns/.codecov.yml diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/cilium/dns/.gitignore similarity index 100% rename from vendor/github.com/miekg/dns/.gitignore rename to vendor/github.com/cilium/dns/.gitignore diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/cilium/dns/AUTHORS similarity index 100% rename from vendor/github.com/miekg/dns/AUTHORS rename to vendor/github.com/cilium/dns/AUTHORS diff --git a/vendor/github.com/miekg/dns/CODEOWNERS b/vendor/github.com/cilium/dns/CODEOWNERS similarity index 100% rename from vendor/github.com/miekg/dns/CODEOWNERS rename to vendor/github.com/cilium/dns/CODEOWNERS diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/cilium/dns/CONTRIBUTORS similarity index 100% rename from vendor/github.com/miekg/dns/CONTRIBUTORS rename to vendor/github.com/cilium/dns/CONTRIBUTORS diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/cilium/dns/COPYRIGHT similarity index 100% rename from vendor/github.com/miekg/dns/COPYRIGHT rename to vendor/github.com/cilium/dns/COPYRIGHT diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/cilium/dns/LICENSE similarity index 100% rename from vendor/github.com/miekg/dns/LICENSE rename to vendor/github.com/cilium/dns/LICENSE diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/cilium/dns/Makefile.fuzz similarity index 100% rename from vendor/github.com/miekg/dns/Makefile.fuzz rename to vendor/github.com/cilium/dns/Makefile.fuzz diff --git a/vendor/github.com/miekg/dns/Makefile.release 
b/vendor/github.com/cilium/dns/Makefile.release similarity index 100% rename from vendor/github.com/miekg/dns/Makefile.release rename to vendor/github.com/cilium/dns/Makefile.release diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/cilium/dns/README.md similarity index 100% rename from vendor/github.com/miekg/dns/README.md rename to vendor/github.com/cilium/dns/README.md diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/cilium/dns/acceptfunc.go similarity index 100% rename from vendor/github.com/miekg/dns/acceptfunc.go rename to vendor/github.com/cilium/dns/acceptfunc.go diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/cilium/dns/client.go similarity index 98% rename from vendor/github.com/miekg/dns/client.go rename to vendor/github.com/cilium/dns/client.go index 9aa6585300f..f689b37a0c7 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/cilium/dns/client.go @@ -210,6 +210,32 @@ func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn } func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) { + start := time.Now() + err = c.SendContext(ctx, m, co, start) + if err != nil { + return nil, 0, err + } + + if isPacketConn(co.Conn) { + for { + r, err = co.ReadMsg() + // Ignore replies with mismatched IDs because they might be + // responses to earlier queries that timed out. + if err != nil || r.Id == m.Id { + break + } + } + } else { + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + } + + return r, time.Since(start), err +} + +func (c *Client) SendContext(ctx context.Context, m *Msg, co *Conn, t time.Time) error { opt := m.IsEdns0() // If EDNS0 is used use that for size. if opt != nil && opt.UDPSize() >= MinMsgSize { @@ -221,7 +247,6 @@ func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, } // write with the appropriate write timeout - t := time.Now() writeDeadline := t.Add(c.getTimeoutForRequest(c.writeTimeout())) readDeadline := t.Add(c.getTimeoutForRequest(c.readTimeout())) if deadline, ok := ctx.Deadline(); ok { @@ -237,27 +262,7 @@ func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, co.TsigSecret, co.TsigProvider = c.TsigSecret, c.TsigProvider - if err = co.WriteMsg(m); err != nil { - return nil, 0, err - } - - if isPacketConn(co.Conn) { - for { - r, err = co.ReadMsg() - // Ignore replies with mismatched IDs because they might be - // responses to earlier queries that timed out. - if err != nil || r.Id == m.Id { - break - } - } - } else { - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - } - rtt = time.Since(t) - return r, rtt, err + return co.WriteMsg(m) } // ReadMsg reads a message from the connection co. 
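// The client.go hunk above splits the old exchangeContext into the exported SendContext (write
// side and deadline handling) plus a read loop that matches the response to the request by DNS
// message Id, skipping mismatched Ids on packet (UDP) connections. A minimal sketch driving the
// split API directly, assuming the forked github.com/cilium/dns package; the resolver address
// is illustrative:

package main

import (
	"context"
	"log"
	"time"

	"github.com/cilium/dns"
)

func main() {
	c := &dns.Client{Net: "udp", Timeout: 2 * time.Second}
	conn, err := c.Dial("1.1.1.1:53")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	m := new(dns.Msg)
	m.SetQuestion("example.com.", dns.TypeA)

	// Write side: deadlines are computed relative to the supplied start time.
	start := time.Now()
	if err := c.SendContext(context.Background(), m, conn, start); err != nil {
		log.Fatal(err)
	}

	// Read side: over UDP, ignore replies whose Id does not match the query;
	// they may be late answers to earlier queries that already timed out.
	for {
		r, err := conn.ReadMsg()
		if err != nil {
			log.Fatal(err)
		}
		if r.Id == m.Id {
			log.Printf("rtt=%v answers=%d", time.Since(start), len(r.Answer))
			return
		}
	}
}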
diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/cilium/dns/clientconfig.go similarity index 100% rename from vendor/github.com/miekg/dns/clientconfig.go rename to vendor/github.com/cilium/dns/clientconfig.go diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/cilium/dns/dane.go similarity index 100% rename from vendor/github.com/miekg/dns/dane.go rename to vendor/github.com/cilium/dns/dane.go diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/cilium/dns/defaults.go similarity index 100% rename from vendor/github.com/miekg/dns/defaults.go rename to vendor/github.com/cilium/dns/defaults.go diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/cilium/dns/dns.go similarity index 100% rename from vendor/github.com/miekg/dns/dns.go rename to vendor/github.com/cilium/dns/dns.go diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/cilium/dns/dnssec.go similarity index 100% rename from vendor/github.com/miekg/dns/dnssec.go rename to vendor/github.com/cilium/dns/dnssec.go diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/cilium/dns/dnssec_keygen.go similarity index 100% rename from vendor/github.com/miekg/dns/dnssec_keygen.go rename to vendor/github.com/cilium/dns/dnssec_keygen.go diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/cilium/dns/dnssec_keyscan.go similarity index 100% rename from vendor/github.com/miekg/dns/dnssec_keyscan.go rename to vendor/github.com/cilium/dns/dnssec_keyscan.go diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/cilium/dns/dnssec_privkey.go similarity index 100% rename from vendor/github.com/miekg/dns/dnssec_privkey.go rename to vendor/github.com/cilium/dns/dnssec_privkey.go diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/cilium/dns/doc.go similarity index 100% rename from vendor/github.com/miekg/dns/doc.go rename to vendor/github.com/cilium/dns/doc.go diff --git a/vendor/github.com/miekg/dns/duplicate.go b/vendor/github.com/cilium/dns/duplicate.go similarity index 100% rename from vendor/github.com/miekg/dns/duplicate.go rename to vendor/github.com/cilium/dns/duplicate.go diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/cilium/dns/edns.go similarity index 100% rename from vendor/github.com/miekg/dns/edns.go rename to vendor/github.com/cilium/dns/edns.go diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/cilium/dns/format.go similarity index 100% rename from vendor/github.com/miekg/dns/format.go rename to vendor/github.com/cilium/dns/format.go diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/cilium/dns/fuzz.go similarity index 100% rename from vendor/github.com/miekg/dns/fuzz.go rename to vendor/github.com/cilium/dns/fuzz.go diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/cilium/dns/generate.go similarity index 100% rename from vendor/github.com/miekg/dns/generate.go rename to vendor/github.com/cilium/dns/generate.go diff --git a/vendor/github.com/miekg/dns/hash.go b/vendor/github.com/cilium/dns/hash.go similarity index 100% rename from vendor/github.com/miekg/dns/hash.go rename to vendor/github.com/cilium/dns/hash.go diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/cilium/dns/labels.go similarity index 100% rename from vendor/github.com/miekg/dns/labels.go rename to vendor/github.com/cilium/dns/labels.go diff --git 
a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/cilium/dns/listen_no_reuseport.go similarity index 100% rename from vendor/github.com/miekg/dns/listen_no_reuseport.go rename to vendor/github.com/cilium/dns/listen_no_reuseport.go diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/cilium/dns/listen_reuseport.go similarity index 100% rename from vendor/github.com/miekg/dns/listen_reuseport.go rename to vendor/github.com/cilium/dns/listen_reuseport.go diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/cilium/dns/msg.go similarity index 100% rename from vendor/github.com/miekg/dns/msg.go rename to vendor/github.com/cilium/dns/msg.go diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/cilium/dns/msg_helpers.go similarity index 100% rename from vendor/github.com/miekg/dns/msg_helpers.go rename to vendor/github.com/cilium/dns/msg_helpers.go diff --git a/vendor/github.com/miekg/dns/msg_truncate.go b/vendor/github.com/cilium/dns/msg_truncate.go similarity index 100% rename from vendor/github.com/miekg/dns/msg_truncate.go rename to vendor/github.com/cilium/dns/msg_truncate.go diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/cilium/dns/nsecx.go similarity index 100% rename from vendor/github.com/miekg/dns/nsecx.go rename to vendor/github.com/cilium/dns/nsecx.go diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/cilium/dns/privaterr.go similarity index 100% rename from vendor/github.com/miekg/dns/privaterr.go rename to vendor/github.com/cilium/dns/privaterr.go diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/cilium/dns/reverse.go similarity index 100% rename from vendor/github.com/miekg/dns/reverse.go rename to vendor/github.com/cilium/dns/reverse.go diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/cilium/dns/sanitize.go similarity index 100% rename from vendor/github.com/miekg/dns/sanitize.go rename to vendor/github.com/cilium/dns/sanitize.go diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/cilium/dns/scan.go similarity index 100% rename from vendor/github.com/miekg/dns/scan.go rename to vendor/github.com/cilium/dns/scan.go diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/cilium/dns/scan_rr.go similarity index 100% rename from vendor/github.com/miekg/dns/scan_rr.go rename to vendor/github.com/cilium/dns/scan_rr.go diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/cilium/dns/serve_mux.go similarity index 100% rename from vendor/github.com/miekg/dns/serve_mux.go rename to vendor/github.com/cilium/dns/serve_mux.go diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/cilium/dns/server.go similarity index 100% rename from vendor/github.com/miekg/dns/server.go rename to vendor/github.com/cilium/dns/server.go diff --git a/vendor/github.com/cilium/dns/shared_client.go b/vendor/github.com/cilium/dns/shared_client.go new file mode 100644 index 00000000000..b15345122cc --- /dev/null +++ b/vendor/github.com/cilium/dns/shared_client.go @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package dns + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "time" +) + +// SharedClients holds a set of SharedClient instances. +type SharedClients struct { + // SharedClient's lock must not be taken while this lock is held! 
+ lock sync.Mutex + // clients are created and destroyed on demand, hence 'Mutex' needs to be taken. + clients map[string]*SharedClient +} + +func NewSharedClients() *SharedClients { + return &SharedClients{ + clients: make(map[string]*SharedClient), + } +} + +func (s *SharedClients) Exchange(key string, conf *Client, m *Msg, serverAddrStr string) (r *Msg, rtt time.Duration, closer func(), err error) { + return s.ExchangeContext(context.Background(), key, conf, m, serverAddrStr) +} + +func (s *SharedClients) ExchangeContext(ctx context.Context, key string, conf *Client, m *Msg, serverAddrStr string) (r *Msg, rtt time.Duration, closer func(), err error) { + client, closer := s.GetSharedClient(key, conf, serverAddrStr) + r, rtt, err = client.ExchangeSharedContext(ctx, m) + return r, rtt, closer, err +} + +// GetSharedClient gets or creates an instance of SharedClient keyed with 'key'. If 'key' is an +// empty string, a new client is always created and it is not actually shared. The returned 'closer' +// must be called once the client is no longer needed. Conversely, the returned 'client' must not be +// used after the closer is called. +func (s *SharedClients) GetSharedClient(key string, conf *Client, serverAddrStr string) (client *SharedClient, closer func()) { + if key == "" { + // Simplified case when the client is actually not shared + client = newSharedClient(conf, serverAddrStr) + return client, client.close + } + for { + // lock for s.clients access + s.lock.Lock() + // locate client to re-use if possible. + client = s.clients[key] + if client == nil { + client = newSharedClient(conf, serverAddrStr) + s.clients[key] = client + s.lock.Unlock() + // new client, we are done + break + } + s.lock.Unlock() + + // reusing client that may start closing while we wait for its lock + client.Lock() + if client.refcount > 0 { + // not closed, add our refcount + client.refcount++ + client.Unlock() + break + } + // client was closed while we waited for its lock, discard and try again + client.Unlock() + client = nil + } + + return client, func() { + client.Lock() + defer client.Unlock() + client.refcount-- + if client.refcount == 0 { + // connection close must be completed while holding the client's lock to + // avoid a race where a new client dials using the same 5-tuple and gets a + // bind error. + // The client remains findable so that new users with the same key may wait + // for this closing to be done with. + client.close() + // Make client unreachable + // Must take s.lock for this. + s.lock.Lock() + delete(s.clients, key) + s.lock.Unlock() + } + } +} + +type request struct { + ctx context.Context + msg *Msg + ch chan sharedClientResponse +} + +type sharedClientResponse struct { + msg *Msg + rtt time.Duration + err error +} + +// A SharedClient keeps state for concurrent transactions on the same upstream client/connection.
+type SharedClient struct { + serverAddr string + + *Client + + // requests is closed when the client needs to exit + requests chan request + // wg is waited on for the client to finish exiting + wg sync.WaitGroup + + sync.Mutex // protects the fields below + refcount int + conn *Conn +} + +func newSharedClient(conf *Client, serverAddr string) *SharedClient { + return &SharedClient{ + refcount: 1, + serverAddr: serverAddr, + Client: conf, + requests: make(chan request), + } +} + +// ExchangeShared dials a connection to the server on first invocation, and starts handler +// goroutines to send and receive responses, distributing them to the appropriate concurrent callers +// based on the DNS message Id. +func (c *SharedClient) ExchangeShared(m *Msg) (r *Msg, rtt time.Duration, err error) { + return c.ExchangeSharedContext(context.Background(), m) +} + +// handler is started when the connection is dialed +func handler(wg *sync.WaitGroup, client *Client, conn *Conn, requests chan request) { + defer wg.Done() + + responses := make(chan sharedClientResponse) + + // Receive loop + wg.Add(1) + go func() { + defer wg.Done() + defer close(responses) + for { + r, err := conn.ReadMsg() + if err != nil { + // handler is not reading on the channel after closing + if errors.Is(err, net.ErrClosed) { + return + } + responses <- sharedClientResponse{nil, 0, err} + } else { + responses <- sharedClientResponse{r, 0, nil} + } + } + }() + + type waiter struct { + ch chan sharedClientResponse + start time.Time + } + waitingResponses := make(map[uint16]waiter) + defer func() { + conn.Close() + + // Drain responses sent by the receive loop to allow it to exit. + // It may be repeatedly reading after an i/o timeout, for example. + for range responses { + } + + for _, waiter := range waitingResponses { + waiter.ch <- sharedClientResponse{nil, 0, net.ErrClosed} + close(waiter.ch) + } + }() + + for { + select { + case req, ok := <-requests: + if !ok { + return + } + start := time.Now() + err := client.SendContext(req.ctx, req.msg, conn, start) + if err != nil { + req.ch <- sharedClientResponse{nil, 0, err} + close(req.ch) + } else { + waitingResponses[req.msg.Id] = waiter{req.ch, start} + } + + case resp, ok := <-responses: + if !ok { + return + } + if resp.err != nil { + // ReadMsg failed, but we cannot match it to a request, + // so complete all pending requests. + for _, waiter := range waitingResponses { + waiter.ch <- sharedClientResponse{nil, 0, resp.err} + close(waiter.ch) + } + waitingResponses = make(map[uint16]waiter) + } else if resp.msg != nil { + if waiter, ok := waitingResponses[resp.msg.Id]; ok { + delete(waitingResponses, resp.msg.Id) + resp.rtt = time.Since(waiter.start) + waiter.ch <- resp + close(waiter.ch) + } + } + } + } +} + +func (c *SharedClient) ExchangeSharedContext(ctx context.Context, m *Msg) (r *Msg, rtt time.Duration, err error) { + c.Lock() + if c.conn == nil { + c.conn, err = c.DialContext(ctx, c.serverAddr) + if err != nil { + c.Unlock() + return nil, 0, fmt.Errorf("failed to dial connection to %v: %w", c.serverAddr, err) + } + // Start handler for sending and receiving. + c.wg.Add(1) + go handler(&c.wg, c.Client, c.conn, c.requests) + } + c.Unlock() + + respCh := make(chan sharedClientResponse) + c.requests <- request{ + ctx: ctx, + msg: m, + ch: respCh, + } + resp := <-respCh + return resp.msg, resp.rtt, resp.err +} + +// close closes and waits for the close to finish. +// Must be called while holding client's lock.
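// A usage sketch for the SharedClient machinery in this file: callers presenting the same key
// multiplex concurrent queries over one upstream connection, and handler() demultiplexes the
// responses by DNS message Id. The key and server address below are illustrative:

package main

import (
	"log"

	"github.com/cilium/dns"
)

func main() {
	shared := dns.NewSharedClients()

	m := new(dns.Msg)
	m.SetQuestion("example.com.", dns.TypeA)

	// A per-source key (e.g. the client 5-tuple) picks the shared connection.
	r, rtt, closer, err := shared.Exchange(
		"10.0.0.1:45678->1.1.1.1:53/udp", &dns.Client{Net: "udp"}, m, "1.1.1.1:53")
	if err != nil {
		log.Fatal(err)
	}
	// closer drops this caller's reference; the underlying connection is
	// closed only when the last reference is released.
	defer closer()
	log.Printf("rtt=%v rcode=%s", rtt, dns.RcodeToString[r.Rcode])
}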
+func (c *SharedClient) close() { + close(c.requests) + c.wg.Wait() + c.conn = nil +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/cilium/dns/sig0.go similarity index 100% rename from vendor/github.com/miekg/dns/sig0.go rename to vendor/github.com/cilium/dns/sig0.go diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/cilium/dns/singleinflight.go similarity index 100% rename from vendor/github.com/miekg/dns/singleinflight.go rename to vendor/github.com/cilium/dns/singleinflight.go diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/cilium/dns/smimea.go similarity index 100% rename from vendor/github.com/miekg/dns/smimea.go rename to vendor/github.com/cilium/dns/smimea.go diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/cilium/dns/svcb.go similarity index 100% rename from vendor/github.com/miekg/dns/svcb.go rename to vendor/github.com/cilium/dns/svcb.go diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/cilium/dns/tlsa.go similarity index 100% rename from vendor/github.com/miekg/dns/tlsa.go rename to vendor/github.com/cilium/dns/tlsa.go diff --git a/vendor/github.com/miekg/dns/tools.go b/vendor/github.com/cilium/dns/tools.go similarity index 100% rename from vendor/github.com/miekg/dns/tools.go rename to vendor/github.com/cilium/dns/tools.go diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/cilium/dns/tsig.go similarity index 100% rename from vendor/github.com/miekg/dns/tsig.go rename to vendor/github.com/cilium/dns/tsig.go diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/cilium/dns/types.go similarity index 100% rename from vendor/github.com/miekg/dns/types.go rename to vendor/github.com/cilium/dns/types.go diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/cilium/dns/udp.go similarity index 100% rename from vendor/github.com/miekg/dns/udp.go rename to vendor/github.com/cilium/dns/udp.go diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/cilium/dns/udp_windows.go similarity index 100% rename from vendor/github.com/miekg/dns/udp_windows.go rename to vendor/github.com/cilium/dns/udp_windows.go diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/cilium/dns/update.go similarity index 100% rename from vendor/github.com/miekg/dns/update.go rename to vendor/github.com/cilium/dns/update.go diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/cilium/dns/version.go similarity index 100% rename from vendor/github.com/miekg/dns/version.go rename to vendor/github.com/cilium/dns/version.go diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/cilium/dns/xfr.go similarity index 100% rename from vendor/github.com/miekg/dns/xfr.go rename to vendor/github.com/cilium/dns/xfr.go diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/cilium/dns/zduplicate.go similarity index 100% rename from vendor/github.com/miekg/dns/zduplicate.go rename to vendor/github.com/cilium/dns/zduplicate.go diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/cilium/dns/zmsg.go similarity index 100% rename from vendor/github.com/miekg/dns/zmsg.go rename to vendor/github.com/cilium/dns/zmsg.go diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/cilium/dns/ztypes.go similarity index 100% rename from vendor/github.com/miekg/dns/ztypes.go rename to vendor/github.com/cilium/dns/ztypes.go diff --git a/vendor/github.com/cilium/ebpf/features/doc.go 
b/vendor/github.com/cilium/ebpf/features/doc.go new file mode 100644 index 00000000000..acc57e3b1e3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/doc.go @@ -0,0 +1,19 @@ +// Package features allows probing for BPF features available to the calling process. +// +// In general, the error return values from feature probes in this package +// all have the following semantics unless otherwise specified: +// +// err == nil: The feature is available. +// errors.Is(err, ebpf.ErrNotSupported): The feature is not available. +// err != nil: Any errors encountered during probe execution, wrapped. +// +// Note that the latter case may include false negatives, and that resource +// creation may succeed despite an error being returned. For example, some +// map and program types cannot reliably be probed and will return an +// inconclusive error. +// +// As a rule, only `nil` and `ebpf.ErrNotSupported` are conclusive. +// +// Probe results are cached by the library and persist throughout any changes +// to the process' environment, like capability changes. +package features diff --git a/vendor/github.com/cilium/ebpf/features/map.go b/vendor/github.com/cilium/ebpf/features/map.go new file mode 100644 index 00000000000..093f37ee1c9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/map.go @@ -0,0 +1,321 @@ +package features + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveMapType probes the running kernel for the availability of the specified map type. +// +// See the package documentation for the meaning of the error return value. +func HaveMapType(mt ebpf.MapType) error { + return haveMapTypeMatrix.Result(mt) +} + +func probeCgroupStorageMap(mt sys.MapType) error { + // keySize needs to be sizeof(struct{u32 + u64}) = 12 (+ padding = 16) + // by using unsafe.Sizeof(int) we are making sure that this works on 32bit and 64bit archs + return createMap(&sys.MapCreateAttr{ + MapType: mt, + ValueSize: 4, + KeySize: uint32(8 + unsafe.Sizeof(int(0))), + MaxEntries: 0, + }) +} + +func probeStorageMap(mt sys.MapType) error { + // maxEntries needs to be 0 + // BPF_F_NO_PREALLOC needs to be set + // btf* fields need to be set + // see alloc_check for local_storage map types + err := createMap(&sys.MapCreateAttr{ + MapType: mt, + KeySize: 4, + ValueSize: 4, + MaxEntries: 0, + MapFlags: unix.BPF_F_NO_PREALLOC, + BtfKeyTypeId: 1, + BtfValueTypeId: 1, + BtfFd: ^uint32(0), + }) + if errors.Is(err, unix.EBADF) { + // Triggered by BtfFd. + return nil + } + return err +} + +func probeNestedMap(mt sys.MapType) error { + // assign invalid innerMapFd to pass validation check + // will return EBADF + err := probeMap(&sys.MapCreateAttr{ + MapType: mt, + InnerMapFd: ^uint32(0), + }) + if errors.Is(err, unix.EBADF) { + return nil + } + return err +} + +func probeMap(attr *sys.MapCreateAttr) error { + if attr.KeySize == 0 { + attr.KeySize = 4 + } + if attr.ValueSize == 0 { + attr.ValueSize = 4 + } + attr.MaxEntries = 1 + return createMap(attr) +} + +func createMap(attr *sys.MapCreateAttr) error { + fd, err := sys.MapCreate(attr) + if err == nil { + fd.Close() + return nil + } + + switch { + // EINVAL occurs when attempting to create a map with an unknown type. + // E2BIG occurs when MapCreateAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given map type. 
+ case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG): + return ebpf.ErrNotSupported + } + + return err +} + +var haveMapTypeMatrix = internal.FeatureMatrix[ebpf.MapType]{ + ebpf.Hash: {Version: "3.19"}, + ebpf.Array: {Version: "3.19"}, + ebpf.ProgramArray: {Version: "4.2"}, + ebpf.PerfEventArray: {Version: "4.3"}, + ebpf.PerCPUHash: {Version: "4.6"}, + ebpf.PerCPUArray: {Version: "4.6"}, + ebpf.StackTrace: { + Version: "4.6", + Fn: func() error { + return probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STACK_TRACE, + ValueSize: 8, // sizeof(uint64) + }) + }, + }, + ebpf.CGroupArray: {Version: "4.8"}, + ebpf.LRUHash: {Version: "4.10"}, + ebpf.LRUCPUHash: {Version: "4.10"}, + ebpf.LPMTrie: { + Version: "4.11", + Fn: func() error { + // keySize and valueSize need to be sizeof(struct{u32 + u8}) + 1 + padding = 8 + // BPF_F_NO_PREALLOC needs to be set + return probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_LPM_TRIE, + KeySize: 8, + ValueSize: 8, + MapFlags: unix.BPF_F_NO_PREALLOC, + }) + }, + }, + ebpf.ArrayOfMaps: { + Version: "4.12", + Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_ARRAY_OF_MAPS) }, + }, + ebpf.HashOfMaps: { + Version: "4.12", + Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_HASH_OF_MAPS) }, + }, + ebpf.DevMap: {Version: "4.14"}, + ebpf.SockMap: {Version: "4.14"}, + ebpf.CPUMap: {Version: "4.15"}, + ebpf.XSKMap: {Version: "4.18"}, + ebpf.SockHash: {Version: "4.18"}, + ebpf.CGroupStorage: { + Version: "4.19", + Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_CGROUP_STORAGE) }, + }, + ebpf.ReusePortSockArray: {Version: "4.19"}, + ebpf.PerCPUCGroupStorage: { + Version: "4.20", + Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) }, + }, + ebpf.Queue: { + Version: "4.20", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_QUEUE, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.Stack: { + Version: "4.20", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STACK, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.SkStorage: { + Version: "5.2", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_SK_STORAGE) }, + }, + ebpf.DevMapHash: {Version: "5.4"}, + ebpf.StructOpsMap: { + Version: "5.6", + Fn: func() error { + // StructOps requires setting a vmlinux type id, but id 1 will always + // resolve to some type of integer. This will cause ENOTSUPP. + err := probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STRUCT_OPS, + BtfVmlinuxValueTypeId: 1, + }) + if errors.Is(err, sys.ENOTSUPP) { + // ENOTSUPP means the map type is at least known to the kernel. + return nil + } + return err + }, + }, + ebpf.RingBuf: { + Version: "5.8", + Fn: func() error { + // keySize and valueSize need to be 0 + // maxEntries needs to be power of 2 and PAGE_ALIGNED + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_RINGBUF, + KeySize: 0, + ValueSize: 0, + MaxEntries: uint32(os.Getpagesize()), + }) + }, + }, + ebpf.InodeStorage: { + Version: "5.10", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_INODE_STORAGE) }, + }, + ebpf.TaskStorage: { + Version: "5.11", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_TASK_STORAGE) }, + }, +} + +func init() { + for mt, ft := range haveMapTypeMatrix { + ft.Name = mt.String() + if ft.Fn == nil { + // Avoid referring to the loop variable in the closure. 
+ mt := sys.MapType(mt) + ft.Fn = func() error { return probeMap(&sys.MapCreateAttr{MapType: mt}) } + } + } +} + +// MapFlags documents which flags may be feature probed. +type MapFlags = sys.MapFlags + +// Flags which may be feature probed. +const ( + BPF_F_NO_PREALLOC = sys.BPF_F_NO_PREALLOC + BPF_F_RDONLY_PROG = sys.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = sys.BPF_F_WRONLY_PROG + BPF_F_MMAPABLE = sys.BPF_F_MMAPABLE + BPF_F_INNER_MAP = sys.BPF_F_INNER_MAP +) + +// HaveMapFlag probes the running kernel for the availability of the specified map flag. +// +// Returns an error if flag is not one of the flags declared in this package. +// See the package documentation for the meaning of the error return value. +func HaveMapFlag(flag MapFlags) (err error) { + return haveMapFlagsMatrix.Result(flag) +} + +func probeMapFlag(attr *sys.MapCreateAttr) error { + // For now, we do not check if the map type is supported because we only support + // probing for flags defined on arrays and hashes that are always supported. + // In the future, if we allow probing on flags defined on newer types, checking for map type + // support will be required. + if attr.MapType == sys.BPF_MAP_TYPE_UNSPEC { + attr.MapType = sys.BPF_MAP_TYPE_ARRAY + } + + attr.KeySize = 4 + attr.ValueSize = 4 + attr.MaxEntries = 1 + + fd, err := sys.MapCreate(attr) + if err == nil { + fd.Close() + } else if errors.Is(err, unix.EINVAL) { + // EINVAL occurs when attempting to create a map with an unknown type or an unknown flag. + err = ebpf.ErrNotSupported + } + + return err +} + +var haveMapFlagsMatrix = internal.FeatureMatrix[MapFlags]{ + BPF_F_NO_PREALLOC: { + Version: "4.6", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_HASH, + MapFlags: BPF_F_NO_PREALLOC, + }) + }, + }, + BPF_F_RDONLY_PROG: { + Version: "5.2", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_RDONLY_PROG, + }) + }, + }, + BPF_F_WRONLY_PROG: { + Version: "5.2", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_WRONLY_PROG, + }) + }, + }, + BPF_F_MMAPABLE: { + Version: "5.5", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_MMAPABLE, + }) + }, + }, + BPF_F_INNER_MAP: { + Version: "5.10", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_INNER_MAP, + }) + }, + }, +} + +func init() { + for mf, ft := range haveMapFlagsMatrix { + ft.Name = fmt.Sprint(mf) + } +} diff --git a/vendor/github.com/cilium/ebpf/features/misc.go b/vendor/github.com/cilium/ebpf/features/misc.go new file mode 100644 index 00000000000..de07d3801b7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/misc.go @@ -0,0 +1,79 @@ +package features + +import ( + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +// HaveLargeInstructions probes the running kernel if more than 4096 instructions +// per program are supported. +// +// Upstream commit c04c0d2b968a ("bpf: increase complexity limit and maximum program size"). +// +// See the package documentation for the meaning of the error return value.
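// A usage sketch for the map probes above. Per the package documentation, only nil and
// ebpf.ErrNotSupported are conclusive; any other error means the probe itself was inconclusive:

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/features"
)

func main() {
	for _, mt := range []ebpf.MapType{ebpf.RingBuf, ebpf.LPMTrie} {
		switch err := features.HaveMapType(mt); {
		case err == nil:
			fmt.Printf("%s: supported\n", mt)
		case errors.Is(err, ebpf.ErrNotSupported):
			fmt.Printf("%s: not supported by this kernel\n", mt)
		default:
			fmt.Printf("%s: probe inconclusive: %v\n", mt, err)
		}
	}

	if err := features.HaveMapFlag(features.BPF_F_MMAPABLE); err == nil {
		fmt.Println("BPF_F_MMAPABLE: supported")
	}
}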
+var HaveLargeInstructions = internal.NewFeatureTest(">4096 instructions", "5.2", func() error { + const maxInsns = 4096 + + insns := make(asm.Instructions, maxInsns, maxInsns+1) + for i := range insns { + insns[i] = asm.Mov.Imm(asm.R0, 1) + } + insns = append(insns, asm.Return()) + + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: insns, + }) +}) + +// HaveBoundedLoops probes the running kernel if bounded loops are supported. +// +// Upstream commit 2589726d12a1 ("bpf: introduce bounded loops"). +// +// See the package documentation for the meaning of the error return value. +var HaveBoundedLoops = internal.NewFeatureTest("bounded loops", "5.3", func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 10), + asm.Sub.Imm(asm.R0, 1).WithSymbol("loop"), + asm.JNE.Imm(asm.R0, 0, "loop"), + asm.Return(), + }, + }) +}) + +// HaveV2ISA probes the running kernel if instructions of the v2 ISA are supported. +// +// Upstream commit 92b31a9af73b ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions"). +// +// See the package documentation for the meaning of the error return value. +var HaveV2ISA = internal.NewFeatureTest("v2 ISA", "4.14", func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JLT.Imm(asm.R0, 0, "exit"), + asm.Mov.Imm(asm.R0, 1), + asm.Return().WithSymbol("exit"), + }, + }) +}) + +// HaveV3ISA probes the running kernel if instructions of the v3 ISA are supported. +// +// Upstream commit 092ed0968bb6 ("bpf: verifier support JMP32"). +// +// See the package documentation for the meaning of the error return value. +var HaveV3ISA = internal.NewFeatureTest("v3 ISA", "5.1", func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JLT.Imm32(asm.R0, 0, "exit"), + asm.Mov.Imm(asm.R0, 1), + asm.Return().WithSymbol("exit"), + }, + }) +}) diff --git a/vendor/github.com/cilium/ebpf/features/prog.go b/vendor/github.com/cilium/ebpf/features/prog.go new file mode 100644 index 00000000000..a1136379672 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/prog.go @@ -0,0 +1,297 @@ +package features + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveProgType probes the running kernel for the availability of the specified program type. +// +// Deprecated: use HaveProgramType() instead. +var HaveProgType = HaveProgramType + +// HaveProgramType probes the running kernel for the availability of the specified program type. +// +// See the package documentation for the meaning of the error return value. +func HaveProgramType(pt ebpf.ProgramType) (err error) { + return haveProgramTypeMatrix.Result(pt) +} + +func probeProgram(spec *ebpf.ProgramSpec) error { + if spec.Instructions == nil { + spec.Instructions = asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + } + } + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + switch { + // EINVAL occurs when attempting to create a program with an unknown type. 
+ // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given prog type. + case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG): + err = ebpf.ErrNotSupported + } + + return err +} + +var haveProgramTypeMatrix = internal.FeatureMatrix[ebpf.ProgramType]{ + ebpf.SocketFilter: {Version: "3.19"}, + ebpf.Kprobe: {Version: "4.1"}, + ebpf.SchedCLS: {Version: "4.1"}, + ebpf.SchedACT: {Version: "4.1"}, + ebpf.TracePoint: {Version: "4.7"}, + ebpf.XDP: {Version: "4.8"}, + ebpf.PerfEvent: {Version: "4.9"}, + ebpf.CGroupSKB: {Version: "4.10"}, + ebpf.CGroupSock: {Version: "4.10"}, + ebpf.LWTIn: {Version: "4.10"}, + ebpf.LWTOut: {Version: "4.10"}, + ebpf.LWTXmit: {Version: "4.10"}, + ebpf.SockOps: {Version: "4.13"}, + ebpf.SkSKB: {Version: "4.14"}, + ebpf.CGroupDevice: {Version: "4.15"}, + ebpf.SkMsg: {Version: "4.17"}, + ebpf.RawTracepoint: {Version: "4.17"}, + ebpf.CGroupSockAddr: { + Version: "4.17", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSockAddr, + AttachType: ebpf.AttachCGroupInet4Connect, + }) + }, + }, + ebpf.LWTSeg6Local: {Version: "4.18"}, + ebpf.LircMode2: {Version: "4.18"}, + ebpf.SkReuseport: {Version: "4.19"}, + ebpf.FlowDissector: {Version: "4.20"}, + ebpf.CGroupSysctl: {Version: "5.2"}, + ebpf.RawTracepointWritable: {Version: "5.2"}, + ebpf.CGroupSockopt: { + Version: "5.3", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSockopt, + AttachType: ebpf.AttachCGroupGetsockopt, + }) + }, + }, + ebpf.Tracing: { + Version: "5.5", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Tracing, + AttachType: ebpf.AttachTraceFEntry, + AttachTo: "bpf_init", + }) + }, + }, + ebpf.StructOps: { + Version: "5.6", + Fn: func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.StructOps, + License: "GPL", + }) + if errors.Is(err, sys.ENOTSUPP) { + // ENOTSUPP means the program type is at least known to the kernel. 
+ return nil + } + return err + }, + }, + ebpf.Extension: { + Version: "5.6", + Fn: func() error { + // create btf.Func to add to first ins of target and extension so both progs are btf powered + btfFn := btf.Func{ + Name: "a", + Type: &btf.FuncProto{ + Return: &btf.Int{}, + }, + Linkage: btf.GlobalFunc, + } + insns := asm.Instructions{ + btf.WithFuncMetadata(asm.Mov.Imm(asm.R0, 0), &btfFn), + asm.Return(), + } + + // create target prog + prog, err := ebpf.NewProgramWithOptions( + &ebpf.ProgramSpec{ + Type: ebpf.XDP, + Instructions: insns, + }, + ebpf.ProgramOptions{ + LogDisabled: true, + }, + ) + if err != nil { + return err + } + defer prog.Close() + + // probe for Extension prog with target + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Extension, + Instructions: insns, + AttachTarget: prog, + AttachTo: btfFn.Name, + }) + }, + }, + ebpf.LSM: { + Version: "5.7", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.LSM, + AttachType: ebpf.AttachLSMMac, + AttachTo: "file_mprotect", + License: "GPL", + }) + }, + }, + ebpf.SkLookup: { + Version: "5.9", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SkLookup, + AttachType: ebpf.AttachSkLookup, + }) + }, + }, + ebpf.Syscall: { + Version: "5.14", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Syscall, + Flags: unix.BPF_F_SLEEPABLE, + }) + }, + }, +} + +func init() { + for key, ft := range haveProgramTypeMatrix { + ft.Name = key.String() + if ft.Fn == nil { + key := key // avoid the dreaded loop variable problem + ft.Fn = func() error { return probeProgram(&ebpf.ProgramSpec{Type: key}) } + } + } +} + +type helperKey struct { + typ ebpf.ProgramType + helper asm.BuiltinFunc +} + +var helperCache = internal.NewFeatureCache(func(key helperKey) *internal.FeatureTest { + return &internal.FeatureTest{ + Name: fmt.Sprintf("%s for program type %s", key.helper, key.typ), + Fn: func() error { + return haveProgramHelper(key.typ, key.helper) + }, + } +}) + +// HaveProgramHelper probes the running kernel for the availability of the specified helper +// function to a specified program type. +// Return values have the following semantics: +// +// err == nil: The feature is available. +// errors.Is(err, ebpf.ErrNotSupported): The feature is not available. +// err != nil: Any errors encountered during probe execution, wrapped. +// +// Note that the latter case may include false negatives, and that program creation may +// succeed despite an error being returned. +// Only `nil` and `ebpf.ErrNotSupported` are conclusive. +// +// Probe results are cached and persist throughout any process capability changes. 
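// A usage sketch pairing HaveProgramType with the HaveProgramHelper probe documented above;
// asm.FnRedirectMap is one of the asm package's builtin-function constants:

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/features"
)

func main() {
	if err := features.HaveProgramType(ebpf.XDP); err != nil {
		fmt.Println("XDP programs unavailable:", err)
		return
	}
	err := features.HaveProgramHelper(ebpf.XDP, asm.FnRedirectMap)
	switch {
	case err == nil:
		fmt.Println("bpf_redirect_map is callable from XDP")
	case errors.Is(err, ebpf.ErrNotSupported):
		fmt.Println("bpf_redirect_map is not available to XDP on this kernel")
	default:
		fmt.Println("probe inconclusive:", err)
	}
}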
+func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + if helper > helper.Max() { + return os.ErrInvalid + } + + return helperCache.Result(helperKey{pt, helper}) +} + +func haveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + if ok := helperProbeNotImplemented(pt); ok { + return fmt.Errorf("no feature probe for %v/%v", pt, helper) + } + + if err := HaveProgramType(pt); err != nil { + return err + } + + spec := &ebpf.ProgramSpec{ + Type: pt, + Instructions: asm.Instructions{ + helper.Call(), + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "GPL", + } + + switch pt { + case ebpf.CGroupSockAddr: + spec.AttachType = ebpf.AttachCGroupInet4Connect + case ebpf.CGroupSockopt: + spec.AttachType = ebpf.AttachCGroupGetsockopt + case ebpf.SkLookup: + spec.AttachType = ebpf.AttachSkLookup + case ebpf.Syscall: + spec.Flags = unix.BPF_F_SLEEPABLE + } + + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + switch { + // EACCES occurs when attempting to create a program probe with a helper + // while the register args when calling this helper aren't set up properly. + // We interpret this as the helper being available, because the verifier + // returns EINVAL if the helper is not supported by the running kernel. + case errors.Is(err, unix.EACCES): + // TODO: possibly we need to check verifier output here to be sure + err = nil + + // EINVAL occurs when attempting to create a program with an unknown helper. + case errors.Is(err, unix.EINVAL): + // TODO: possibly we need to check verifier output here to be sure + err = ebpf.ErrNotSupported + } + + return err +} + +func helperProbeNotImplemented(pt ebpf.ProgramType) bool { + switch pt { + case ebpf.Extension, ebpf.LSM, ebpf.StructOps, ebpf.Tracing: + return true + } + return false +} diff --git a/vendor/github.com/cilium/ebpf/features/version.go b/vendor/github.com/cilium/ebpf/features/version.go new file mode 100644 index 00000000000..69e1c39c1a5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/version.go @@ -0,0 +1,18 @@ +package features + +import "github.com/cilium/ebpf/internal" + +// LinuxVersionCode returns the version of the currently running kernel +// as defined in the LINUX_VERSION_CODE compile-time macro. It is represented +// in the format described by the KERNEL_VERSION macro from linux/version.h. +// +// Do not use the version to make assumptions about the presence of certain +// kernel features, always prefer feature probes in this package. Some +// distributions backport or disable eBPF features. 
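// LinuxVersionCode returns the KERNEL_VERSION encoding, (major << 16) + (minor << 8) + patch,
// so it can be unpacked as in this sketch. As the comment above warns, prefer the feature
// probes over version comparisons:

package main

import (
	"fmt"

	"github.com/cilium/ebpf/features"
)

func main() {
	code, err := features.LinuxVersionCode()
	if err != nil {
		panic(err)
	}
	fmt.Printf("running kernel: %d.%d.%d (code %d)\n",
		code>>16, (code>>8)&0xff, code&0xff, code)
}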
+func LinuxVersionCode() (uint32, error) { + v, err := internal.KernelVersion() + if err != nil { + return 0, err + } + return v.Kernel(), nil +} diff --git a/vendor/github.com/cilium/little-vm-helper/pkg/kernels/conf.go b/vendor/github.com/cilium/little-vm-helper/pkg/kernels/conf.go index f2aea28b414..d6eb0a501b4 100644 --- a/vendor/github.com/cilium/little-vm-helper/pkg/kernels/conf.go +++ b/vendor/github.com/cilium/little-vm-helper/pkg/kernels/conf.go @@ -62,8 +62,8 @@ var ConfigOptGroups = map[string][]ConfigOption{ // NB: is not disabled from the final config // {"--disable", "CONFIG_CDROM"}, {"--disable", "CONFIG_ISO9669_FS"}, - // wireless {"--disable", "CONFIG_CFG80211"}, + {"--disable", "CONFIG_WIRELESS"}, {"--disable", "CONFIG_RFKILL"}, {"--disable", "CONFIG_MACINTOSH_DRIVERS"}, {"--disable", "CONFIG_SOUND"}, @@ -72,6 +72,11 @@ var ConfigOptGroups = map[string][]ConfigOption{ {"--disable", "CONFIG_USB"}, {"--disable", "CONFIG_WLAN"}, {"--disable", "CONFIG_HID"}, + {"--disable", "CONFIG_I2C"}, + {"--disable", "CONFIG_PCMCIA"}, + {"--disable", "CONFIG_MD"}, + {"--disable", "CONFIG_DMADEVICES"}, + {"--disable", "CONFIG_THERMAL"}, }, "bpf": []ConfigOption{ {"--enable", "CONFIG_BPF"}, diff --git a/vendor/github.com/cilium/little-vm-helper/pkg/kernels/dir.go b/vendor/github.com/cilium/little-vm-helper/pkg/kernels/dir.go index 8cb3f6d8879..630b9dd5f95 100644 --- a/vendor/github.com/cilium/little-vm-helper/pkg/kernels/dir.go +++ b/vendor/github.com/cilium/little-vm-helper/pkg/kernels/dir.go @@ -6,6 +6,7 @@ package kernels import ( "bufio" "context" + "errors" "fmt" "os" "path/filepath" @@ -13,7 +14,6 @@ import ( "runtime" "github.com/cilium/little-vm-helper/pkg/logcmd" - "github.com/hashicorp/go-multierror" "github.com/sirupsen/logrus" ) @@ -109,8 +109,9 @@ func kcfonfigValidate(opts []ConfigOption) error { optMap[opt] = mapVal if mapVal.enabled != optEnabled { - err := fmt.Errorf("value %s misconfigured: expected: %t but seems to be %t based on '%s'", opt, mapVal.enabled, optEnabled, txt) - ret = multierror.Append(ret, err) + ret = errors.Join(ret, + fmt.Errorf("value %s misconfigured: expected: %t but seems to be %t based on '%s'", + opt, mapVal.enabled, optEnabled, txt)) } } @@ -121,8 +122,7 @@ func kcfonfigValidate(opts []ConfigOption) error { for i, v := range optMap { if v.enabled && !v.checked { - err := fmt.Errorf("value %s enabled but not found", i) - ret = multierror.Append(ret, err) + ret = errors.Join(ret, fmt.Errorf("value %s enabled but not found", i)) } } diff --git a/vendor/github.com/cilium/little-vm-helper/pkg/kernels/git.go b/vendor/github.com/cilium/little-vm-helper/pkg/kernels/git.go index 17413116960..84018b5072f 100644 --- a/vendor/github.com/cilium/little-vm-helper/pkg/kernels/git.go +++ b/vendor/github.com/cilium/little-vm-helper/pkg/kernels/git.go @@ -5,13 +5,13 @@ package kernels import ( "context" + "errors" "fmt" "os" "path/filepath" "github.com/cilium/little-vm-helper/pkg/logcmd" - "github.com/hashicorp/go-multierror" "github.com/sirupsen/logrus" ) @@ -116,7 +116,7 @@ func gitRemoveWorkdir(ctx context.Context, log logrus.FieldLogger, arg *gitRemov arg.workDir, } if err := logcmd.RunAndLogCommandContext(ctx, log, GitBinary, worktreeRemoveArgs...); err != nil { - multierror.Append(res, fmt.Errorf("did not remove worktree: %w", err)) + res = errors.Join(res, fmt.Errorf("did not remove worktree: %w", err)) } remoteRemoveArgs := []string{ @@ -125,7 +125,7 @@ func gitRemoveWorkdir(ctx context.Context, log logrus.FieldLogger, arg *gitRemov arg.remoteName, } if err 
:= logcmd.RunAndLogCommandContext(ctx, log, GitBinary, remoteRemoveArgs...); err != nil { - multierror.Append(res, fmt.Errorf("did not remove remote: %w", err)) + res = errors.Join(res, fmt.Errorf("did not remove remote: %w", err)) } branchRemoveArgs := []string{ @@ -134,7 +134,7 @@ func gitRemoveWorkdir(ctx context.Context, log logrus.FieldLogger, arg *gitRemov arg.localBranch, } if err := logcmd.RunAndLogCommandContext(ctx, log, GitBinary, branchRemoveArgs...); err != nil { - multierror.Append(res, fmt.Errorf("did not remove local branch: %w", err)) + res = errors.Join(res, fmt.Errorf("did not remove local branch: %w", err)) } return res diff --git a/vendor/github.com/containerd/containerd/log/context_deprecated.go b/vendor/github.com/containerd/containerd/log/context_deprecated.go new file mode 100644 index 00000000000..9e9e8b49135 --- /dev/null +++ b/vendor/github.com/containerd/containerd/log/context_deprecated.go @@ -0,0 +1,149 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + + "github.com/containerd/log" +) + +// G is a shorthand for [GetLogger]. +// +// Deprecated: use [log.G]. +var G = log.G + +// L is an alias for the standard logger. +// +// Deprecated: use [log.L]. +var L = log.L + +// Fields type to pass to "WithFields". +// +// Deprecated: use [log.Fields]. +type Fields = log.Fields + +// Entry is a logging entry. +// +// Deprecated: use [log.Entry]. +type Entry = log.Entry + +// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using +// zeros to ensure the formatted time is always the same number of +// characters. +// +// Deprecated: use [log.RFC3339NanoFixed]. +const RFC3339NanoFixed = log.RFC3339NanoFixed + +// Level is a logging level. +// +// Deprecated: use [log.Level]. +type Level = log.Level + +// Supported log levels. +const ( + // TraceLevel level. + // + // Deprecated: use [log.TraceLevel]. + TraceLevel Level = log.TraceLevel + + // DebugLevel level. + // + // Deprecated: use [log.DebugLevel]. + DebugLevel Level = log.DebugLevel + + // InfoLevel level. + // + // Deprecated: use [log.InfoLevel]. + InfoLevel Level = log.InfoLevel + + // WarnLevel level. + // + // Deprecated: use [log.WarnLevel]. + WarnLevel Level = log.WarnLevel + + // ErrorLevel level + // + // Deprecated: use [log.ErrorLevel]. + ErrorLevel Level = log.ErrorLevel + + // FatalLevel level. + // + // Deprecated: use [log.FatalLevel]. + FatalLevel Level = log.FatalLevel + + // PanicLevel level. + // + // Deprecated: use [log.PanicLevel]. + PanicLevel Level = log.PanicLevel +) + +// SetLevel sets log level globally. It returns an error if the given +// level is not supported. +// +// Deprecated: use [log.SetLevel]. +func SetLevel(level string) error { + return log.SetLevel(level) +} + +// GetLevel returns the current log level. +// +// Deprecated: use [log.GetLevel]. +func GetLevel() log.Level { + return log.GetLevel() +} + +// OutputFormat specifies a log output format. +// +// Deprecated: use [log.OutputFormat]. 
diff --git a/vendor/github.com/containerd/containerd/log/context_deprecated.go b/vendor/github.com/containerd/containerd/log/context_deprecated.go
new file mode 100644
index 00000000000..9e9e8b49135
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/log/context_deprecated.go
@@ -0,0 +1,149 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package log
+
+import (
+	"context"
+
+	"github.com/containerd/log"
+)
+
+// G is a shorthand for [GetLogger].
+//
+// Deprecated: use [log.G].
+var G = log.G
+
+// L is an alias for the standard logger.
+//
+// Deprecated: use [log.L].
+var L = log.L
+
+// Fields type to pass to "WithFields".
+//
+// Deprecated: use [log.Fields].
+type Fields = log.Fields
+
+// Entry is a logging entry.
+//
+// Deprecated: use [log.Entry].
+type Entry = log.Entry
+
+// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using
+// zeros to ensure the formatted time is always the same number of
+// characters.
+//
+// Deprecated: use [log.RFC3339NanoFixed].
+const RFC3339NanoFixed = log.RFC3339NanoFixed
+
+// Level is a logging level.
+//
+// Deprecated: use [log.Level].
+type Level = log.Level
+
+// Supported log levels.
+const (
+	// TraceLevel level.
+	//
+	// Deprecated: use [log.TraceLevel].
+	TraceLevel Level = log.TraceLevel
+
+	// DebugLevel level.
+	//
+	// Deprecated: use [log.DebugLevel].
+	DebugLevel Level = log.DebugLevel
+
+	// InfoLevel level.
+	//
+	// Deprecated: use [log.InfoLevel].
+	InfoLevel Level = log.InfoLevel
+
+	// WarnLevel level.
+	//
+	// Deprecated: use [log.WarnLevel].
+	WarnLevel Level = log.WarnLevel
+
+	// ErrorLevel level
+	//
+	// Deprecated: use [log.ErrorLevel].
+	ErrorLevel Level = log.ErrorLevel
+
+	// FatalLevel level.
+	//
+	// Deprecated: use [log.FatalLevel].
+	FatalLevel Level = log.FatalLevel
+
+	// PanicLevel level.
+	//
+	// Deprecated: use [log.PanicLevel].
+	PanicLevel Level = log.PanicLevel
+)
+
+// SetLevel sets log level globally. It returns an error if the given
+// level is not supported.
+//
+// Deprecated: use [log.SetLevel].
+func SetLevel(level string) error {
+	return log.SetLevel(level)
+}
+
+// GetLevel returns the current log level.
+//
+// Deprecated: use [log.GetLevel].
+func GetLevel() log.Level {
+	return log.GetLevel()
+}
+
+// OutputFormat specifies a log output format.
+//
+// Deprecated: use [log.OutputFormat].
+type OutputFormat = log.OutputFormat
+
+// Supported log output formats.
+const (
+	// TextFormat represents the text logging format.
+	//
+	// Deprecated: use [log.TextFormat].
+	TextFormat log.OutputFormat = "text"
+
+	// JSONFormat represents the JSON logging format.
+	//
+	// Deprecated: use [log.JSONFormat].
+	JSONFormat log.OutputFormat = "json"
+)
+
+// SetFormat sets the log output format.
+//
+// Deprecated: use [log.SetFormat].
+func SetFormat(format OutputFormat) error {
+	return log.SetFormat(format)
+}
+
+// WithLogger returns a new context with the provided logger. Use in
+// combination with logger.WithField(s) for great effect.
+//
+// Deprecated: use [log.WithLogger].
+func WithLogger(ctx context.Context, logger *log.Entry) context.Context {
+	return log.WithLogger(ctx, logger)
+}
+
+// GetLogger retrieves the current logger from the context. If no logger is
+// available, the default logger is returned.
+//
+// Deprecated: use [log.GetLogger].
+func GetLogger(ctx context.Context) *log.Entry {
+	return log.GetLogger(ctx)
+}
diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go
index cf916789881..370508d28d1 100644
--- a/vendor/github.com/containerd/containerd/plugin/context.go
+++ b/vendor/github.com/containerd/containerd/plugin/context.go
@@ -21,9 +21,10 @@ import (
 	"fmt"
 	"path/filepath"
 
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/events/exchange"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // InitContext is used for plugin initialization
@@ -133,6 +134,19 @@ func (ps *Set) Get(t Type) (interface{}, error) {
 	return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound)
 }
 
+// GetByID returns the plugin of the given type and ID
+func (ps *Set) GetByID(t Type, id string) (*Plugin, error) {
+	typSet, ok := ps.byTypeAndID[t]
+	if !ok || len(typSet) == 0 {
+		return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound)
+	}
+	p, ok := typSet[id]
+	if !ok {
+		return nil, fmt.Errorf("no plugins registered for %s %q: %w", t, id, errdefs.ErrNotFound)
+	}
+	return p, nil
+}
+
 // GetAll returns all initialized plugins
 func (ps *Set) GetAll() []*Plugin {
 	return ps.ordered
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go
index 082f3178b0d..f1be877fcb2 100644
--- a/vendor/github.com/containerd/containerd/plugin/plugin.go
+++ b/vendor/github.com/containerd/containerd/plugin/plugin.go
@@ -90,6 +90,8 @@ const (
 	SandboxStorePlugin Type = "io.containerd.sandbox.store.v1"
 	// SandboxControllerPlugin implements a sandbox controller
 	SandboxControllerPlugin Type = "io.containerd.sandbox.controller.v1"
+	// WarningPlugin implements a warning service
+	WarningPlugin Type = "io.containerd.warning.v1"
 )
 
 const (
@@ -98,7 +100,8 @@ const (
 	// RuntimeRuncV1 is the runc runtime that supports a single container
 	RuntimeRuncV1 = "io.containerd.runc.v1"
 	// RuntimeRuncV2 is the runc runtime that supports multiple containers per shim
-	RuntimeRuncV2 = "io.containerd.runc.v2"
+	RuntimeRuncV2      = "io.containerd.runc.v2"
+	DeprecationsPlugin = "deprecations"
 )
 
 // Registration contains information for registering a plugin
@@ -143,7 +146,7 @@ var register = struct {
 }{}
 
 // Load loads all plugins at the provided path into containerd
-func Load(path string) (err error) {
+func Load(path string) (count int, err error) {
 	defer func() {
 		if v := recover(); v != nil {
 			rerr, ok := v.(error)
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go
index 0ee442d9553..0671630a202 100644
--- a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go
+++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go
@@ -25,12 +25,13 @@ import (
 	"runtime"
 )
 
-// loadPlugins loads all plugins for the OS and Arch
-// that containerd is built for inside the provided path
-func loadPlugins(path string) error {
+// loadPlugins loads all plugins for the OS and Arch that containerd is built
+// for inside the provided path and returns the count of successfully-loaded
+// plugins
+func loadPlugins(path string) (int, error) {
 	abs, err := filepath.Abs(path)
 	if err != nil {
-		return err
+		return 0, err
 	}
 	pattern := filepath.Join(abs, fmt.Sprintf(
 		"*-%s-%s.%s",
@@ -40,14 +41,16 @@
 	))
 	libs, err := filepath.Glob(pattern)
 	if err != nil {
-		return err
+		return 0, err
 	}
+	loaded := 0
 	for _, lib := range libs {
 		if _, err := plugin.Open(lib); err != nil {
-			return err
+			return loaded, err
 		}
+		loaded++
 	}
-	return nil
+	return loaded, nil
 }
 
 // getLibExt returns a platform specific lib extension for
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_other.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go
index c0b53a6b4d1..b0896d6908d 100644
--- a/vendor/github.com/containerd/containerd/plugin/plugin_other.go
+++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go
@@ -18,7 +18,7 @@
 
 package plugin
 
-func loadPlugins(path string) error {
+func loadPlugins(path string) (int, error) {
 	// plugins not supported until 1.8
-	return nil
+	return 0, nil
 }
diff --git a/vendor/github.com/containerd/log/.golangci.yml b/vendor/github.com/containerd/log/.golangci.yml
new file mode 100644
index 00000000000..a695775df49
--- /dev/null
+++ b/vendor/github.com/containerd/log/.golangci.yml
@@ -0,0 +1,30 @@
+linters:
+  enable:
+    - exportloopref # Checks for pointers to enclosing loop variables
+    - gofmt
+    - goimports
+    - gosec
+    - ineffassign
+    - misspell
+    - nolintlint
+    - revive
+    - staticcheck
+    - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
+    - unconvert
+    - unused
+    - vet
+    - dupword # Checks for duplicate words in the source code
+  disable:
+    - errcheck
+
+run:
+  timeout: 5m
+  skip-dirs:
+    - api
+    - cluster
+    - design
+    - docs
+    - docs/man
+    - releases
+    - reports
+    - test # e2e scripts
diff --git a/vendor/github.com/containerd/log/LICENSE b/vendor/github.com/containerd/log/LICENSE
new file mode 100644
index 00000000000..584149b6ee2
--- /dev/null
+++ b/vendor/github.com/containerd/log/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright The containerd Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/log/README.md b/vendor/github.com/containerd/log/README.md
new file mode 100644
index 00000000000..00e08498801
--- /dev/null
+++ b/vendor/github.com/containerd/log/README.md
@@ -0,0 +1,17 @@
+# log
+
+A Go package providing a common logging interface across containerd repositories and a way for clients to use and configure logging in containerd packages.
+
+This package is not intended to be used as a standalone logging package outside of the containerd ecosystem and is intended as an interface wrapper around a logging implementation.
+In the future this package may be replaced with a common go logging interface.
+
+## Project details
+
+**log** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
+
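As context for the rename and deprecation shims above: downstream code is expected to migrate from github.com/containerd/containerd/log to the extracted github.com/containerd/log module. A minimal, illustrative sketch (not part of the patch) that uses only entry points the deprecated aliases forward to:

package main

import (
	"context"

	"github.com/containerd/log"
)

func main() {
	// Attach a field-scoped logger to the context, then retrieve it
	// downstream with log.G; these are the calls that the deprecated
	// containerd/containerd/log aliases above now delegate to.
	ctx := log.WithLogger(context.Background(), log.L.WithField("component", "demo"))
	log.G(ctx).Info("hello from containerd/log")
}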
diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/log/context.go
similarity index 100%
rename from vendor/github.com/containerd/containerd/log/context.go
rename to vendor/github.com/containerd/log/context.go
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
index b4800567345..42bf32aab00 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
@@ -9,6 +9,8 @@ func Render(doc []byte) []byte {
 	renderer := NewRoffRenderer()
 
 	return blackfriday.Run(doc,
-		[]blackfriday.Option{blackfriday.WithRenderer(renderer),
-			blackfriday.WithExtensions(renderer.GetExtensions())}...)
+		[]blackfriday.Option{
+			blackfriday.WithRenderer(renderer),
+			blackfriday.WithExtensions(renderer.GetExtensions()),
+		}...)
 }
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
index be2b3436062..4b19188d90f 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -1,6 +1,7 @@
 package md2man
 
 import (
+	"bytes"
 	"fmt"
 	"io"
 	"os"
@@ -34,10 +35,10 @@ const (
 	hruleTag         = "\n.ti 0\n\\l'\\n(.lu'\n"
 	linkTag          = "\n\\[la]"
 	linkCloseTag     = "\\[ra]"
-	codespanTag      = "\\fB\\fC"
+	codespanTag      = "\\fB"
 	codespanCloseTag = "\\fR"
-	codeTag          = "\n.PP\n.RS\n\n.nf\n"
-	codeCloseTag     = "\n.fi\n.RE\n"
+	codeTag          = "\n.EX\n"
+	codeCloseTag     = "\n.EE\n"
 	quoteTag         = "\n.PP\n.RS\n"
 	quoteCloseTag    = "\n.RE\n"
 	listTag          = "\n.RS\n"
@@ -86,8 +87,7 @@ func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
 // RenderNode is called for each node in a markdown document; based on the node
 // type the equivalent roff output is sent to the writer
 func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
-
-	var walkAction = blackfriday.GoToNext
+	walkAction := blackfriday.GoToNext
 
 	switch node.Type {
 	case blackfriday.Text:
@@ -109,9 +109,16 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
 			out(w, strongCloseTag)
 		}
 	case blackfriday.Link:
-		if !entering {
-			out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
+		// Don't render the link text for automatic links, because this
+		// will only duplicate the URL in the roff output.
+		// See https://daringfireball.net/projects/markdown/syntax#autolink
+		if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
+			out(w, string(node.FirstChild.Literal))
 		}
+		// Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
+		escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
+		out(w, linkTag+escapedLink+linkCloseTag)
+		walkAction = blackfriday.SkipChildren
 	case blackfriday.Image:
 		// ignore images
 		walkAction = blackfriday.SkipChildren
@@ -160,6 +167,11 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
 		r.handleTableCell(w, node, entering)
 	case blackfriday.HTMLSpan:
 		// ignore other HTML tags
+	case blackfriday.HTMLBlock:
+		if bytes.HasPrefix(node.Literal, []byte("
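To see the roff changes above end to end: code blocks are now emitted with the standard .EX/.EE man-page macros instead of .PP/.RS/.nf, and link URLs have hyphens escaped. A minimal sketch (not part of the patch) that feeds markdown through the package's exported Render and prints the generated man-page source; the input markdown here is an assumed example:

package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// A fenced code block exercises the updated codeTag/codeCloseTag
	// constants, and the angle-bracket link exercises the Link branch.
	src := []byte("# DEMO 1\n\n```\nerrors.Join(a, b)\n```\n\n<https://containerd.io>\n")
	fmt.Printf("%s\n", md2man.Render(src))
}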