Merge pull request #866 from andyzhangx/CVE-2025-0426

fix: CVE-2025-0426

commit b1750037ed

go.mod: 116 lines changed
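The go.mod changes below move the whole k8s.io module family from the v0.28 line to v0.31.6 (Kubernetes v1.31.6, which carries the upstream fix for CVE-2025-0426) and let the transitive dependencies follow. A bump like this is normally produced with the standard module tooling rather than by hand; a minimal sketch of that workflow, with the module list abbreviated and the exact commands assumed rather than taken from this PR:

    # bump the direct k8s.io requirements, then let the resolver settle the rest
    go get k8s.io/api@v0.31.6 k8s.io/apimachinery@v0.31.6 \
        k8s.io/client-go@v0.31.6 k8s.io/kubernetes@v1.31.6
    go mod tidy   # rewrites go.mod and go.sum, dropping stale entries

The go.sum churn later in the commit is the mechanical result of that tidy step.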
@@ -12,11 +12,11 @@ require (
golang.org/x/net v0.37.0
google.golang.org/grpc v1.71.0
google.golang.org/protobuf v1.36.5
k8s.io/api v0.28.12
k8s.io/apimachinery v0.28.12
k8s.io/client-go v0.28.12
k8s.io/api v0.31.6
k8s.io/apimachinery v0.31.6
k8s.io/client-go v0.31.6
k8s.io/klog/v2 v2.130.1
k8s.io/kubernetes v1.28.12
k8s.io/kubernetes v1.31.6
k8s.io/mount-utils v0.32.0
k8s.io/pod-security-admission v0.0.0
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
@@ -26,77 +26,82 @@ require (

require go.opentelemetry.io/auto/sdk v1.1.0 // indirect

require (
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/x448/float16 v0.8.4 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
)

require (
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/cel-go v0.16.1 // indirect
github.com/google/cel-go v0.20.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/spdystream v0.4.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/selinux v1.10.0 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.14 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect
go.etcd.io/etcd/client/v3 v3.5.14 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.19.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.24.0
@@ -115,34 +120,35 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.0.0 // indirect
k8s.io/apiserver v0.28.9 // indirect
k8s.io/cloud-provider v0.28.9 // indirect
k8s.io/component-base v0.28.9 // indirect
k8s.io/component-helpers v0.28.9 // indirect
k8s.io/controller-manager v0.28.9 // indirect
k8s.io/kms v0.28.9 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/apiserver v0.31.6 // indirect
k8s.io/cloud-provider v0.31.6 // indirect
k8s.io/component-base v0.31.6 // indirect
k8s.io/component-helpers v0.31.6 // indirect
k8s.io/controller-manager v0.31.6 // indirect
k8s.io/kms v0.31.6 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/kubectl v0.0.0 // indirect
k8s.io/kubelet v0.28.9 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
k8s.io/kubelet v0.31.6 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)

replace (
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.9
k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.9
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.9
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.9
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.9
k8s.io/endpointslice => k8s.io/endpointslice v0.28.9
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.9
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.9
k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.9
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.9
k8s.io/kubectl => k8s.io/kubectl v0.28.9
k8s.io/kubelet => k8s.io/kubelet v0.28.9
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.9
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.9
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.9
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.31.6
k8s.io/cloud-provider => k8s.io/cloud-provider v0.31.6
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.31.6
k8s.io/cri-client => k8s.io/cri-client v0.31.6
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.31.6
k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.31.6
k8s.io/endpointslice => k8s.io/endpointslice v0.31.6
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.31.6
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.31.6
k8s.io/kube-proxy => k8s.io/kube-proxy v0.31.6
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.31.6
k8s.io/kubectl => k8s.io/kubectl v0.31.6
k8s.io/kubelet => k8s.io/kubelet v0.31.6
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.31.6
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.31.6
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.31.6
)
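k8s.io/kubernetes publishes its staging repositories only as v0.0.0 placeholders, so consumers have to pin each staging module with a replace directive that matches the Kubernetes release; the block above moves every pin from v0.28.9 to v0.31.6 and adds k8s.io/cri-client, a staging module that did not exist in the v0.28 line. A sketch of how such pins can be regenerated with stock tooling (module list abbreviated; the repo may well use its own update script instead):

    # re-pin a few staging modules to the release matching k8s.io/kubernetes v1.31.6
    for m in apiextensions-apiserver cloud-provider kubectl kubelet; do
        go mod edit -replace k8s.io/$m=k8s.io/$m@v0.31.6
    done
    go mod tidy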
go.sum: 232 lines changed
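Each module version in go.sum is recorded as a pair of lines: an h1: hash of the module's file tree and a hash of its go.mod file, which is why the hunks below swap entries two at a time. After an update, the recorded sums can be checked against the module cache with the standard toolchain (command assumed here, not shown in the PR):

    go mod verify   # re-hashes cached downloads and compares them to go.sum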
@@ -6,11 +6,7 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
@@ -47,6 +43,8 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -56,14 +54,12 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -71,8 +67,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -82,8 +78,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 h1:boJj011Hh+874zpIySeApCX4GeOjPl9qhRF3QuIZq+Q=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/container-storage-interface/spec v1.11.0 h1:H/YKTOeUZwHtyPOr9raR+HgFmGluGCklulxDYxSdVNM=
github.com/container-storage-interface/spec v1.11.0/go.mod h1:DtUvaQszPml1YJfIK7c00mlv6/g4wNMLanLgiUbKFRI=
@@ -91,38 +85,36 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -136,8 +128,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -146,8 +138,9 @@ github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2Kv
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -159,8 +152,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
@@ -185,8 +176,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -214,8 +205,8 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
@@ -223,8 +214,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -267,10 +258,8 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8=
github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
@@ -287,6 +276,7 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -299,8 +289,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
@@ -314,23 +304,23 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
@@ -338,13 +328,13 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -359,7 +349,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@@ -367,41 +356,43 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo=
go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ=
go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY=
go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI=
go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg=
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0=
go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g=
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ=
go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8=
go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg=
go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M=
go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0=
go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA=
go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw=
go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok=
go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
@@ -410,22 +401,17 @@ go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -450,7 +436,6 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -509,7 +494,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -551,7 +535,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -617,6 +600,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
@@ -631,7 +616,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
@@ -642,62 +626,62 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw=
k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ=
k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ=
k8s.io/apiextensions-apiserver v0.28.9 h1:yzPHp+4IASHeu7XIPkAKJrY4UjWdjiAjOcQMd6oNKj0=
k8s.io/apiextensions-apiserver v0.28.9/go.mod h1:Rjhvq5y3JESdZgV2UOByldyefCfRrUguVpBLYOAIbVs=
k8s.io/api v0.31.6 h1:ocWG/UhC9Mqp5oEfYWy9wCddbZiZyBAFTlBt0LVlhDg=
k8s.io/api v0.31.6/go.mod h1:i16xSiKMgVIVhsJMxfWq0mJbXA+Z7KhjPgYmwT41hl4=
k8s.io/apiextensions-apiserver v0.31.6 h1:v9sqyWlrgFZpAPdEb/bEiXfM98TfSppwRF0X/uWKXh0=
k8s.io/apiextensions-apiserver v0.31.6/go.mod h1:QVH3CFwqzGZtwsxPYzJlA/Qiwgb5FXmRMGls3CjzvbI=
k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0=
k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
k8s.io/apiserver v0.28.9 h1:koPXvgSXRBDxKJQjJGdZNgPsT9lQv6scJJFipd1m86E=
k8s.io/apiserver v0.28.9/go.mod h1:D51I37WBZojJhmLcjNVE4GSVrjiUHP+yq+N5KvKn2wY=
k8s.io/apimachinery v0.31.6 h1:Pn96A0wHD0X8+l7QTdAzdLQPrpav1s8rU6A+v2/9UEY=
k8s.io/apimachinery v0.31.6/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/apiserver v0.31.6 h1:FEhEGLsz1PbMOHeQZDbOUlMh36zRZbjgKwJCoMhdGmw=
k8s.io/apiserver v0.31.6/go.mod h1:dpFh+xqFQ02O8vLYCIqoiV7sJIpZsUULeNuag6Y9HGo=
k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec=
k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE=
k8s.io/cloud-provider v0.28.9 h1:FBW4Ii1NdXCHKprzkM8/s5BpxvLgJmYrZTNJABsVX7Y=
k8s.io/cloud-provider v0.28.9/go.mod h1:7tFyiftAlSARvJS6mzZQQKKDQA81asNQ2usg35R3Exo=
k8s.io/client-go v0.31.6 h1:51HT40qVIZ13BrHKeWxFuU52uoPnFhxTYJnv4+LTgp4=
k8s.io/client-go v0.31.6/go.mod h1:MEq7JQJelUQ0/4fMoPEUrc/OOFyGo/9LmGA38H6O6xY=
k8s.io/cloud-provider v0.31.6 h1:5vVMyf/m/n8ij/GmSJLRcatchmciRr0gs4peBcxqvKk=
k8s.io/cloud-provider v0.31.6/go.mod h1:iT6kIEMEXrTIvRBAaRU5qefRzgPaSV6kwTc6mjhhnEw=
k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y=
k8s.io/component-base v0.28.9 h1:ySM2PR8Z/xaUSG1Akd3yM6dqUezTltI7S5aV41MMuuc=
k8s.io/component-base v0.28.9/go.mod h1:QtWzscEhCKRfHV24/S+11BwWjVxhC6fd3RYoEgZcWFU=
k8s.io/component-helpers v0.28.9 h1:knX9F2nRoxF4wplgXO4C5tE4/k7HGszK3177Tm4+CUc=
k8s.io/component-helpers v0.28.9/go.mod h1:TdAkLbywEDE2CB5h8LbM/W03T3k8wvqAaoPcEZrr6Z4=
k8s.io/controller-manager v0.28.9 h1:muAtmO2mDN7pDkAJQMknvWy+WQhkvvi/jK1V82+qbLw=
k8s.io/controller-manager v0.28.9/go.mod h1:RYP65K6GWLRWYZR7PRRaStfvgeXkhCGZwJsxRPuaDV0=
k8s.io/csi-translation-lib v0.28.9 h1:zl93l7wk0iwKInyRJfaodvsWf1z8QtWCN9a5OqHeT3o=
k8s.io/csi-translation-lib v0.28.9/go.mod h1:eOniPQitdkuyVh+gtktg3yeDJQu/IidIUSMadDPLhak=
k8s.io/component-base v0.31.6 h1:FgI25PuZtCp2n7AFpOaDpMQOLieFdrpAbpeoZu7VhDI=
k8s.io/component-base v0.31.6/go.mod h1:aVRrh8lAI1kSShFmwcKLhc3msQoUcmFWPBDf0sXaISM=
k8s.io/component-helpers v0.31.6 h1:Af8BcE6pElKlLaerwW9s04jTQVFa66wmI1pkaNfDWzE=
k8s.io/component-helpers v0.31.6/go.mod h1:6CRV6M+7R13eqtz4FBm2ty9eH+QajDcP3y0Bklzh2FA=
k8s.io/controller-manager v0.31.6 h1:HQRUV6nogHo2N7vr3cgVNjZ+wvHIMvxEMjTeCrHitE4=
k8s.io/controller-manager v0.31.6/go.mod h1:0HDNTZVapQFa9G96jNxrU99ht7fQJVEKBXDzqKDMez0=
k8s.io/csi-translation-lib v0.31.6 h1:mBkF3AG8pRcwZv8SY7qT1JWznRsmYjZfT5Lxel9nN4Q=
k8s.io/csi-translation-lib v0.31.6/go.mod h1:I2F51irYJyt78so7wdral65B7PB7jR3keZ2MpB78mWw=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.28.9 h1:ApCWJulBl+uFRTr2jtTpG1lffmqqMuLnOH/RUbtO4UY=
k8s.io/kms v0.28.9/go.mod h1:VgyAIRMFqZX9lHyixecU/JTI0wnPD1wCIlquvlXRJ+Y=
k8s.io/kms v0.31.6 h1:p7OY+9Hp8nPtgzm0vT9TrERNigQQSu8tkgWqn+GvB2w=
k8s.io/kms v0.31.6/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/kubectl v0.28.9 h1:FTf/aapuuFxPmt8gYUeqUmcsgG0gKC2ei6n+TO5sGOw=
k8s.io/kubectl v0.28.9/go.mod h1:ip/zTUr1MM/H2M+YbPHnSKLt0x6kb85SJtRSjwEGDfs=
k8s.io/kubelet v0.28.9 h1:76v00fFLeniz27kXhGGUIxONdwa9LKcD2Jd5cXYAZko=
k8s.io/kubelet v0.28.9/go.mod h1:46P39DFjI+E59nU2OgpatyS3oWy58ClulKO6riZ/97o=
k8s.io/kubernetes v1.28.12 h1:DtWB8ZjoYiN/PXD4qDXFppf9IouVUavn6r3S+3NMUkU=
k8s.io/kubernetes v1.28.12/go.mod h1:chlmcCDBnOA/y+572cw8dO0Rci1wiA8bm5+zhPdFLCk=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/kubectl v0.31.6 h1:ngzql/UugqpEbeeyQX678BlVHXks19JR3CFjwKnWuFI=
k8s.io/kubectl v0.31.6/go.mod h1:m6OXbx9s0sZiaZrfHHSEmJUD5CjWPA5+cVg0GZnVdzM=
k8s.io/kubelet v0.31.6 h1:lxVvyLNDcb/QTpQNkDySk3iscgq4zubeSZs3cF6PmaA=
k8s.io/kubelet v0.31.6/go.mod h1:BPghO52ilF7UzFEVBmYFOxdVtLge0P1gixjz84lBzzc=
k8s.io/kubernetes v1.31.6 h1:zVhgWDFHmIj51o5sNARmjdgNvpq4K2Smya8pS5vxqlc=
k8s.io/kubernetes v1.31.6/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs=
k8s.io/mount-utils v0.32.0 h1:KOQAhPzJICATXnc6XCkWoexKbkOexRnMCUW8APFfwg4=
k8s.io/mount-utils v0.32.0/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0=
k8s.io/pod-security-admission v0.28.9 h1:towoNqSp7aU7gF8T89zftCuQUfliyib3ds20Kz/hysg=
k8s.io/pod-security-admission v0.28.9/go.mod h1:mfEhECQ+AvP+zehqxemSq1pDL4YLoWCP7liL0YmkpZY=
k8s.io/pod-security-admission v0.31.6 h1:5WnXyl+UNmQb73O0L1w82uaUEPuvp+sxdhXRiOLdCkY=
k8s.io/pod-security-admission v0.31.6/go.mod h1:b+ZpSSR+XMx3t9Pvy/GdcXoI0CEpiWGT7IGAhcOBcGM=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/cloud-provider-azure v1.28.9 h1:OAF8cQubrNUEiMNbnDFowRl6jciWTt3DqI9FhWGcnpE=
sigs.k8s.io/cloud-provider-azure v1.28.9/go.mod h1:63ByXruYF4XWLdOIRxtSz6RYel5PpdKRsCPKIj4Io58=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
@ -109,8 +109,6 @@ var _ = ginkgo.BeforeSuite(func() {
        kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
        os.Setenv(kubeconfigEnvVar, kubeconfig)
    }
    handleFlags()
    framework.AfterReadingAllFlags(&framework.TestContext)

    options := nfs.DriverOptions{
        NodeID: nodeID,
@ -215,6 +213,12 @@ func execTestCmd(cmds []testCmd) {
    }
}

func TestMain(m *testing.M) {
    handleFlags()
    framework.AfterReadingAllFlags(&framework.TestContext)
    os.Exit(m.Run())
}

func TestE2E(t *testing.T) {
    gomega.RegisterFailHandler(ginkgo.Fail)
    ginkgo.RunSpecs(t, "E2E Suite")
@ -157,7 +157,7 @@ func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.Pe
        AccessModes: []v1.PersistentVolumeAccessMode{
            v1.ReadWriteOnce,
        },
        Resources: v1.ResourceRequirements{
        Resources: v1.VolumeResourceRequirements{
            Requests: v1.ResourceList{
                v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
            },
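Note on the hunk above: in newer k8s.io/api releases (v0.29+), PersistentVolumeClaimSpec.Resources is typed as v1.VolumeResourceRequirements (Requests/Limits only) rather than v1.ResourceRequirements, which is why this PR swaps the struct literal. A minimal sketch of building a PVC spec against the bumped API; the "10Gi" size is illustrative, not from this PR:

    // Sketch only: constructing a PVC spec with k8s.io/api v0.31.x,
    // where Resources is a v1.VolumeResourceRequirements.
    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        pvc := v1.PersistentVolumeClaimSpec{
            AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
            Resources: v1.VolumeResourceRequirements{ // was v1.ResourceRequirements before v0.29
                Requests: v1.ResourceList{
                    v1.ResourceStorage: resource.MustParse("10Gi"),
                },
            },
        }
        fmt.Println(pvc.Resources.Requests.Storage())
    }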
@ -409,7 +409,7 @@ func (t *TestPod) Create(ctx context.Context) {
}

func (t *TestPod) WaitForSuccess(ctx context.Context) {
    err := e2epod.WaitForPodSuccessInNamespaceSlow(ctx, t.client, t.pod.Name, t.namespace.Name)
    err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, t.client, t.pod.Name, t.namespace.Name, 15*time.Minute)
    framework.ExpectNoError(err)
}
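For context: the test switches from the framework's Slow pod-wait helper to WaitForPodSuccessInNamespaceTimeout with an explicit 15-minute timeout, matching the helper set available in the k8s.io/kubernetes v1.31 test framework. A minimal sketch of the new call site; the waitForPodSuccess wrapper name is hypothetical:

    // Sketch only: waiting for pod success with an explicit timeout,
    // assuming the v1.31 e2e framework packages.
    package e2e

    import (
        "context"
        "time"

        "k8s.io/client-go/kubernetes"
        "k8s.io/kubernetes/test/e2e/framework"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    func waitForPodSuccess(ctx context.Context, c kubernetes.Interface, name, ns string) {
        err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, name, ns, 15*time.Minute)
        framework.ExpectNoError(err)
    }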
68
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
generated
vendored
@ -1,68 +0,0 @@
/*
Package antlr implements the Go version of the ANTLR 4 runtime.

# The ANTLR Tool

ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
(or visitor) that makes it easy to respond to the recognition of phrases of interest.

# Code Generation

ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
runtime library, written specifically to support the generated code in the target language. This library is the
runtime for the Go target.

To generate code for the go target, it is generally recommended to place the source grammar files in a package of
their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
your IDE, or configuration in your CI system.

Here is a general template for an ANTLR based recognizer in Go:

    .
    ├── myproject
    ├── parser
    │     ├── mygrammar.g4
    │     ├── antlr-4.12.0-complete.jar
    │     ├── error_listeners.go
    │     ├── generate.go
    │     ├── generate.sh
    ├── go.mod
    ├── go.sum
    ├── main.go
    └── main_test.go

Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
The generate.go file then looks like this:

    package parser

    //go:generate ./generate.sh

And the generate.sh file will look similar to this:

    #!/bin/sh

    alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
    antlr4 -Dlanguage=Go -no-visitor -package parser *.g4

depending on whether you want visitors or listeners or any other ANTLR options.

From the command line at the root of your package “myproject” you can then simply issue the command:

    go generate ./...

# Copyright Notice

Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.

Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.

[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
*/
package antlr
303
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
generated
vendored
@ -1,303 +0,0 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
)

// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive at the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig interface {
    Equals(o Collectable[ATNConfig]) bool
    Hash() int

    GetState() ATNState
    GetAlt() int
    GetSemanticContext() SemanticContext

    GetContext() PredictionContext
    SetContext(PredictionContext)

    GetReachesIntoOuterContext() int
    SetReachesIntoOuterContext(int)

    String() string

    getPrecedenceFilterSuppressed() bool
    setPrecedenceFilterSuppressed(bool)
}

type BaseATNConfig struct {
    precedenceFilterSuppressed bool
    state                      ATNState
    alt                        int
    context                    PredictionContext
    semanticContext            SemanticContext
    reachesIntoOuterContext    int
}

func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
    return &BaseATNConfig{
        state:                   old.state,
        alt:                     old.alt,
        context:                 old.context,
        semanticContext:         old.semanticContext,
        reachesIntoOuterContext: old.reachesIntoOuterContext,
    }
}

func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
    return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
}

func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
    if semanticContext == nil {
        panic("semanticContext cannot be nil") // TODO: Necessary?
    }

    return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
}

func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
    return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
}

func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
    return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
}

func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
    return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
}

func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
    return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
}

func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
    if semanticContext == nil {
        panic("semanticContext cannot be nil")
    }

    return &BaseATNConfig{
        state:                      state,
        alt:                        c.GetAlt(),
        context:                    context,
        semanticContext:            semanticContext,
        reachesIntoOuterContext:    c.GetReachesIntoOuterContext(),
        precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
    }
}

func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
    return b.precedenceFilterSuppressed
}

func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
    b.precedenceFilterSuppressed = v
}

func (b *BaseATNConfig) GetState() ATNState {
    return b.state
}

func (b *BaseATNConfig) GetAlt() int {
    return b.alt
}

func (b *BaseATNConfig) SetContext(v PredictionContext) {
    b.context = v
}
func (b *BaseATNConfig) GetContext() PredictionContext {
    return b.context
}

func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
    return b.semanticContext
}

func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
    return b.reachesIntoOuterContext
}

func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
    b.reachesIntoOuterContext = v
}

// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
// for a collection.
//
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
    if b == o {
        return true
    } else if o == nil {
        return false
    }

    var other, ok = o.(*BaseATNConfig)

    if !ok {
        return false
    }

    var equal bool

    if b.context == nil {
        equal = other.context == nil
    } else {
        equal = b.context.Equals(other.context)
    }

    var (
        nums = b.state.GetStateNumber() == other.state.GetStateNumber()
        alts = b.alt == other.alt
        cons = b.semanticContext.Equals(other.semanticContext)
        sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
    )

    return nums && alts && cons && sups && equal
}

// Hash is the default hash function for BaseATNConfig, when no specialist hash function
// is required for a collection
func (b *BaseATNConfig) Hash() int {
    var c int
    if b.context != nil {
        c = b.context.Hash()
    }

    h := murmurInit(7)
    h = murmurUpdate(h, b.state.GetStateNumber())
    h = murmurUpdate(h, b.alt)
    h = murmurUpdate(h, c)
    h = murmurUpdate(h, b.semanticContext.Hash())
    return murmurFinish(h, 4)
}

func (b *BaseATNConfig) String() string {
    var s1, s2, s3 string

    if b.context != nil {
        s1 = ",[" + fmt.Sprint(b.context) + "]"
    }

    if b.semanticContext != SemanticContextNone {
        s2 = "," + fmt.Sprint(b.semanticContext)
    }

    if b.reachesIntoOuterContext > 0 {
        s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
    }

    return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
}

type LexerATNConfig struct {
    *BaseATNConfig
    lexerActionExecutor            *LexerActionExecutor
    passedThroughNonGreedyDecision bool
}

func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
    return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}

func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
    return &LexerATNConfig{
        BaseATNConfig:       NewBaseATNConfig5(state, alt, context, SemanticContextNone),
        lexerActionExecutor: lexerActionExecutor,
    }
}

func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
    return &LexerATNConfig{
        BaseATNConfig:                  NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
        lexerActionExecutor:            c.lexerActionExecutor,
        passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
    }
}

func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
    return &LexerATNConfig{
        BaseATNConfig:                  NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
        lexerActionExecutor:            lexerActionExecutor,
        passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
    }
}

func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
    return &LexerATNConfig{
        BaseATNConfig:                  NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
        lexerActionExecutor:            c.lexerActionExecutor,
        passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
    }
}

func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
    return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
}

// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (l *LexerATNConfig) Hash() int {
    var f int
    if l.passedThroughNonGreedyDecision {
        f = 1
    } else {
        f = 0
    }
    h := murmurInit(7)
    h = murmurUpdate(h, l.state.GetStateNumber())
    h = murmurUpdate(h, l.alt)
    h = murmurUpdate(h, l.context.Hash())
    h = murmurUpdate(h, l.semanticContext.Hash())
    h = murmurUpdate(h, f)
    h = murmurUpdate(h, l.lexerActionExecutor.Hash())
    h = murmurFinish(h, 6)
    return h
}

// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
    if l == other {
        return true
    }
    var othert, ok = other.(*LexerATNConfig)

    if l == other {
        return true
    } else if !ok {
        return false
    } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
        return false
    }

    var b bool

    if l.lexerActionExecutor != nil {
        b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
    } else {
        b = othert.lexerActionExecutor != nil
    }

    if b {
        return false
    }

    return l.BaseATNConfig.Equals(othert.BaseATNConfig)
}

func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
    var ds, ok = target.(DecisionState)

    return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
}
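The Hash/Equals pairing above is the contract the runtime's keyed collections (see jcollect.go below) rely on: values that compare Equal must produce the same Hash. A self-contained sketch of that contract, substituting hash/fnv for the runtime's unexported murmur helpers; pair is a made-up type for illustration:

    // Sketch only: the Hash/Equals contract behind the Collectable pattern,
    // with hash/fnv standing in for murmurInit/murmurUpdate/murmurFinish.
    package main

    import (
        "fmt"
        "hash/fnv"
    )

    type pair struct{ state, alt int }

    func (p pair) Hash() int {
        h := fnv.New32a()
        fmt.Fprintf(h, "%d,%d", p.state, p.alt) // mix both fields, like successive murmurUpdate calls
        return int(h.Sum32())
    }

    func (p pair) Equals(o pair) bool { return p == o }

    func main() {
        a, b := pair{7, 1}, pair{7, 1}
        fmt.Println(a.Hash() == b.Hash(), a.Equals(b)) // equal values must hash equal
    }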
441
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
generated
vendored
@ -1,441 +0,0 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
)

type ATNConfigSet interface {
    Hash() int
    Equals(o Collectable[ATNConfig]) bool
    Add(ATNConfig, *DoubleDict) bool
    AddAll([]ATNConfig) bool

    GetStates() *JStore[ATNState, Comparator[ATNState]]
    GetPredicates() []SemanticContext
    GetItems() []ATNConfig

    OptimizeConfigs(interpreter *BaseATNSimulator)

    Length() int
    IsEmpty() bool
    Contains(ATNConfig) bool
    ContainsFast(ATNConfig) bool
    Clear()
    String() string

    HasSemanticContext() bool
    SetHasSemanticContext(v bool)

    ReadOnly() bool
    SetReadOnly(bool)

    GetConflictingAlts() *BitSet
    SetConflictingAlts(*BitSet)

    Alts() *BitSet

    FullContext() bool

    GetUniqueAlt() int
    SetUniqueAlt(int)

    GetDipsIntoOuterContext() bool
    SetDipsIntoOuterContext(bool)
}

// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
// about its elements and can combine similar configurations using a
// graph-structured stack.
type BaseATNConfigSet struct {
    cachedHash int

    // configLookup is used to determine whether two BaseATNConfigSets are equal. We
    // need all configurations with the same (s, i, _, semctx) to be equal. A key
    // effectively doubles the number of objects associated with ATNConfigs. All
    // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
    // read-only because a set becomes a DFA state.
    configLookup *JStore[ATNConfig, Comparator[ATNConfig]]

    // configs is the added elements.
    configs []ATNConfig

    // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
    // info together because it saves recomputation. Can we track conflicts as they
    // are added to save scanning configs later?
    conflictingAlts *BitSet

    // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
    // we hit a pred while computing a closure operation. Do not make a DFA state
    // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
    dipsIntoOuterContext bool

    // fullCtx is whether it is part of a full context LL prediction. Used to
    // determine how to merge $. It is a wildcard with SLL, but not for an LL
    // context merge.
    fullCtx bool

    // Used in parser and lexer. In lexer, it indicates we hit a pred
    // while computing a closure operation. Don't make a DFA state from a.
    hasSemanticContext bool

    // readOnly is whether it is read-only. Do not
    // allow any code to manipulate the set if true because DFA states will point at
    // sets and those must not change. If not, protect other fields; conflictingAlts
    // in particular, which is assigned after readOnly.
    readOnly bool

    // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
    // info together because it saves recomputation. Can we track conflicts as they
    // are added to save scanning configs later?
    uniqueAlt int
}

func (b *BaseATNConfigSet) Alts() *BitSet {
    alts := NewBitSet()
    for _, it := range b.configs {
        alts.add(it.GetAlt())
    }
    return alts
}

func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
    return &BaseATNConfigSet{
        cachedHash:   -1,
        configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
        fullCtx:      fullCtx,
    }
}

// Add merges contexts with existing configs for (s, i, pi, _), where s is the
// ATNConfig.state, i is the ATNConfig.alt, and pi is the
// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
// dipsIntoOuterContext and hasSemanticContext when necessary.
func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
    if b.readOnly {
        panic("set is read-only")
    }

    if config.GetSemanticContext() != SemanticContextNone {
        b.hasSemanticContext = true
    }

    if config.GetReachesIntoOuterContext() > 0 {
        b.dipsIntoOuterContext = true
    }

    existing, present := b.configLookup.Put(config)

    // The config was not already in the set
    //
    if !present {
        b.cachedHash = -1
        b.configs = append(b.configs, config) // Track order here
        return true
    }

    // Merge a previous (s, i, pi, _) with it and save the result
    rootIsWildcard := !b.fullCtx
    merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)

    // No need to check for existing.context because config.context is in the cache,
    // since the only way to create new graphs is the "call rule" and here. We cache
    // at both places.
    existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))

    // Preserve the precedence filter suppression during the merge
    if config.getPrecedenceFilterSuppressed() {
        existing.setPrecedenceFilterSuppressed(true)
    }

    // Replace the context because there is no need to do alt mapping
    existing.SetContext(merged)

    return true
}

func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {

    // states uses the standard comparator provided by the ATNState instance
    //
    states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)

    for i := 0; i < len(b.configs); i++ {
        states.Put(b.configs[i].GetState())
    }

    return states
}

func (b *BaseATNConfigSet) HasSemanticContext() bool {
    return b.hasSemanticContext
}

func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
    b.hasSemanticContext = v
}

func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
    preds := make([]SemanticContext, 0)

    for i := 0; i < len(b.configs); i++ {
        c := b.configs[i].GetSemanticContext()

        if c != SemanticContextNone {
            preds = append(preds, c)
        }
    }

    return preds
}

func (b *BaseATNConfigSet) GetItems() []ATNConfig {
    return b.configs
}

func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
    if b.readOnly {
        panic("set is read-only")
    }

    if b.configLookup.Len() == 0 {
        return
    }

    for i := 0; i < len(b.configs); i++ {
        config := b.configs[i]

        config.SetContext(interpreter.getCachedContext(config.GetContext()))
    }
}

func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
    for i := 0; i < len(coll); i++ {
        b.Add(coll[i], nil)
    }

    return false
}

// Compare is a hack function just to verify that adding DFA states to the known
// set works, so long as comparison of ATNConfigSets works. For that to work, we
// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
// know the order, so we do this inefficient hack. If this proves the point, then
// we can change the config set to a better structure.
func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
    if len(b.configs) != len(bs.configs) {
        return false
    }

    for _, c := range b.configs {
        found := false
        for _, c2 := range bs.configs {
            if c.Equals(c2) {
                found = true
                break
            }
        }

        if !found {
            return false
        }

    }
    return true
}

func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
    if b == other {
        return true
    } else if _, ok := other.(*BaseATNConfigSet); !ok {
        return false
    }

    other2 := other.(*BaseATNConfigSet)

    return b.configs != nil &&
        b.fullCtx == other2.fullCtx &&
        b.uniqueAlt == other2.uniqueAlt &&
        b.conflictingAlts == other2.conflictingAlts &&
        b.hasSemanticContext == other2.hasSemanticContext &&
        b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
        b.Compare(other2)
}

func (b *BaseATNConfigSet) Hash() int {
    if b.readOnly {
        if b.cachedHash == -1 {
            b.cachedHash = b.hashCodeConfigs()
        }

        return b.cachedHash
    }

    return b.hashCodeConfigs()
}

func (b *BaseATNConfigSet) hashCodeConfigs() int {
    h := 1
    for _, config := range b.configs {
        h = 31*h + config.Hash()
    }
    return h
}

func (b *BaseATNConfigSet) Length() int {
    return len(b.configs)
}

func (b *BaseATNConfigSet) IsEmpty() bool {
    return len(b.configs) == 0
}

func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
    if b.configLookup == nil {
        panic("not implemented for read-only sets")
    }

    return b.configLookup.Contains(item)
}

func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
    if b.configLookup == nil {
        panic("not implemented for read-only sets")
    }

    return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
}

func (b *BaseATNConfigSet) Clear() {
    if b.readOnly {
        panic("set is read-only")
    }

    b.configs = make([]ATNConfig, 0)
    b.cachedHash = -1
    b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
}

func (b *BaseATNConfigSet) FullContext() bool {
    return b.fullCtx
}

func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
    return b.dipsIntoOuterContext
}

func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
    b.dipsIntoOuterContext = v
}

func (b *BaseATNConfigSet) GetUniqueAlt() int {
    return b.uniqueAlt
}

func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
    b.uniqueAlt = v
}

func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
    return b.conflictingAlts
}

func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
    b.conflictingAlts = v
}

func (b *BaseATNConfigSet) ReadOnly() bool {
    return b.readOnly
}

func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
    b.readOnly = readOnly

    if readOnly {
        b.configLookup = nil // Read only, so no need for the lookup cache
    }
}

func (b *BaseATNConfigSet) String() string {
    s := "["

    for i, c := range b.configs {
        s += c.String()

        if i != len(b.configs)-1 {
            s += ", "
        }
    }

    s += "]"

    if b.hasSemanticContext {
        s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
    }

    if b.uniqueAlt != ATNInvalidAltNumber {
        s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
    }

    if b.conflictingAlts != nil {
        s += ",conflictingAlts=" + b.conflictingAlts.String()
    }

    if b.dipsIntoOuterContext {
        s += ",dipsIntoOuterContext"
    }

    return s
}

type OrderedATNConfigSet struct {
    *BaseATNConfigSet
}

func NewOrderedATNConfigSet() *OrderedATNConfigSet {
    b := NewBaseATNConfigSet(false)

    // This set uses the standard Hash() and Equals() from ATNConfig
    b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)

    return &OrderedATNConfigSet{BaseATNConfigSet: b}
}

func hashATNConfig(i interface{}) int {
    o := i.(ATNConfig)
    hash := 7
    hash = 31*hash + o.GetState().GetStateNumber()
    hash = 31*hash + o.GetAlt()
    hash = 31*hash + o.GetSemanticContext().Hash()
    return hash
}

func equalATNConfigs(a, b interface{}) bool {
    if a == nil || b == nil {
        return false
    }

    if a == b {
        return true
    }

    var ai, ok = a.(ATNConfig)
    var bi, ok1 = b.(ATNConfig)

    if !ok || !ok1 {
        return false
    }

    if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
        return false
    }

    if ai.GetAlt() != bi.GetAlt() {
        return false
    }

    return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
}
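SetReadOnly above freezes a config set once it becomes a DFA state: mutators panic and the configLookup cache is dropped. A self-contained toy (not the runtime's types) illustrating the same freeze pattern:

    // Sketch only: freeze-on-publish, mirroring SetReadOnly/Add above.
    package main

    import "fmt"

    type set struct {
        items    []int
        lookup   map[int]bool // dropped when frozen, like configLookup
        readOnly bool
    }

    func (s *set) add(v int) {
        if s.readOnly {
            panic("set is read-only")
        }
        if !s.lookup[v] {
            s.lookup[v] = true
            s.items = append(s.items, v)
        }
    }

    func (s *set) freeze() { s.readOnly = true; s.lookup = nil }

    func main() {
        s := &set{lookup: map[int]bool{}}
        s.add(1)
        s.freeze()
        fmt.Println(s.items) // [1]; s.add(2) would now panic
    }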
113
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
generated
vendored
@ -1,113 +0,0 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

type InputStream struct {
    name  string
    index int
    data  []rune
    size  int
}

func NewInputStream(data string) *InputStream {

    is := new(InputStream)

    is.name = "<empty>"
    is.index = 0
    is.data = []rune(data)
    is.size = len(is.data) // number of runes

    return is
}

func (is *InputStream) reset() {
    is.index = 0
}

func (is *InputStream) Consume() {
    if is.index >= is.size {
        // assert is.LA(1) == TokenEOF
        panic("cannot consume EOF")
    }
    is.index++
}

func (is *InputStream) LA(offset int) int {

    if offset == 0 {
        return 0 // nil
    }
    if offset < 0 {
        offset++ // e.g., translate LA(-1) to use offset=0
    }
    pos := is.index + offset - 1

    if pos < 0 || pos >= is.size { // invalid
        return TokenEOF
    }

    return int(is.data[pos])
}

func (is *InputStream) LT(offset int) int {
    return is.LA(offset)
}

func (is *InputStream) Index() int {
    return is.index
}

func (is *InputStream) Size() int {
    return is.size
}

// mark/release do nothing; we have the entire buffer
func (is *InputStream) Mark() int {
    return -1
}

func (is *InputStream) Release(marker int) {
}

func (is *InputStream) Seek(index int) {
    if index <= is.index {
        is.index = index // just jump; don't update stream state (line, ...)
        return
    }
    // seek forward
    is.index = intMin(index, is.size)
}

func (is *InputStream) GetText(start int, stop int) string {
    if stop >= is.size {
        stop = is.size - 1
    }
    if start >= is.size {
        return ""
    }

    return string(is.data[start : stop+1])
}

func (is *InputStream) GetTextFromTokens(start, stop Token) string {
    if start != nil && stop != nil {
        return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
    }

    return ""
}

func (is *InputStream) GetTextFromInterval(i *Interval) string {
    return is.GetText(i.Start, i.Stop)
}

func (*InputStream) GetSourceName() string {
    return "Obtained from string"
}

func (is *InputStream) String() string {
    return string(is.data)
}
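The LA convention above is easy to misread: LA(1) is the rune under the cursor, LA(-1) the one just consumed, and LA(0) is defined as 0. A self-contained toy (stream and tokenEOF are stand-ins, not the runtime's types) mirroring that offset arithmetic:

    // Sketch only: the same lookahead offset translation as InputStream.LA.
    package main

    import "fmt"

    const tokenEOF = -1

    type stream struct {
        data  []rune
        index int
    }

    func (s *stream) LA(offset int) int {
        if offset == 0 {
            return 0
        }
        if offset < 0 {
            offset++ // LA(-1) means the rune just behind the cursor
        }
        pos := s.index + offset - 1
        if pos < 0 || pos >= len(s.data) {
            return tokenEOF
        }
        return int(s.data[pos])
    }

    func main() {
        s := &stream{data: []rune("ab"), index: 1} // cursor sits after 'a'
        fmt.Println(string(rune(s.LA(-1))), string(rune(s.LA(1)))) // "a" "b"
    }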
198
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
generated
vendored
@ -1,198 +0,0 @@
package antlr

// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

import (
    "sort"
)

// Collectable is an interface that a struct should implement if it is to be
// usable as a key in these collections.
type Collectable[T any] interface {
    Hash() int
    Equals(other Collectable[T]) bool
}

type Comparator[T any] interface {
    Hash1(o T) int
    Equals2(T, T) bool
}

// JStore implements a container that allows the use of a struct to calculate the key
// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
// serve the needs of the ANTLR Go runtime.
//
// For ease of porting the logic of the runtime from the master target (Java), this collection
// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
// function as the key. The values are stored in a standard go map which internally is a form of hashmap
// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
// we understand the requirements, then this is fine - this is not a general purpose collection.
type JStore[T any, C Comparator[T]] struct {
    store      map[int][]T
    len        int
    comparator Comparator[T]
}

func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {

    if comparator == nil {
        panic("comparator cannot be nil")
    }

    s := &JStore[T, C]{
        store:      make(map[int][]T, 1),
        comparator: comparator,
    }
    return s
}

// Put will store given value in the collection. Note that the key for storage is generated from
// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
// as any kind of general collection.
//
// If the key has a hash conflict, then the value will be added to the slice of values associated with the
// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
// tested by calling the equals() method on the key.
//
// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
//
// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn

    kh := s.comparator.Hash1(value)

    for _, v1 := range s.store[kh] {
        if s.comparator.Equals2(value, v1) {
            return v1, true
        }
    }
    s.store[kh] = append(s.store[kh], value)
    s.len++
    return value, false
}

// Get will return the value associated with the key - the type of the key is the same type as the value
// which would not generally be useful, but this is a specific thing for ANTLR where the key is
// generated using the object we are going to store.
func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn

    kh := s.comparator.Hash1(key)

    for _, v := range s.store[kh] {
        if s.comparator.Equals2(key, v) {
            return v, true
        }
    }
    return key, false
}

// Contains returns true if the given key is present in the store
func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn

    _, present := s.Get(key)
    return present
}

func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
    vs := make([]T, 0, len(s.store))
    for _, v := range s.store {
        vs = append(vs, v...)
    }
    sort.Slice(vs, func(i, j int) bool {
        return less(vs[i], vs[j])
    })

    return vs
}

func (s *JStore[T, C]) Each(f func(T) bool) {
    for _, e := range s.store {
        for _, v := range e {
            f(v)
        }
    }
}

func (s *JStore[T, C]) Len() int {
    return s.len
}

func (s *JStore[T, C]) Values() []T {
    vs := make([]T, 0, len(s.store))
    for _, e := range s.store {
        for _, v := range e {
            vs = append(vs, v)
        }
    }
    return vs
}

type entry[K, V any] struct {
    key K
    val V
}

type JMap[K, V any, C Comparator[K]] struct {
    store      map[int][]*entry[K, V]
    len        int
    comparator Comparator[K]
}

func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
    return &JMap[K, V, C]{
        store:      make(map[int][]*entry[K, V], 1),
        comparator: comparator,
    }
}

func (m *JMap[K, V, C]) Put(key K, val V) {
    kh := m.comparator.Hash1(key)

    m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
    m.len++
}

func (m *JMap[K, V, C]) Values() []V {
    vs := make([]V, 0, len(m.store))
    for _, e := range m.store {
        for _, v := range e {
            vs = append(vs, v.val)
        }
    }
    return vs
}

func (m *JMap[K, V, C]) Get(key K) (V, bool) {

    var none V
    kh := m.comparator.Hash1(key)
    for _, e := range m.store[kh] {
        if m.comparator.Equals2(e.key, key) {
            return e.val, true
        }
    }
    return none, false
}

func (m *JMap[K, V, C]) Len() int {
    return len(m.store)
}

func (m *JMap[K, V, C]) Delete(key K) {
    kh := m.comparator.Hash1(key)
    for i, e := range m.store[kh] {
        if m.comparator.Equals2(e.key, key) {
            m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
            m.len--
            return
        }
    }
}

func (m *JMap[K, V, C]) Clear() {
    m.store = make(map[int][]*entry[K, V])
}
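JStore's Put is the interesting operation: values are bucketed by a caller-supplied hash, collisions are absorbed by a per-bucket slice, and an equal value already present is returned instead of being re-added. A self-contained toy (store, put, and the toy hash/eq functions are illustrative, not the runtime's API) mirroring those semantics:

    // Sketch only: bucket-per-hash storage with a collision slice, like JStore.Put.
    package main

    import "fmt"

    type store[T any] struct {
        buckets map[int][]T
        hash    func(T) int
        eq      func(T, T) bool
    }

    func (s *store[T]) put(v T) (T, bool) {
        k := s.hash(v)
        for _, old := range s.buckets[k] {
            if s.eq(v, old) {
                return old, true // already present: hand back the stored value
            }
        }
        s.buckets[k] = append(s.buckets[k], v)
        return v, false
    }

    func main() {
        s := &store[int]{
            buckets: map[int][]int{},
            hash:    func(v int) int { return v % 2 }, // deliberately collision-heavy
            eq:      func(a, b int) bool { return a == b },
        }
        fmt.Println(s.put(1)) // 1 false
        fmt.Println(s.put(3)) // 3 false (collides with 1; same bucket)
        fmt.Println(s.put(1)) // 1 true
    }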
806
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
generated
vendored
@ -1,806 +0,0 @@
|
||||
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
|
||||
// Use of this file is governed by the BSD 3-clause license that
|
||||
// can be found in the LICENSE.txt file in the project root.
|
||||
|
||||
package antlr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"golang.org/x/exp/slices"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Represents {@code $} in local context prediction, which means wildcard.
|
||||
// {@code//+x =//}.
|
||||
// /
|
||||
const (
|
||||
BasePredictionContextEmptyReturnState = 0x7FFFFFFF
|
||||
)
|
||||
|
||||
// Represents {@code $} in an array in full context mode, when {@code $}
|
||||
// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
|
||||
// {@code $} = {@link //EmptyReturnState}.
|
||||
// /
|
||||
|
||||
var (
|
||||
BasePredictionContextglobalNodeCount = 1
|
||||
BasePredictionContextid = BasePredictionContextglobalNodeCount
|
||||
)
|
||||
|
||||
type PredictionContext interface {
|
||||
Hash() int
|
||||
Equals(interface{}) bool
|
||||
GetParent(int) PredictionContext
|
||||
getReturnState(int) int
|
||||
length() int
|
||||
isEmpty() bool
|
||||
hasEmptyPath() bool
|
||||
String() string
|
||||
}
|
||||
|
||||
type BasePredictionContext struct {
|
||||
cachedHash int
|
||||
}
|
||||
|
||||
func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
|
||||
pc := new(BasePredictionContext)
|
||||
pc.cachedHash = cachedHash
|
||||
|
||||
return pc
|
||||
}
|
||||
|
||||
func (b *BasePredictionContext) isEmpty() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func calculateHash(parent PredictionContext, returnState int) int {
|
||||
h := murmurInit(1)
|
||||
h = murmurUpdate(h, parent.Hash())
|
||||
h = murmurUpdate(h, returnState)
|
||||
return murmurFinish(h, 2)
|
||||
}
|
||||
|
||||
var _emptyPredictionContextHash int
|
||||
|
||||
func init() {
|
||||
_emptyPredictionContextHash = murmurInit(1)
|
||||
_emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
|
||||
}
|
||||
|
||||
func calculateEmptyHash() int {
|
||||
return _emptyPredictionContextHash
|
||||
}
|
||||
|
||||
// Used to cache {@link BasePredictionContext} objects. Its used for the shared
|
||||
// context cash associated with contexts in DFA states. This cache
|
||||
// can be used for both lexers and parsers.
|
||||
|
||||
type PredictionContextCache struct {
|
||||
cache map[PredictionContext]PredictionContext
|
||||
}
|
||||
|
||||
func NewPredictionContextCache() *PredictionContextCache {
|
||||
t := new(PredictionContextCache)
|
||||
t.cache = make(map[PredictionContext]PredictionContext)
|
||||
return t
|
||||
}
|
||||
|
||||
// Add a context to the cache and return it. If the context already exists,
|
||||
// return that one instead and do not add a Newcontext to the cache.
|
||||
// Protect shared cache from unsafe thread access.
|
||||
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
|
||||
if ctx == BasePredictionContextEMPTY {
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
existing := p.cache[ctx]
|
||||
if existing != nil {
|
||||
return existing
|
||||
}
|
||||
p.cache[ctx] = ctx
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
|
||||
return p.cache[ctx]
|
||||
}
|
||||
|
||||
func (p *PredictionContextCache) length() int {
|
||||
return len(p.cache)
|
||||
}
|
||||
|
||||
type SingletonPredictionContext interface {
|
||||
PredictionContext
|
||||
}
|
||||
|
||||
type BaseSingletonPredictionContext struct {
|
||||
*BasePredictionContext
|
||||
|
||||
parentCtx PredictionContext
|
||||
returnState int
|
||||
}
|
||||
|
||||
func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
|
||||
var cachedHash int
|
||||
if parent != nil {
|
||||
cachedHash = calculateHash(parent, returnState)
|
||||
} else {
|
||||
cachedHash = calculateEmptyHash()
|
||||
}
|
||||
|
||||
s := new(BaseSingletonPredictionContext)
|
||||
s.BasePredictionContext = NewBasePredictionContext(cachedHash)
|
||||
|
||||
s.parentCtx = parent
|
||||
s.returnState = returnState
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
|
||||
if returnState == BasePredictionContextEmptyReturnState && parent == nil {
|
||||
// someone can pass in the bits of an array ctx that mean $
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
|
||||
return NewBaseSingletonPredictionContext(parent, returnState)
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) length() int {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
|
||||
return b.parentCtx
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
|
||||
return b.returnState
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
|
||||
return b.returnState == BasePredictionContextEmptyReturnState
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) Hash() int {
|
||||
return b.cachedHash
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
|
||||
if b == other {
|
||||
return true
|
||||
}
|
||||
if _, ok := other.(*BaseSingletonPredictionContext); !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
otherP := other.(*BaseSingletonPredictionContext)
|
||||
|
||||
if b.returnState != otherP.getReturnState(0) {
|
||||
return false
|
||||
}
|
||||
if b.parentCtx == nil {
|
||||
return otherP.parentCtx == nil
|
||||
}
|
||||
|
||||
return b.parentCtx.Equals(otherP.parentCtx)
|
||||
}
|
||||
|
||||
func (b *BaseSingletonPredictionContext) String() string {
|
||||
var up string
|
||||
|
||||
if b.parentCtx == nil {
|
||||
up = ""
|
||||
} else {
|
||||
up = b.parentCtx.String()
|
||||
}
|
||||
|
||||
if len(up) == 0 {
|
||||
if b.returnState == BasePredictionContextEmptyReturnState {
|
||||
return "$"
|
||||
}
|
||||
|
||||
return strconv.Itoa(b.returnState)
|
||||
}
|
||||
|
||||
return strconv.Itoa(b.returnState) + " " + up
|
||||
}
|
||||
|
||||
var BasePredictionContextEMPTY = NewEmptyPredictionContext()
|
||||
|
||||
type EmptyPredictionContext struct {
|
||||
*BaseSingletonPredictionContext
|
||||
}
|
||||
|
||||
func NewEmptyPredictionContext() *EmptyPredictionContext {
|
||||
|
||||
p := new(EmptyPredictionContext)
|
||||
|
||||
p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
|
||||
p.cachedHash = calculateEmptyHash()
|
||||
return p
|
||||
}
|
||||
|
||||
func (e *EmptyPredictionContext) isEmpty() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EmptyPredictionContext) getReturnState(index int) int {
|
||||
return e.returnState
|
||||
}
|
||||
|
||||
func (e *EmptyPredictionContext) Hash() int {
|
||||
return e.cachedHash
|
||||
}
|
||||
|
||||
func (e *EmptyPredictionContext) Equals(other interface{}) bool {
|
||||
return e == other
|
||||
}
|
||||
|
||||
func (e *EmptyPredictionContext) String() string {
|
||||
return "$"
|
||||
}
|
||||
|
||||
type ArrayPredictionContext struct {
|
||||
*BasePredictionContext
|
||||
|
||||
parents []PredictionContext
|
||||
returnStates []int
|
||||
}
|
||||
|
||||
func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
|
||||
// Parent can be nil only if full ctx mode and we make an array
|
||||
// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
|
||||
// nil parent and
|
||||
// returnState == {@link //EmptyReturnState}.
|
||||
hash := murmurInit(1)
|
||||
|
||||
for _, parent := range parents {
|
||||
hash = murmurUpdate(hash, parent.Hash())
|
||||
}
|
||||
|
||||
for _, returnState := range returnStates {
|
||||
hash = murmurUpdate(hash, returnState)
|
||||
}
|
||||
|
||||
hash = murmurFinish(hash, len(parents)<<1)
|
||||
|
||||
c := new(ArrayPredictionContext)
|
||||
c.BasePredictionContext = NewBasePredictionContext(hash)
|
||||
|
||||
c.parents = parents
|
||||
c.returnStates = returnStates
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) GetReturnStates() []int {
|
||||
return a.returnStates
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) hasEmptyPath() bool {
|
||||
return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) isEmpty() bool {
|
||||
// since EmptyReturnState can only appear in the last position, we
|
||||
// don't need to verify that size==1
|
||||
return a.returnStates[0] == BasePredictionContextEmptyReturnState
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) length() int {
|
||||
return len(a.returnStates)
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
|
||||
return a.parents[index]
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) getReturnState(index int) int {
|
||||
return a.returnStates[index]
|
||||
}
|
||||
|
||||
// Equals is the default comparison function for ArrayPredictionContext when no specialized
|
||||
// implementation is needed for a collection
|
||||
func (a *ArrayPredictionContext) Equals(o interface{}) bool {
|
||||
if a == o {
|
||||
return true
|
||||
}
|
||||
other, ok := o.(*ArrayPredictionContext)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if a.cachedHash != other.Hash() {
|
||||
return false // can't be same if hash is different
|
||||
}
|
||||
|
||||
// Must compare the actual array elements and not just the array address
|
||||
//
|
||||
return slices.Equal(a.returnStates, other.returnStates) &&
|
||||
slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
|
||||
return x.Equals(y)
|
||||
})
|
||||
}
|
||||
|
||||
// Hash is the default hash function for ArrayPredictionContext when no specialized
|
||||
// implementation is needed for a collection
|
||||
func (a *ArrayPredictionContext) Hash() int {
|
||||
return a.BasePredictionContext.cachedHash
|
||||
}
|
||||
|
||||
func (a *ArrayPredictionContext) String() string {
|
||||
if a.isEmpty() {
|
||||
return "[]"
|
||||
}
|
||||
|
||||
s := "["
|
||||
for i := 0; i < len(a.returnStates); i++ {
|
||||
if i > 0 {
|
||||
s = s + ", "
|
||||
}
|
||||
if a.returnStates[i] == BasePredictionContextEmptyReturnState {
|
||||
s = s + "$"
|
||||
continue
|
||||
}
|
||||
s = s + strconv.Itoa(a.returnStates[i])
|
||||
if a.parents[i] != nil {
|
||||
s = s + " " + a.parents[i].String()
|
||||
} else {
|
||||
s = s + "nil"
|
||||
}
|
||||
}
|
||||
|
||||
return s + "]"
|
||||
}
|
||||
|
||||
// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
|
||||
// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
|
||||
// /
|
||||
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
|
||||
if outerContext == nil {
|
||||
outerContext = ParserRuleContextEmpty
|
||||
}
|
||||
// if we are in RuleContext of start rule, s, then BasePredictionContext
|
||||
// is EMPTY. Nobody called us. (if we are empty, return empty)
|
||||
if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
|
||||
return BasePredictionContextEMPTY
|
||||
}
|
||||
// If we have a parent, convert it to a BasePredictionContext graph
|
||||
parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
|
||||
state := a.states[outerContext.GetInvokingState()]
|
||||
transition := state.GetTransitions()[0]
|
||||
|
||||
return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
|
||||
}
|
||||
|
||||
func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
|
||||
|
||||
// Share same graph if both same
|
||||
//
|
||||
if a == b || a.Equals(b) {
|
||||
return a
|
||||
}
|
||||
|
||||
// In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
|
||||
// in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
|
||||
// from it.
|
||||
// In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
|
||||
// will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
|
||||
// either of them.
|
||||
|
||||
ac, ok1 := a.(*BaseSingletonPredictionContext)
|
||||
bc, ok2 := b.(*BaseSingletonPredictionContext)
|
||||
|
||||
if ok1 && ok2 {
|
||||
return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
|
||||
}
|
||||
// At least one of a or b is array
|
||||
// If one is $ and rootIsWildcard, return $ as// wildcard
|
||||
if rootIsWildcard {
|
||||
if _, ok := a.(*EmptyPredictionContext); ok {
|
||||
return a
|
||||
}
|
||||
if _, ok := b.(*EmptyPredictionContext); ok {
|
||||
return b
|
||||
}
|
||||
}
|
||||
|
||||
// Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters
|
||||
// here.
|
||||
//
|
||||
// TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
|
||||
|
||||
var arp, arb *ArrayPredictionContext
|
||||
var ok bool
|
||||
if arp, ok = a.(*ArrayPredictionContext); ok {
|
||||
} else if _, ok = a.(*BaseSingletonPredictionContext); ok {
|
||||
arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
|
||||
} else if _, ok = a.(*EmptyPredictionContext); ok {
|
||||
arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
|
||||
}
|
||||
|
||||
if arb, ok = b.(*ArrayPredictionContext); ok {
|
||||
} else if _, ok = b.(*BaseSingletonPredictionContext); ok {
|
||||
arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
|
||||
} else if _, ok = b.(*EmptyPredictionContext); ok {
|
||||
arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
|
||||
}
|
||||
|
||||
// Both arp and arb
|
||||
return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
|
||||
}

// Merge two {@link SingletonBasePredictionContext} instances.
//
// <p>Stack tops equal, parents merge is same return left graph.<br>
// <embed src="images/SingletonMerge_SameRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Same stack top, parents differ merge parents giving array node, then
// remainders of those graphs. A new root node is created to point to the
// merged parents.<br>
// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to same parent. Make array node for the
// root where both elements in the root point to the same (original)
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to different parents. Make array node for
// the root where each element points to the corresponding original
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// @param mergeCache
// /
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	if mergeCache != nil {
		previous := mergeCache.Get(a.Hash(), b.Hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.Hash(), a.Hash())
		if previous != nil {
			return previous.(PredictionContext)
		}
	}

	rootMerge := mergeRoot(a, b, rootIsWildcard)
	if rootMerge != nil {
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), rootMerge)
		}
		return rootMerge
	}
	if a.returnState == b.returnState {
		parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
		// if parent is same as existing a or b parent or reduced to a parent,
		// return it
		if parent == a.parentCtx {
			return a // ax + bx = ax, if a=b
		}
		if parent == b.parentCtx {
			return b // ax + bx = bx, if a=b
		}
		// else: ax + ay = a'[x,y]
		// merge parents x and y, giving array node with x,y then remainders
		// of those graphs. dup a, a' points at merged array.
		// New joined parent so create new singleton pointing to it, a'
		spc := SingletonBasePredictionContextCreate(parent, a.returnState)
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), spc)
		}
		return spc
	}
	// a != b payloads differ
	// see if we can collapse parents due to $+x parents if local ctx
	var singleParent PredictionContext
	if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + bx = [a,b]x
		singleParent = a.parentCtx
	}
	if singleParent != nil { // parents are same
		// sort payloads and use same parent
		payloads := []int{a.returnState, b.returnState}
		if a.returnState > b.returnState {
			payloads[0] = b.returnState
			payloads[1] = a.returnState
		}
		parents := []PredictionContext{singleParent, singleParent}
		apc := NewArrayPredictionContext(parents, payloads)
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), apc)
		}
		return apc
	}
	// parents differ and can't be merged. Just pack them together
	// into an array.
	// ax + by = [ax,by]
	payloads := []int{a.returnState, b.returnState}
	parents := []PredictionContext{a.parentCtx, b.parentCtx}
	if a.returnState > b.returnState { // sort by payload
		payloads[0] = b.returnState
		payloads[1] = a.returnState
		parents = []PredictionContext{b.parentCtx, a.parentCtx}
	}
	apc := NewArrayPredictionContext(parents, payloads)
	if mergeCache != nil {
		mergeCache.set(a.Hash(), b.Hash(), apc)
	}
	return apc
}

// Handle the case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
//
// <h2>Local-Context Merges</h2>
//
// <p>These local-context merge operations are used when {@code rootIsWildcard}
// is true.</p>
//
// <p>{@link //EMPTY} is superset of any graph return {@link //EMPTY}.<br>
// <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
// {@code //EMPTY} return left graph.<br>
// <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
//
// <p>Special case of last merge if local context.<br>
// <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
//
// <h2>Full-Context Merges</h2>
//
// <p>These full-context merge operations are used when {@code rootIsWildcard}
// is false.</p>
//
// <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
//
// <p>Must keep all contexts; {@link //EMPTY} in array is a special value (and
// nil parent).<br>
// <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// /
func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
	if rootIsWildcard {
		if a == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // * + b = *
		}
		if b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // a + * = *
		}
	} else {
		if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
			return BasePredictionContextEMPTY // $ + $ = $
		} else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
			payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{b.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		} else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
			payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
			parents := []PredictionContext{a.GetParent(-1), nil}
			return NewArrayPredictionContext(parents, payloads)
		}
	}
	return nil
}

// Merge two {@link ArrayBasePredictionContext} instances.
//
// <p>Different tops, different parents.<br>
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, same parents.<br>
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, different parents.<br>
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, all shared parents.<br>
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Equal tops, merge parents and reduce top to
// {@link SingletonBasePredictionContext}.<br>
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
// /
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
	if mergeCache != nil {
		previous := mergeCache.Get(a.Hash(), b.Hash())
		if previous != nil {
			if ParserATNSimulatorTraceATNSim {
				fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
			}
			return previous.(PredictionContext)
		}
		previous = mergeCache.Get(b.Hash(), a.Hash())
		if previous != nil {
			if ParserATNSimulatorTraceATNSim {
				fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
			}
			return previous.(PredictionContext)
		}
	}
	// merge sorted payloads a + b => M
	i := 0 // walks a
	j := 0 // walks b
	k := 0 // walks target M array

	mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
	mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
	// walk and merge to yield mergedParents, mergedReturnStates
	for i < len(a.returnStates) && j < len(b.returnStates) {
		aParent := a.parents[i]
		bParent := b.parents[j]
		if a.returnStates[i] == b.returnStates[j] {
			// same payload (stack tops are equal), must yield merged singleton
			payload := a.returnStates[i]
			// $+$ = $
			bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
			axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax -> ax
			if bothDollars || axAX {
				mergedParents[k] = aParent // choose left
				mergedReturnStates[k] = payload
			} else { // ax+ay -> a'[x,y]
				mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
				mergedParents[k] = mergedParent
				mergedReturnStates[k] = payload
			}
			i++ // hop over left one as usual
			j++ // but also skip one in right side since we merge
		} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
			mergedParents[k] = aParent
			mergedReturnStates[k] = a.returnStates[i]
			i++
		} else { // b > a, copy b[j] to M
			mergedParents[k] = bParent
			mergedReturnStates[k] = b.returnStates[j]
			j++
		}
		k++
	}
	// copy over any payloads remaining in either array
	if i < len(a.returnStates) {
		for p := i; p < len(a.returnStates); p++ {
			mergedParents[k] = a.parents[p]
			mergedReturnStates[k] = a.returnStates[p]
			k++
		}
	} else {
		for p := j; p < len(b.returnStates); p++ {
			mergedParents[k] = b.parents[p]
			mergedReturnStates[k] = b.returnStates[p]
			k++
		}
	}
	// trim merged if we combined a few that had same stack tops
	if k < len(mergedParents) { // write index < last position; trim
		if k == 1 { // for just one merged element, return singleton top
			pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
			if mergeCache != nil {
				mergeCache.set(a.Hash(), b.Hash(), pc)
			}
			return pc
		}
		mergedParents = mergedParents[0:k]
		mergedReturnStates = mergedReturnStates[0:k]
	}

	M := NewArrayPredictionContext(mergedParents, mergedReturnStates)

	// if we created same array as a or b, return that instead
	// TODO: track whether this is possible above during merge sort for speed
	// TODO: In Go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
	if M == a {
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), a)
		}
		if ParserATNSimulatorTraceATNSim {
			fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
		}
		return a
	}
	if M == b {
		if mergeCache != nil {
			mergeCache.set(a.Hash(), b.Hash(), b)
		}
		if ParserATNSimulatorTraceATNSim {
			fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
		}
		return b
	}
	combineCommonParents(mergedParents)

	if mergeCache != nil {
		mergeCache.set(a.Hash(), b.Hash(), M)
	}
	if ParserATNSimulatorTraceATNSim {
		fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
	}
	return M
}
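// Editor's note: the core of mergeArrays is a classic two-pointer merge of two
// sorted lists, collapsing equal keys into one entry. A minimal, self-contained
// sketch of just that walk on plain ints (illustrative only, not runtime code):
//
//	func mergeSorted(a, b []int) []int {
//		out := make([]int, 0, len(a)+len(b))
//		i, j := 0, 0
//		for i < len(a) && j < len(b) {
//			switch {
//			case a[i] == b[j]: // equal keys collapse to a single entry
//				out = append(out, a[i])
//				i++
//				j++
//			case a[i] < b[j]:
//				out = append(out, a[i])
//				i++
//			default:
//				out = append(out, b[j])
//				j++
//			}
//		}
//		out = append(out, a[i:]...) // drain whichever side remains
//		out = append(out, b[j:]...)
//		return out
//	}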

// Make a pass over all <em>M</em> {@code parents}; merge any {@code equals()}
// ones.
// /
func combineCommonParents(parents []PredictionContext) {
	uniqueParents := make(map[PredictionContext]PredictionContext)

	for p := 0; p < len(parents); p++ {
		parent := parents[p]
		if uniqueParents[parent] == nil {
			uniqueParents[parent] = parent
		}
	}
	for q := 0; q < len(parents); q++ {
		parents[q] = uniqueParents[parents[q]]
	}
}

func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {

	if context.isEmpty() {
		return context
	}
	existing := visited[context]
	if existing != nil {
		return existing
	}
	existing = contextCache.Get(context)
	if existing != nil {
		visited[context] = existing
		return existing
	}
	changed := false
	parents := make([]PredictionContext, context.length())
	for i := 0; i < len(parents); i++ {
		parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
		if changed || parent != context.GetParent(i) {
			if !changed {
				parents = make([]PredictionContext, context.length())
				for j := 0; j < context.length(); j++ {
					parents[j] = context.GetParent(j)
				}
				changed = true
			}
			parents[i] = parent
		}
	}
	if !changed {
		contextCache.add(context)
		visited[context] = context
		return context
	}
	var updated PredictionContext
	if len(parents) == 0 {
		updated = BasePredictionContextEMPTY
	} else if len(parents) == 1 {
		updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
	} else {
		updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
	}
	contextCache.add(updated)
	visited[updated] = updated
	visited[context] = updated

	return updated
}
529
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
generated
vendored
@@ -1,529 +0,0 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// This enumeration defines the prediction modes available in ANTLR 4 along with
// utility methods for analyzing configuration sets for conflicts and/or
// ambiguities.

const (
	//
	// The SLL(*) prediction mode. This prediction mode ignores the current
	// parser context when making predictions. This is the fastest prediction
	// mode, and provides correct results for many grammars. This prediction
	// mode is more powerful than the prediction mode provided by ANTLR 3, but
	// may result in syntax errors for grammar and input combinations which are
	// not SLL.
	//
	// <p>
	// When using this prediction mode, the parser will either return a correct
	// parse tree (i.e. the same parse tree that would be returned with the
	// {@link //LL} prediction mode), or it will Report a syntax error. If a
	// syntax error is encountered when using the {@link //SLL} prediction mode,
	// it may be due to either an actual syntax error in the input or indicate
	// that the particular combination of grammar and input requires the more
	// powerful {@link //LL} prediction abilities to complete successfully.</p>
	//
	// <p>
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.</p>
	//
	PredictionModeSLL = 0
	//
	// The LL(*) prediction mode. This prediction mode allows the current parser
	// context to be used for resolving SLL conflicts that occur during
	// prediction. This is the fastest prediction mode that guarantees correct
	// parse results for all combinations of grammars with syntactically correct
	// inputs.
	//
	// <p>
	// When using this prediction mode, the parser will make correct decisions
	// for all syntactically-correct grammar and input combinations. However, in
	// cases where the grammar is truly ambiguous this prediction mode might not
	// Report a precise answer for <em>exactly which</em> alternatives are
	// ambiguous.</p>
	//
	// <p>
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.</p>
	//
	PredictionModeLL = 1
	//
	// The LL(*) prediction mode with exact ambiguity detection. In addition to
	// the correctness guarantees provided by the {@link //LL} prediction mode,
	// this prediction mode instructs the prediction algorithm to determine the
	// complete and exact set of ambiguous alternatives for every ambiguous
	// decision encountered while parsing.
	//
	// <p>
	// This prediction mode may be used for diagnosing ambiguities during
	// grammar development. Due to the performance overhead of calculating sets
	// of ambiguous alternatives, this prediction mode should be avoided when
	// the exact results are not necessary.</p>
	//
	// <p>
	// This prediction mode does not provide any guarantees for prediction
	// behavior for syntactically-incorrect inputs.</p>
	//
	PredictionModeLLExactAmbigDetection = 2
)

// Computes the SLL prediction termination condition.
//
// <p>
// This method computes the SLL prediction termination condition for both of
// the following cases.</p>
//
// <ul>
// <li>The usual SLL+LL fallback upon SLL conflict</li>
// <li>Pure SLL without LL fallback</li>
// </ul>
//
// <p><strong>COMBINED SLL+LL PARSING</strong></p>
//
// <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
// ensured regardless of how the termination condition is computed by this
// method. Due to the substantially higher cost of LL prediction, the
// prediction should only fall back to LL when the additional lookahead
// cannot lead to a unique SLL prediction.</p>
//
// <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
// conflicting subsets should fall back to full LL, even if the
// configuration sets don't resolve to the same alternative (e.g.
// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
// configuration, SLL could continue with the hopes that more lookahead will
// resolve via one of those non-conflicting configurations.</p>
//
// <p>Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
// stops when it sees only conflicting configuration subsets. In contrast,
// full LL keeps going when there is uncertainty.</p>
//
// <p><strong>HEURISTIC</strong></p>
//
// <p>As a heuristic, we stop prediction when we see any conflicting subset
// unless we see a state that only has one alternative associated with it.
// The single-alt-state thing lets prediction continue upon rules like
// (otherwise, it would admit defeat too soon):</p>
//
// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>
//
// <p>When the ATN simulation reaches the state before {@code ';'}, it has a
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
// processing this node because alternative two has another way to continue,
// via {@code [6|2|[]]}.</p>
//
// <p>It also lets us continue for this rule:</p>
//
// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
//
// <p>After Matching input A, we reach the stop state for rule A, state 1.
// State 8 is the state right before B. Clearly alternatives 1 and 2
// conflict and no amount of further lookahead will separate the two.
// However, alternative 3 will be able to continue and so we do not stop
// working on this state. In the previous example, we're concerned with
// states associated with the conflicting alternatives. Here alt 3 is not
// associated with the conflicting configs, but since we can continue
// looking for input reasonably, don't declare the state done.</p>
//
// <p><strong>PURE SLL PARSING</strong></p>
//
// <p>To handle pure SLL parsing, all we have to do is make sure that we
// combine stack contexts for configurations that differ only by semantic
// predicate. From there, we can do the usual SLL termination heuristic.</p>
//
// <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
//
// <p>SLL decisions don't evaluate predicates until after they reach DFA stop
// states because they need to create the DFA cache that works in all
// semantic situations. In contrast, full LL evaluates predicates collected
// during start state computation so it can ignore predicates thereafter.
// This means that SLL termination detection can totally ignore semantic
// predicates.</p>
//
// <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
// semantic predicate contexts so we might see two configurations like the
// following.</p>
//
// <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
//
// <p>Before testing these configurations against others, we have to merge
// {@code x} and {@code x'} (without modifying the existing configurations).
// For example, we test {@code (x+x')==x''} when looking for conflicts in
// the following configurations.</p>
//
// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
//
// <p>If the configuration set has predicates (as indicated by
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
// the configurations to strip out all of the predicates so that a standard
// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
	// Configs in rule stop states indicate reaching the end of the decision
	// rule (local context) or end of start rule (full context). If all
	// configs meet this condition, then none of the configurations is able
	// to Match additional input so we terminate prediction.
	//
	if PredictionModeallConfigsInRuleStopStates(configs) {
		return true
	}
	// pure SLL mode parsing
	if mode == PredictionModeSLL {
		// Don't bother with combining configs from different semantic
		// contexts if we can fail over to full LL; it costs more time
		// since we'll often fail over anyway.
		if configs.HasSemanticContext() {
			// dup configs, tossing out semantic predicates
			dup := NewBaseATNConfigSet(false)
			for _, c := range configs.GetItems() {

				// NewBaseATNConfig({semanticContext:}, c)
				c = NewBaseATNConfig2(c, SemanticContextNone)
				dup.Add(c, nil)
			}
			configs = dup
		}
		// now we have combined contexts for configs with dissimilar preds
	}
	// pure SLL or combined SLL+LL mode parsing
	altsets := PredictionModegetConflictingAltSubsets(configs)
	return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
}

// Checks if any configuration in {@code configs} is in a
// {@link RuleStopState}. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// @param configs the configuration set to test
// @return {@code true} if any configuration in {@code configs} is in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
	for _, c := range configs.GetItems() {
		if _, ok := c.GetState().(*RuleStopState); ok {
			return true
		}
	}
	return false
}

// Checks if all configurations in {@code configs} are in a
// {@link RuleStopState}. Configurations meeting this condition have reached
// the end of the decision rule (local context) or end of start rule (full
// context).
//
// @param configs the configuration set to test
// @return {@code true} if all configurations in {@code configs} are in a
// {@link RuleStopState}, otherwise {@code false}
func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {

	for _, c := range configs.GetItems() {
		if _, ok := c.GetState().(*RuleStopState); !ok {
			return false
		}
	}
	return true
}

// Full LL prediction termination.
//
// <p>Can we stop looking ahead during ATN simulation or is there some
// uncertainty as to which alternative we will ultimately pick, after
// consuming more input? Even if there are partial conflicts, we might know
// that everything is going to resolve to the same minimum alternative. That
// means we can stop since no more lookahead will change that fact. On the
// other hand, there might be multiple conflicts that resolve to different
// minimums. That means we need more lookahead to decide which of those
// alternatives we should predict.</p>
//
// <p>The basic idea is to split the set of configurations {@code C}, into
// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
// non-conflicting configurations. Two configurations conflict if they have
// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
// and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
//
// <p>Reduce these configuration subsets to the set of possible alternatives.
// You can compute the alternative subsets in one pass as follows:</p>
//
// <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
// {@code C} holding {@code s} and {@code ctx} fixed.</p>
//
// <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
//
// <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
//
// <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
// {@code s} and {@code ctx}.</p>
//
// <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
// the union of these alternative subsets is a singleton, then no amount of
// more lookahead will help us. We will always pick that alternative. If,
// however, there is more than one alternative, then we are uncertain which
// alternative to predict and must continue looking for resolution. We may
// or may not discover an ambiguity in the future, even if there are no
// conflicting subsets this round.</p>
//
// <p>The biggest sin is to terminate early because it means we've made a
// decision but were uncertain as to the eventual outcome. We haven't used
// enough lookahead. On the other hand, announcing a conflict too late is no
// big deal; you will still have the conflict. It's just inefficient. It
// might even look until the end of file.</p>
//
// <p>No special consideration for semantic predicates is required because
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
// no configuration contains a semantic context during the termination
// check.</p>
//
// <p><strong>CONFLICTING CONFIGS</strong></p>
//
// <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
// when {@code i!=j} but {@code x=x'}. Because we merge all
// {@code (s, i, _)} configurations together, that means that there are at
// most {@code n} configurations associated with state {@code s} for
// {@code n} possible alternatives in the decision. The merged stacks
// complicate the comparison of configuration contexts {@code x} and
// {@code x'}. Sam checks to see if one is a subset of the other by calling
// merge and checking to see if the merged result is either {@code x} or
// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
// is the superset, then {@code i} is the only possible prediction since the
// others resolve to {@code min(i)} as well. However, if {@code x} is
// associated with {@code j>i} then at least one stack configuration for
// {@code j} is not in conflict with alternative {@code i}. The algorithm
// should keep going, looking for more lookahead due to the uncertainty.</p>
//
// <p>For simplicity, I'm doing an equality check between {@code x} and
// {@code x'} that lets the algorithm continue to consume lookahead longer
// than necessary. The reason I like the equality is of course the
// simplicity, but also because that is the test you need to detect the
// alternatives that are actually in conflict.</p>
//
// <p><strong>CONTINUE/STOP RULE</strong></p>
//
// <p>Continue if union of resolved alternative sets from non-conflicting and
// conflicting alternative subsets has more than one alternative. We are
// uncertain about which alternative to predict.</p>
//
// <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
// alternatives are still in the running for the amount of input we've
// consumed at this point. The conflicting sets let us strip away
// configurations that won't lead to more states because we resolve
// conflicts to the configuration with a minimum alternate for the
// conflicting set.</p>
//
// <p><strong>CASES</strong></p>
//
// <ul>
//
// <li>no conflicts and more than 1 alternative in set => continue</li>
//
// <li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1,3}} => continue
// </li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1}} => stop and predict 1</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {1}} = {@code {1}} => stop and predict 1, can announce
// ambiguity {@code {1,2}}</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {2}} = {@code {1,2}} => continue</li>
//
// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
// {@code {3}} = {@code {1,3}} => continue</li>
//
// </ul>
//
// <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
//
// <p>If all states Report the same conflicting set of alternatives, then we
// know we have the exact ambiguity set.</p>
//
// <p><code>|A_<em>i</em>|>1</code> and
// <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
//
// <p>In other words, we continue examining lookahead until all {@code A_i}
// have more than one alternative and all {@code A_i} are the same. If
// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
// because the resolved set is {@code {1}}. To determine what the real
// ambiguity is, we have to know whether the ambiguity is between one and
// two or one and three so we keep going. We can only stop prediction when
// we need exact ambiguity detection when the sets look like
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
	return PredictionModegetSingleViableAlt(altsets)
}
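// Editor's note: the termination rule described above can be sketched without
// the runtime's BitSet and ATNConfig types. A minimal, self-contained
// illustration (cfg and singleViableAlt are hypothetical stand-ins; assumes
// "fmt" is imported): group alts by (state, ctx), reduce each subset to its
// minimum, and stop only when every subset resolves to the same minimum.
//
//	type cfg struct {
//		state, alt int
//		ctx        string
//	}
//
//	func singleViableAlt(configs []cfg) (int, bool) {
//		subsets := map[string][]int{} // key: state|ctx, value: alts
//		for _, c := range configs {
//			key := fmt.Sprintf("%d|%s", c.state, c.ctx)
//			subsets[key] = append(subsets[key], c.alt)
//		}
//		viable := -1
//		for _, alts := range subsets {
//			min := alts[0]
//			for _, a := range alts[1:] {
//				if a < min {
//					min = a
//				}
//			}
//			if viable == -1 {
//				viable = min
//			} else if viable != min {
//				return -1, false // more than one viable alt: keep looking ahead
//			}
//		}
//		return viable, true
//	}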

// Determines if every alternative subset in {@code altsets} contains more
// than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every {@link BitSet} in {@code altsets} has
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
	return !PredictionModehasNonConflictingAltSet(altsets)
}

// Determines if any single alternative subset in {@code altsets} contains
// exactly one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if alts.length() == 1 {
			return true
		}
	}
	return false
}

// Determines if any single alternative subset in {@code altsets} contains
// more than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if alts.length() > 1 {
			return true
		}
	}
	return false
}

// Determines if every alternative subset in {@code altsets} is equivalent.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every member of {@code altsets} is equal to the
// others, otherwise {@code false}
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
	var first *BitSet

	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		if first == nil {
			first = alts
		} else if alts != first {
			return false
		}
	}

	return true
}

// Returns the unique alternative predicted by all alternative subsets in
// {@code altsets}. If no such alternative exists, this method returns
// {@link ATN//INVALID_ALT_NUMBER}.
//
// @param altsets a collection of alternative subsets
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
	all := PredictionModeGetAlts(altsets)
	if all.length() == 1 {
		return all.minValue()
	}

	return ATNInvalidAltNumber
}

// Gets the complete set of represented alternatives for a collection of
// alternative subsets. This method returns the union of each {@link BitSet}
// in {@code altsets}.
//
// @param altsets a collection of alternative subsets
// @return the set of represented alternatives in {@code altsets}
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
	all := NewBitSet()
	for _, alts := range altsets {
		all.or(alts)
	}
	return all
}

// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
// For each configuration {@code c} in {@code configs}:
//
// <pre>
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
// </pre>
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
	configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)

	for _, c := range configs.GetItems() {

		alts, ok := configToAlts.Get(c)
		if !ok {
			alts = NewBitSet()
			configToAlts.Put(c, alts)
		}
		alts.add(c.GetAlt())
	}

	return configToAlts.Values()
}

// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
// configuration {@code c} in {@code configs}:
//
// <pre>
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
// </pre>
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
	m := NewAltDict()

	for _, c := range configs.GetItems() {
		alts := m.Get(c.GetState().String())
		if alts == nil {
			alts = NewBitSet()
			m.put(c.GetState().String(), alts)
		}
		alts.(*BitSet).add(c.GetAlt())
	}
	return m
}

func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
	values := PredictionModeGetStateToAltMap(configs).values()
	for i := 0; i < len(values); i++ {
		if values[i].(*BitSet).length() == 1 {
			return true
		}
	}
	return false
}

func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
	result := ATNInvalidAltNumber

	for i := 0; i < len(altsets); i++ {
		alts := altsets[i]
		minAlt := alts.minValue()
		if result == ATNInvalidAltNumber {
			result = minAlt
		} else if result != minAlt { // more than 1 viable alt
			return ATNInvalidAltNumber
		}
	}
	return result
}
114
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
generated
vendored
@@ -1,114 +0,0 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// A rule context is a record of a single rule invocation. It knows
// which context invoked it, if any. If there is no parent context, then
// naturally the invoking state is not valid. The parent link
// provides a chain upwards from the current rule invocation to the root
// of the invocation tree, forming a stack. We actually carry no
// information about the rule associated with this context (except
// when parsing). We keep only the state number of the invoking state from
// the ATN submachine that invoked this context. Contrast this with the s
// pointer inside ParserRuleContext that tracks the current state
// being "executed" for the current rule.
//
// The parent contexts are useful for computing lookahead sets and
// getting error information.
//
// These objects are used during parsing and prediction.
// For the special case of parsers, we use the subclass
// ParserRuleContext.
//
// @see ParserRuleContext
//

type RuleContext interface {
	RuleNode

	GetInvokingState() int
	SetInvokingState(int)

	GetRuleIndex() int
	IsEmpty() bool

	GetAltNumber() int
	SetAltNumber(altNumber int)

	String([]string, RuleContext) string
}

type BaseRuleContext struct {
	parentCtx     RuleContext
	invokingState int
	RuleIndex     int
}

func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {

	rn := new(BaseRuleContext)

	// What context invoked this rule?
	rn.parentCtx = parent

	// What state invoked the rule associated with this context?
	// The "return address" is the followState of invokingState.
	// If parent is nil, this should be -1.
	if parent == nil {
		rn.invokingState = -1
	} else {
		rn.invokingState = invokingState
	}

	return rn
}

func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
	return b
}

func (b *BaseRuleContext) SetParent(v Tree) {
	if v == nil {
		b.parentCtx = nil
	} else {
		b.parentCtx = v.(RuleContext)
	}
}

func (b *BaseRuleContext) GetInvokingState() int {
	return b.invokingState
}

func (b *BaseRuleContext) SetInvokingState(t int) {
	b.invokingState = t
}

func (b *BaseRuleContext) GetRuleIndex() int {
	return b.RuleIndex
}

func (b *BaseRuleContext) GetAltNumber() int {
	return ATNInvalidAltNumber
}

func (b *BaseRuleContext) SetAltNumber(altNumber int) {}

// A context is empty if there is no invoking state, meaning nobody called the
// current context.
func (b *BaseRuleContext) IsEmpty() bool {
	return b.invokingState == -1
}

// Return the combined text of all child nodes. This method only considers
// tokens which have been added to the parse tree.
// <p>
// Since tokens on hidden channels (e.g. whitespace or comments) are not
// added to the parse trees, they will not appear in the output of this
// method.
//

func (b *BaseRuleContext) GetParent() Tree {
	return b.parentCtx
}
235
vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
generated
vendored
@@ -1,235 +0,0 @@
package antlr

import "math"

const (
	_initalCapacity       = 16
	_initalBucketCapacity = 8
	_loadFactor           = 0.75
)

type Set interface {
	Add(value interface{}) (added interface{})
	Len() int
	Get(value interface{}) (found interface{})
	Contains(value interface{}) bool
	Values() []interface{}
	Each(f func(interface{}) bool)
}

type array2DHashSet struct {
	buckets          [][]Collectable[any]
	hashcodeFunction func(interface{}) int
	equalsFunction   func(Collectable[any], Collectable[any]) bool

	n         int // How many elements in set
	threshold int // when to expand

	currentPrime          int // jump by 4 primes each expand or whatever
	initialBucketCapacity int
}

func (as *array2DHashSet) Each(f func(interface{}) bool) {
	if as.Len() < 1 {
		return
	}

	for _, bucket := range as.buckets {
		for _, o := range bucket {
			if o == nil {
				break
			}
			if !f(o) {
				return
			}
		}
	}
}

func (as *array2DHashSet) Values() []interface{} {
	if as.Len() < 1 {
		return nil
	}

	values := make([]interface{}, 0, as.Len())
	as.Each(func(i interface{}) bool {
		values = append(values, i)
		return true
	})
	return values
}

func (as *array2DHashSet) Contains(value Collectable[any]) bool {
	return as.Get(value) != nil
}

func (as *array2DHashSet) Add(value Collectable[any]) interface{} {
	if as.n > as.threshold {
		as.expand()
	}
	return as.innerAdd(value)
}

func (as *array2DHashSet) expand() {
	old := as.buckets

	as.currentPrime += 4

	var (
		newCapacity      = len(as.buckets) << 1
		newTable         = as.createBuckets(newCapacity)
		newBucketLengths = make([]int, len(newTable))
	)

	as.buckets = newTable
	as.threshold = int(float64(newCapacity) * _loadFactor)

	for _, bucket := range old {
		if bucket == nil {
			continue
		}

		for _, o := range bucket {
			if o == nil {
				break
			}

			b := as.getBuckets(o)
			bucketLength := newBucketLengths[b]
			var newBucket []Collectable[any]
			if bucketLength == 0 {
				// new bucket
				newBucket = as.createBucket(as.initialBucketCapacity)
				newTable[b] = newBucket
			} else {
				newBucket = newTable[b]
				if bucketLength == len(newBucket) {
					// expand
					newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
					copy(newBucketCopy[:bucketLength], newBucket)
					newBucket = newBucketCopy
					newTable[b] = newBucket
				}
			}

			newBucket[bucketLength] = o
			newBucketLengths[b]++
		}
	}
}

func (as *array2DHashSet) Len() int {
	return as.n
}

func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
	if o == nil {
		return nil
	}

	b := as.getBuckets(o)
	bucket := as.buckets[b]
	if bucket == nil { // no bucket
		return nil
	}

	for _, e := range bucket {
		if e == nil {
			return nil // empty slot; not there
		}
		if as.equalsFunction(e, o) {
			return e
		}
	}

	return nil
}

func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
	b := as.getBuckets(o)

	bucket := as.buckets[b]

	// new bucket
	if bucket == nil {
		bucket = as.createBucket(as.initialBucketCapacity)
		bucket[0] = o

		as.buckets[b] = bucket
		as.n++
		return o
	}

	// look for it in bucket
	for i := 0; i < len(bucket); i++ {
		existing := bucket[i]
		if existing == nil { // empty slot; not there, add.
			bucket[i] = o
			as.n++
			return o
		}

		if as.equalsFunction(existing, o) { // found existing, quit
			return existing
		}
	}

	// full bucket, expand and add to end
	oldLength := len(bucket)
	bucketCopy := make([]Collectable[any], oldLength<<1)
	copy(bucketCopy[:oldLength], bucket)
	bucket = bucketCopy
	as.buckets[b] = bucket
	bucket[oldLength] = o
	as.n++
	return o
}

func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
	hash := as.hashcodeFunction(value)
	return hash & (len(as.buckets) - 1)
}

func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
	return make([][]Collectable[any], cap)
}

func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
	return make([]Collectable[any], cap)
}

func newArray2DHashSetWithCap(
	hashcodeFunction func(interface{}) int,
	equalsFunction func(Collectable[any], Collectable[any]) bool,
	initCap int,
	initBucketCap int,
) *array2DHashSet {
	if hashcodeFunction == nil {
		hashcodeFunction = standardHashFunction
	}

	if equalsFunction == nil {
		equalsFunction = standardEqualsFunction
	}

	ret := &array2DHashSet{
		hashcodeFunction: hashcodeFunction,
		equalsFunction:   equalsFunction,

		n:         0,
		threshold: int(math.Floor(_initalCapacity * _loadFactor)),

		currentPrime:          1,
		initialBucketCapacity: initBucketCap,
	}

	ret.buckets = ret.createBuckets(initCap)
	return ret
}

func newArray2DHashSet(
	hashcodeFunction func(interface{}) int,
	equalsFunction func(Collectable[any], Collectable[any]) bool,
) *array2DHashSet {
	return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
}
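// Editor's note: getBuckets above selects a bucket with `hash & (len-1)`,
// which only distributes hashes uniformly when the bucket count is a power of
// two. That invariant holds here because capacity starts at 16 and expand()
// always doubles it. A minimal illustration (not runtime code; assumes "fmt"
// is imported):
//
//	capacity := 16       // power of two
//	mask := capacity - 1 // 0b1111
//	for _, h := range []int{3, 19, 35} {
//		fmt.Println(h & mask) // 3, 3, 3: same low bits, same bucket
//	}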
18
vendor/github.com/antlr4-go/antlr/v4/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,18 @@
### Go template

# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test


# Go workspace file
go.work

# No Goland stuff in this repo
.idea
28
vendor/github.com/antlr4-go/antlr/v4/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,28 @@
Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither name of copyright holders nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54
vendor/github.com/antlr4-go/antlr/v4/README.md
generated
vendored
Normal file
@@ -0,0 +1,54 @@
[](https://goreportcard.com/report/github.com/antlr4-go/antlr)
[](https://pkg.go.dev/github.com/antlr4-go/antlr)
[](https://github.com/antlr4-go/antlr/releases/latest)
[](https://github.com/antlr4-go/antlr/releases/latest)
[](https://github.com/antlr4-go/antlr/commit-activity)
[](https://opensource.org/licenses/BSD-3-Clause)
[](https://GitHub.com/Naereen/StrapDown.js/stargazers/)

# ANTLR4 Go Runtime Module Repo

IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.

- Do not submit PRs or any change requests to this repo
- This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
- This repo contains the Go runtime that your generated projects should import

## Introduction

This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.

The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.

### Why?

The `go get` command is unable to retrieve the Go runtime when it is embedded so
deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and
causes confusion.

For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4`, is retrieved by go get as:

```sh
require (
    github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
)
```

Where you would expect to see:

```sh
require (
    github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
)
```

The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and
from whence users can expect `go get` to behave as expected.
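
With the module in its own repo, fetching the runtime becomes the usual one-liner. A minimal example, assuming a module has already been initialised in the current directory:

```sh
go get github.com/antlr4-go/antlr/v4
```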

# Documentation

Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
migrating existing projects to use the new module location and for information on how to use the Go runtime in
general.
102
vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
|
||||
/*
Package antlr implements the Go version of the ANTLR 4 runtime.

# The ANTLR Tool

ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
(or visitor) that makes it easy to respond to the recognition of phrases of interest.

# Go Runtime

At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
ANTLR4 that it is compatible with (i.e. it uses the /v4 path).

However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
list the release tag such as @4.12.0 - this was confusing, to say the least.

As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].

This means that if you are using the source code without modules, you should also use the source code in the [new repo],
though we highly recommend that you use go modules, as they are now idiomatic for Go.
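For example, the import then looks like this (the module path carries the /v4 suffix, while the package name remains antlr):

	import (
		"github.com/antlr4-go/antlr/v4"
	)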
I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.

Go runtime author: [Jim Idle] jimi@idle.ws

# Code Generation

ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
runtime library, written specifically to support the generated code in the target language. This library is the
runtime for the Go target.

To generate code for the go target, it is generally recommended to place the source grammar files in a package of
their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
it was at any point in its history.
Here is a general/recommended template for an ANTLR based recognizer in Go:

	.
	├── parser
	│     ├── mygrammar.g4
	│     ├── antlr-4.12.1-complete.jar
	│     ├── generate.go
	│     └── generate.sh
	├── parsing  - generated code goes here
	│     └── error_listeners.go
	├── go.mod
	├── go.sum
	├── main.go
	└── main_test.go

Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
The generate.go file then looks like this:

	package parser

	//go:generate ./generate.sh

And the generate.sh file will look similar to this:

	#!/bin/sh

	alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
	antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4

depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
is to generate the code into a package and directory of your own choosing.

From the command line at the root of your source package (the location of go.mod) you can then simply issue the command:

	go generate ./...

which will generate the code for the parser, and place it in the parsing package. You can then use the generated code
by importing the parsing package.
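As an illustration only, a minimal main.go for the layout above might look like the following sketch. The grammar and
rule names (MyGrammar, NewMyGrammarLexer, NewMyGrammarParser, StartRule) and the module path are placeholders -
substitute the names your own grammar generates:

	package main

	import (
		"fmt"

		"github.com/antlr4-go/antlr/v4"

		"example.com/myproject/parsing" // hypothetical module path for the generated code
	)

	func main() {
		// Build the usual lexer -> token stream -> parser pipeline over a string input
		input := antlr.NewInputStream("input to be parsed")
		lexer := parsing.NewMyGrammarLexer(input)
		stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
		p := parsing.NewMyGrammarParser(stream)

		// Invoke the (placeholder) start rule and print the resulting parse tree
		tree := p.StartRule()
		fmt.Println(tree.ToStringTree(nil, p))
	}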
There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.

# Copyright Notice

Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.

Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.

[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
[new repo]: https://github.com/antlr4-go/antlr
[Jim Idle]: https://github.com/jimidle
[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
*/
package antlr
@ -20,10 +20,11 @@ var ATNInvalidAltNumber int
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
	// DecisionToState is the decision points for all rules, subrules, optional
	// blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
	// DecisionToState is the decision points for all rules, sub-rules, optional
	// blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
	// can go back later and build DFA predictors for them. This includes
	// all the rules, subrules, optional blocks, ()+, ()* etc...
	// all the rules, sub-rules, optional blocks, ()+, ()* etc...
	DecisionToState []DecisionState

	// grammarType is the ATN type and is used for deserializing ATNs from strings.
@ -51,6 +52,8 @@ type ATN struct {
	// specified, and otherwise is nil.
	ruleToTokenType []int

	// ATNStates is a list of all states in the ATN, ordered by state number.
	//
	states []ATNState

	mu sync.Mutex
335 vendor/github.com/antlr4-go/antlr/v4/atn_config.go generated vendored Normal file
@ -0,0 +1,335 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
)

const (
	lexerConfig  = iota // Indicates that this ATNConfig is for a lexer
	parserConfig        // Indicates that this ATNConfig is for a parser
)

// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
// context). The syntactic context is a graph-structured stack node whose
// path(s) to the root is the rule invocation(s) chain used to arrive in the
// state. The semantic context is the tree of semantic predicates encountered
// before reaching an ATN state.
type ATNConfig struct {
	precedenceFilterSuppressed     bool
	state                          ATNState
	alt                            int
	context                        *PredictionContext
	semanticContext                SemanticContext
	reachesIntoOuterContext        int
	cType                          int // lexerConfig or parserConfig
	lexerActionExecutor            *LexerActionExecutor
	passedThroughNonGreedyDecision bool
}

// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
	return NewATNConfig5(state, alt, context, SemanticContextNone)
}

// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
	if semanticContext == nil {
		panic("semanticContext cannot be nil") // TODO: Necessary?
	}

	pac := &ATNConfig{}
	pac.state = state
	pac.alt = alt
	pac.context = context
	pac.semanticContext = semanticContext
	pac.cType = parserConfig
	return pac
}

// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
	return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
}

// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
	return NewATNConfig(c, state, c.GetContext(), semanticContext)
}

// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only
func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
	return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
}

// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
	return NewATNConfig(c, state, context, c.GetSemanticContext())
}

// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors'
// are just wrappers around this one.
func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
	if semanticContext == nil {
		panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
	}
	b := &ATNConfig{}
	b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
	b.cType = parserConfig
	return b
}

func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {

	a.state = state
	a.alt = alt
	a.context = context
	a.semanticContext = semanticContext
	a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
	a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
}

func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
	return a.precedenceFilterSuppressed
}

func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
	a.precedenceFilterSuppressed = v
}

// GetState returns the ATN state associated with this configuration
func (a *ATNConfig) GetState() ATNState {
	return a.state
}

// GetAlt returns the alternative associated with this configuration
func (a *ATNConfig) GetAlt() int {
	return a.alt
}

// SetContext sets the rule invocation stack associated with this configuration
func (a *ATNConfig) SetContext(v *PredictionContext) {
	a.context = v
}

// GetContext returns the rule invocation stack associated with this configuration
func (a *ATNConfig) GetContext() *PredictionContext {
	return a.context
}

// GetSemanticContext returns the semantic context associated with this configuration
func (a *ATNConfig) GetSemanticContext() SemanticContext {
	return a.semanticContext
}

// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
func (a *ATNConfig) GetReachesIntoOuterContext() int {
	return a.reachesIntoOuterContext
}

// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
	a.reachesIntoOuterContext = v
}

// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
// for a collection.
//
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
	switch a.cType {
	case lexerConfig:
		return a.LEquals(o)
	case parserConfig:
		return a.PEquals(o)
	default:
		panic("Invalid ATNConfig type")
	}
}

// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
// for a collection.
//
// An ATN configuration is equal to another if both have the same state, they
// predict the same alternative, and syntactic/semantic contexts are the same.
func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
	var other, ok = o.(*ATNConfig)

	if !ok {
		return false
	}
	if a == other {
		return true
	} else if other == nil {
		return false
	}

	var equal bool

	if a.context == nil {
		equal = other.context == nil
	} else {
		equal = a.context.Equals(other.context)
	}

	var (
		nums = a.state.GetStateNumber() == other.state.GetStateNumber()
		alts = a.alt == other.alt
		cons = a.semanticContext.Equals(other.semanticContext)
		sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
	)

	return nums && alts && cons && sups && equal
}

// Hash is the default hash function for a parser ATNConfig, when no specialist hash function
// is required for a collection
func (a *ATNConfig) Hash() int {
	switch a.cType {
	case lexerConfig:
		return a.LHash()
	case parserConfig:
		return a.PHash()
	default:
		panic("Invalid ATNConfig type")
	}
}

// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
// is required for a collection
func (a *ATNConfig) PHash() int {
	var c int
	if a.context != nil {
		c = a.context.Hash()
	}

	h := murmurInit(7)
	h = murmurUpdate(h, a.state.GetStateNumber())
	h = murmurUpdate(h, a.alt)
	h = murmurUpdate(h, c)
	h = murmurUpdate(h, a.semanticContext.Hash())
	return murmurFinish(h, 4)
}

// String returns a string representation of the ATNConfig, usually used for debugging purposes
func (a *ATNConfig) String() string {
	var s1, s2, s3 string

	if a.context != nil {
		s1 = ",[" + fmt.Sprint(a.context) + "]"
	}

	if a.semanticContext != SemanticContextNone {
		s2 = "," + fmt.Sprint(a.semanticContext)
	}

	if a.reachesIntoOuterContext > 0 {
		s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
	}

	return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
}

func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
	lac := &ATNConfig{}
	lac.state = state
	lac.alt = alt
	lac.context = context
	lac.semanticContext = SemanticContextNone
	lac.cType = lexerConfig
	return lac
}

func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
	lac := &ATNConfig{}
	lac.lexerActionExecutor = c.lexerActionExecutor
	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
	lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
	lac.cType = lexerConfig
	return lac
}

func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
	lac := &ATNConfig{}
	lac.lexerActionExecutor = lexerActionExecutor
	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
	lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
	lac.cType = lexerConfig
	return lac
}

func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
	lac := &ATNConfig{}
	lac.lexerActionExecutor = c.lexerActionExecutor
	lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
	lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
	lac.cType = lexerConfig
	return lac
}

//goland:noinspection GoUnusedExportedFunction
func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
	lac := &ATNConfig{}
	lac.state = state
	lac.alt = alt
	lac.context = context
	lac.semanticContext = SemanticContextNone
	lac.cType = lexerConfig
	return lac
}

// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (a *ATNConfig) LHash() int {
	var f int
	if a.passedThroughNonGreedyDecision {
		f = 1
	} else {
		f = 0
	}
	h := murmurInit(7)
	h = murmurUpdate(h, a.state.GetStateNumber())
	h = murmurUpdate(h, a.alt)
	h = murmurUpdate(h, a.context.Hash())
	h = murmurUpdate(h, a.semanticContext.Hash())
	h = murmurUpdate(h, f)
	h = murmurUpdate(h, a.lexerActionExecutor.Hash())
	h = murmurFinish(h, 6)
	return h
}

// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via
// the default comparator [ObjEqComparator].
func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
	var otherT, ok = other.(*ATNConfig)
	if !ok {
		return false
	} else if a == otherT {
		return true
	} else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
		return false
	}

	switch {
	case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
		return true
	case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
		if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
			return false
		}
	default:
		return false // One but not both, are nil
	}

	return a.PEquals(otherT)
}

func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
	var ds, ok = target.(DecisionState)

	return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
}
301 vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go generated vendored Normal file
@ -0,0 +1,301 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
	"fmt"
)

// ATNConfigSet is a specialized set of ATNConfig that tracks information
// about its elements and can combine similar configurations using a
// graph-structured stack.
type ATNConfigSet struct {
	cachedHash int

	// configLookup is used to determine whether two ATNConfigSets are equal. We
	// need all configurations with the same (s, i, _, semctx) to be equal. A key
	// effectively doubles the number of objects associated with ATNConfigs. All
	// keys are hashed by (s, i, _, pi), not including the context. Wiped out when
	// read-only because a set becomes a DFA state.
	configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]

	// configs is the added elements that did not match an existing key in configLookup
	configs []*ATNConfig

	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
	// info together because it saves re-computation. Can we track conflicts as they
	// are added to save scanning configs later?
	conflictingAlts *BitSet

	// dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
	// we hit a pred while computing a closure operation. Do not make a DFA state
	// from the ATNConfigSet in this case. TODO: How is this used by parsers?
	dipsIntoOuterContext bool

	// fullCtx is whether it is part of a full context LL prediction. Used to
	// determine how to merge $. It is a wildcard with SLL, but not for an LL
	// context merge.
	fullCtx bool

	// Used in parser and lexer. In lexer, it indicates we hit a pred
	// while computing a closure operation. Don't make a DFA state from this set.
	hasSemanticContext bool

	// readOnly is whether it is read-only. Do not
	// allow any code to manipulate the set if true because DFA states will point at
	// sets and those must not change. If not, protect other fields; conflictingAlts
	// in particular, which is assigned after readOnly.
	readOnly bool

	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
	// info together because it saves re-computation. Can we track conflicts as they
	// are added to save scanning configs later?
	uniqueAlt int
}

// Alts returns the combined set of alts for all the configurations in this set.
func (b *ATNConfigSet) Alts() *BitSet {
	alts := NewBitSet()
	for _, it := range b.configs {
		alts.add(it.GetAlt())
	}
	return alts
}

// NewATNConfigSet creates a new ATNConfigSet instance.
func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
	return &ATNConfigSet{
		cachedHash:   -1,
		configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
		fullCtx:      fullCtx,
	}
}

// Add merges contexts with existing configs for (s, i, pi, _),
// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
// 'pi' is the [ATNConfig].semanticContext.
//
// We use (s,i,pi) as the key.
// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
	if b.readOnly {
		panic("set is read-only")
	}

	if config.GetSemanticContext() != SemanticContextNone {
		b.hasSemanticContext = true
	}

	if config.GetReachesIntoOuterContext() > 0 {
		b.dipsIntoOuterContext = true
	}

	existing, present := b.configLookup.Put(config)

	// The config was not already in the set
	//
	if !present {
		b.cachedHash = -1
		b.configs = append(b.configs, config) // Track order here
		return true
	}

	// Merge a previous (s, i, pi, _) with it and save the result
	rootIsWildcard := !b.fullCtx
	merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)

	// No need to check for existing.context because config.context is in the cache,
	// since the only way to create new graphs is the "call rule" and here. We cache
	// at both places.
	existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))

	// Preserve the precedence filter suppression during the merge
	if config.getPrecedenceFilterSuppressed() {
		existing.setPrecedenceFilterSuppressed(true)
	}

	// Replace the context because there is no need to do alt mapping
	existing.SetContext(merged)

	return true
}

// GetStates returns the set of states represented by all configurations in this config set
func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {

	// states uses the standard comparator and Hash() provided by the ATNState instance
	//
	states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")

	for i := 0; i < len(b.configs); i++ {
		states.Put(b.configs[i].GetState())
	}

	return states
}

func (b *ATNConfigSet) GetPredicates() []SemanticContext {
	predicates := make([]SemanticContext, 0)

	for i := 0; i < len(b.configs); i++ {
		c := b.configs[i].GetSemanticContext()

		if c != SemanticContextNone {
			predicates = append(predicates, c)
		}
	}

	return predicates
}

func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
	if b.readOnly {
		panic("set is read-only")
	}

	// Empty indicates no optimization is possible
	if b.configLookup == nil || b.configLookup.Len() == 0 {
		return
	}

	for i := 0; i < len(b.configs); i++ {
		config := b.configs[i]
		config.SetContext(interpreter.getCachedContext(config.GetContext()))
	}
}

func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
	for i := 0; i < len(coll); i++ {
		b.Add(coll[i], nil)
	}

	return false
}

// Compare The configs are only equal if they are in the same order and their Equals function returns true.
// Java uses ArrayList.equals(), which requires the same order.
func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
	if len(b.configs) != len(bs.configs) {
		return false
	}
	for i := 0; i < len(b.configs); i++ {
		if !b.configs[i].Equals(bs.configs[i]) {
			return false
		}
	}

	return true
}

func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
	if b == other {
		return true
	} else if _, ok := other.(*ATNConfigSet); !ok {
		return false
	}

	other2 := other.(*ATNConfigSet)
	var eca bool
	switch {
	case b.conflictingAlts == nil && other2.conflictingAlts == nil:
		eca = true
	case b.conflictingAlts != nil && other2.conflictingAlts != nil:
		eca = b.conflictingAlts.equals(other2.conflictingAlts)
	}
	return b.configs != nil &&
		b.fullCtx == other2.fullCtx &&
		b.uniqueAlt == other2.uniqueAlt &&
		eca &&
		b.hasSemanticContext == other2.hasSemanticContext &&
		b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
		b.Compare(other2)
}

func (b *ATNConfigSet) Hash() int {
	if b.readOnly {
		if b.cachedHash == -1 {
			b.cachedHash = b.hashCodeConfigs()
		}

		return b.cachedHash
	}

	return b.hashCodeConfigs()
}

func (b *ATNConfigSet) hashCodeConfigs() int {
	h := 1
	for _, config := range b.configs {
		h = 31*h + config.Hash()
	}
	return h
}

func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
	if b.readOnly {
		panic("not implemented for read-only sets")
	}
	if b.configLookup == nil {
		return false
	}
	return b.configLookup.Contains(item)
}

func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
	return b.Contains(item)
}

func (b *ATNConfigSet) Clear() {
	if b.readOnly {
		panic("set is read-only")
	}
	b.configs = make([]*ATNConfig, 0)
	b.cachedHash = -1
	b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
}

func (b *ATNConfigSet) String() string {

	s := "["

	for i, c := range b.configs {
		s += c.String()

		if i != len(b.configs)-1 {
			s += ", "
		}
	}

	s += "]"

	if b.hasSemanticContext {
		s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
	}

	if b.uniqueAlt != ATNInvalidAltNumber {
		s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
	}

	if b.conflictingAlts != nil {
		s += ",conflictingAlts=" + b.conflictingAlts.String()
	}

	if b.dipsIntoOuterContext {
		s += ",dipsIntoOuterContext"
	}

	return s
}

// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
// for use in lexers.
func NewOrderedATNConfigSet() *ATNConfigSet {
	return &ATNConfigSet{
		cachedHash: -1,
		// This set uses the standard Hash() and Equals() from ATNConfig
		configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
		fullCtx:      false,
	}
}
@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool {

func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
	if opts.readOnly {
		panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
		panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
	}
	opts.readOnly = readOnly
}
@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool {

func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
	if opts.readOnly {
		panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
		panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
	}
	opts.verifyATN = verifyATN
}
@ -42,11 +42,12 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {

func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
	if opts.readOnly {
		panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
		panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
	}
	opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}

//goland:noinspection GoUnusedExportedFunction
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
	return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}
@ -35,6 +35,7 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
	return &ATNDeserializer{options: options}
}

//goland:noinspection GoUnusedFunction
func stringInSlice(a string, list []string) int {
	for i, b := range list {
		if b == a {
@ -193,7 +194,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) {
	}
}

func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
	m := a.readInt()

	// Preallocate the needed capacity.
@ -350,7 +351,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {

	bypassStart.endState = bypassStop

	atn.defineDecisionState(bypassStart.BaseDecisionState)
	atn.defineDecisionState(&bypassStart.BaseDecisionState)

	bypassStop.startState = bypassStart

@ -450,7 +451,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
		continue
	}

	// We analyze the ATN to determine if a ATN decision state is the
	// We analyze the [ATN] to determine if an ATN decision state is the
	// decision for the closure block that determines whether a
	// precedence rule should continue or complete.
	if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
@ -553,7 +554,7 @@ func (a *ATNDeserializer) readInt() int {
	return int(v) // data is 32 bits but int is at least that big
}

func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
	target := atn.states[trg]

	switch typeIndex {
@ -4,7 +4,7 @@

package antlr

var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))

type IATNSimulator interface {
	SharedContextCache() *PredictionContextCache
@ -18,22 +18,13 @@ type BaseATNSimulator struct {
	decisionToDFA []*DFA
}

func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
	b := new(BaseATNSimulator)

	b.atn = atn
	b.sharedContextCache = sharedContextCache

	return b
}

func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
	if b.sharedContextCache == nil {
		return context
	}

	visited := make(map[PredictionContext]PredictionContext)

	//visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
	visited := NewVisitRecord()
	return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
}
@ -4,7 +4,11 @@

package antlr

import "strconv"
import (
	"fmt"
	"os"
	"strconv"
)

// Constants for serialization.
const (
@ -25,6 +29,7 @@ const (
	ATNStateInvalidStateNumber = -1
)

//goland:noinspection GoUnusedGlobalVariable
var ATNStateInitialNumTransitions = 4

type ATNState interface {
@ -73,7 +78,7 @@ type BaseATNState struct {
	transitions []Transition
}

func NewBaseATNState() *BaseATNState {
func NewATNState() *BaseATNState {
	return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}

@ -148,27 +153,46 @@ func (as *BaseATNState) AddTransition(trans Transition, index int) {
	if len(as.transitions) == 0 {
		as.epsilonOnlyTransitions = trans.getIsEpsilon()
	} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
		_, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
		as.epsilonOnlyTransitions = false
	}

	// TODO: Check code for already present compared to the Java equivalent
	//alreadyPresent := false
	//for _, t := range as.transitions {
	//	if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
	//		if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
	//			alreadyPresent = true
	//			break
	//		}
	//	} else if t.getIsEpsilon() && trans.getIsEpsilon() {
	//		alreadyPresent = true
	//		break
	//	}
	//}
	//if !alreadyPresent {
	if index == -1 {
		as.transitions = append(as.transitions, trans)
	} else {
		as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
		// TODO: as.transitions.splice(index, 1, trans)
	}
	//} else {
	//	_, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
	//}
}

type BasicState struct {
	*BaseATNState
	BaseATNState
}

func NewBasicState() *BasicState {
	b := NewBaseATNState()

	b.stateType = ATNStateBasic

	return &BasicState{BaseATNState: b}
	return &BasicState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateBasic,
		},
	}
}

type DecisionState interface {
@ -182,13 +206,19 @@ type DecisionState interface {
}

type BaseDecisionState struct {
	*BaseATNState
	BaseATNState
	decision  int
	nonGreedy bool
}

func NewBaseDecisionState() *BaseDecisionState {
	return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
	return &BaseDecisionState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateBasic,
		},
		decision: -1,
	}
}

func (s *BaseDecisionState) getDecision() int {
@ -216,12 +246,20 @@ type BlockStartState interface {

// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
	*BaseDecisionState
	BaseDecisionState
	endState *BlockEndState
}

func NewBlockStartState() *BaseBlockStartState {
	return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
	return &BaseBlockStartState{
		BaseDecisionState: BaseDecisionState{
			BaseATNState: BaseATNState{
				stateNumber: ATNStateInvalidStateNumber,
				stateType:   ATNStateBasic,
			},
			decision: -1,
		},
	}
}

func (s *BaseBlockStartState) getEndState() *BlockEndState {
@ -233,31 +271,38 @@ func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
}

type BasicBlockStartState struct {
	*BaseBlockStartState
	BaseBlockStartState
}

func NewBasicBlockStartState() *BasicBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStateBlockStart

	return &BasicBlockStartState{BaseBlockStartState: b}
	return &BasicBlockStartState{
		BaseBlockStartState: BaseBlockStartState{
			BaseDecisionState: BaseDecisionState{
				BaseATNState: BaseATNState{
					stateNumber: ATNStateInvalidStateNumber,
					stateType:   ATNStateBlockStart,
				},
			},
		},
	}
}

var _ BlockStartState = &BasicBlockStartState{}

// BlockEndState is a terminal node of a simple (a|b|c) block.
type BlockEndState struct {
	*BaseATNState
	BaseATNState
	startState ATNState
}

func NewBlockEndState() *BlockEndState {
	b := NewBaseATNState()

	b.stateType = ATNStateBlockEnd

	return &BlockEndState{BaseATNState: b}
	return &BlockEndState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateBlockEnd,
		},
		startState: nil,
	}
}

// RuleStopState is the last node in the ATN for a rule, unless that rule is the
@ -265,43 +310,48 @@ func NewBlockEndState() *BlockEndState {
// encode references to all calls to this rule to compute FOLLOW sets for error
// handling.
type RuleStopState struct {
	*BaseATNState
	BaseATNState
}

func NewRuleStopState() *RuleStopState {
	b := NewBaseATNState()

	b.stateType = ATNStateRuleStop

	return &RuleStopState{BaseATNState: b}
	return &RuleStopState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateRuleStop,
		},
	}
}

type RuleStartState struct {
	*BaseATNState
	BaseATNState
	stopState        ATNState
	isPrecedenceRule bool
}

func NewRuleStartState() *RuleStartState {
	b := NewBaseATNState()

	b.stateType = ATNStateRuleStart

	return &RuleStartState{BaseATNState: b}
	return &RuleStartState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateRuleStart,
		},
	}
}

// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
// transitions: one to the loop back to start of the block, and one to exit.
type PlusLoopbackState struct {
	*BaseDecisionState
	BaseDecisionState
}

func NewPlusLoopbackState() *PlusLoopbackState {
	b := NewBaseDecisionState()

	b.stateType = ATNStatePlusLoopBack

	return &PlusLoopbackState{BaseDecisionState: b}
	return &PlusLoopbackState{
		BaseDecisionState: BaseDecisionState{
			BaseATNState: BaseATNState{
				stateNumber: ATNStateInvalidStateNumber,
				stateType:   ATNStatePlusLoopBack,
			},
		},
	}
}

// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
@ -309,85 +359,103 @@ func NewPlusLoopbackState() *PlusLoopbackState {
// it is included for completeness. In reality, PlusLoopbackState is the real
// decision-making node for A+.
type PlusBlockStartState struct {
	*BaseBlockStartState
	BaseBlockStartState
	loopBackState ATNState
}

func NewPlusBlockStartState() *PlusBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStatePlusBlockStart

	return &PlusBlockStartState{BaseBlockStartState: b}
	return &PlusBlockStartState{
		BaseBlockStartState: BaseBlockStartState{
			BaseDecisionState: BaseDecisionState{
				BaseATNState: BaseATNState{
					stateNumber: ATNStateInvalidStateNumber,
					stateType:   ATNStatePlusBlockStart,
				},
			},
		},
	}
}

var _ BlockStartState = &PlusBlockStartState{}

// StarBlockStartState is the block that begins a closure loop.
type StarBlockStartState struct {
	*BaseBlockStartState
	BaseBlockStartState
}

func NewStarBlockStartState() *StarBlockStartState {
	b := NewBlockStartState()

	b.stateType = ATNStateStarBlockStart

	return &StarBlockStartState{BaseBlockStartState: b}
	return &StarBlockStartState{
		BaseBlockStartState: BaseBlockStartState{
			BaseDecisionState: BaseDecisionState{
				BaseATNState: BaseATNState{
					stateNumber: ATNStateInvalidStateNumber,
					stateType:   ATNStateStarBlockStart,
				},
			},
		},
	}
}

var _ BlockStartState = &StarBlockStartState{}

type StarLoopbackState struct {
	*BaseATNState
	BaseATNState
}

func NewStarLoopbackState() *StarLoopbackState {
	b := NewBaseATNState()

	b.stateType = ATNStateStarLoopBack

	return &StarLoopbackState{BaseATNState: b}
	return &StarLoopbackState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateStarLoopBack,
		},
	}
}

type StarLoopEntryState struct {
	*BaseDecisionState
	BaseDecisionState
	loopBackState          ATNState
	precedenceRuleDecision bool
}

func NewStarLoopEntryState() *StarLoopEntryState {
	b := NewBaseDecisionState()

	b.stateType = ATNStateStarLoopEntry

	// False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
	return &StarLoopEntryState{BaseDecisionState: b}
	return &StarLoopEntryState{
		BaseDecisionState: BaseDecisionState{
			BaseATNState: BaseATNState{
				stateNumber: ATNStateInvalidStateNumber,
				stateType:   ATNStateStarLoopEntry,
			},
		},
	}
}

// LoopEndState marks the end of a * or + loop.
type LoopEndState struct {
	*BaseATNState
	BaseATNState
	loopBackState ATNState
}

func NewLoopEndState() *LoopEndState {
	b := NewBaseATNState()

	b.stateType = ATNStateLoopEnd

	return &LoopEndState{BaseATNState: b}
	return &LoopEndState{
		BaseATNState: BaseATNState{
			stateNumber: ATNStateInvalidStateNumber,
			stateType:   ATNStateLoopEnd,
		},
	}
}

// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
type TokensStartState struct {
	*BaseDecisionState
	BaseDecisionState
}

func NewTokensStartState() *TokensStartState {
	b := NewBaseDecisionState()

	b.stateType = ATNStateTokenStart

	return &TokensStartState{BaseDecisionState: b}
	return &TokensStartState{
		BaseDecisionState: BaseDecisionState{
			BaseATNState: BaseATNState{
				stateNumber: ATNStateInvalidStateNumber,
				stateType:   ATNStateTokenStart,
			},
		},
	}
}
@ -8,5 +8,5 @@ type CharStream interface {
	IntStream
	GetText(int, int) string
	GetTextFromTokens(start, end Token) string
	GetTextFromInterval(*Interval) string
	GetTextFromInterval(Interval) string
}
@ -28,22 +28,24 @@ type CommonTokenStream struct {
	// trivial with bt field.
	fetchedEOF bool

	// index indexs into tokens of the current token (next token to consume).
	// index into [tokens] of the current token (next token to consume).
	// tokens[p] should be LT(1). It is set to -1 when the stream is first
	// constructed or when SetTokenSource is called, indicating that the first token
	// has not yet been fetched from the token source. For additional information,
	// see the documentation of IntStream for a description of initializing methods.
	// see the documentation of [IntStream] for a description of initializing methods.
	index int

	// tokenSource is the TokenSource from which tokens for the bt stream are
	// tokenSource is the [TokenSource] from which tokens for the bt stream are
	// fetched.
	tokenSource TokenSource

	// tokens is all tokens fetched from the token source. The list is considered a
	// tokens contains all tokens fetched from the token source. The list is considered a
	// complete view of the input once fetchedEOF is set to true.
	tokens []Token
}

// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
// tokens and will pull tokens from the given lexer channel.
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
	return &CommonTokenStream{
		channel: channel,
@ -53,6 +55,7 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
	}
}

// GetAllTokens returns all tokens currently pulled from the token source.
func (c *CommonTokenStream) GetAllTokens() []Token {
	return c.tokens
}
@ -61,9 +64,11 @@ func (c *CommonTokenStream) Mark() int {
	return 0
}

func (c *CommonTokenStream) Release(marker int) {}
func (c *CommonTokenStream) Release(_ int) {}

func (c *CommonTokenStream) reset() {
func (c *CommonTokenStream) Reset() {
	c.fetchedEOF = false
	c.tokens = make([]Token, 0)
	c.Seek(0)
}

@ -107,7 +112,7 @@ func (c *CommonTokenStream) Consume() {
// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
	n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
	n := i - len(c.tokens) + 1 // How many more elements do we need?

	if n > 0 {
		fetched := c.fetch(n)
@ -193,12 +198,13 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
	c.tokenSource = tokenSource
	c.tokens = make([]Token, 0)
	c.index = -1
	c.fetchedEOF = false
}

// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
// no tokens on channel between i and EOF.
func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
// no tokens on channel between 'i' and [TokenEOF].
func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
	c.Sync(i)

	if i >= len(c.tokens) {
@ -244,7 +250,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To
	nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
	from := tokenIndex + 1

	// If no onchannel to the right, then nextOnChannel == -1, so set to to last token
	// If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
	var to int

	if nextOnChannel == -1 {
@ -314,7 +320,8 @@ func (c *CommonTokenStream) Index() int {
}

func (c *CommonTokenStream) GetAllText() string {
	return c.GetTextFromInterval(nil)
	c.Fill()
	return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
}

func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
@ -329,15 +336,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
	return c.GetTextFromInterval(interval.GetSourceInterval())
}

func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
	c.lazyInit()

	if interval == nil {
		c.Fill()
		interval = NewInterval(0, len(c.tokens)-1)
	} else {
		c.Sync(interval.Stop)
	}
	c.Sync(interval.Stop)

	start := interval.Start
	stop := interval.Stop
@ -18,17 +18,20 @@ package antlr
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
// allows us to use it in any collection instance that does not require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}

var (
	aStateEqInst    = &ObjEqComparator[ATNState]{}
	aConfEqInst     = &ObjEqComparator[ATNConfig]{}
	aConfCompInst   = &ATNConfigComparator[ATNConfig]{}
	atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
	aStateEqInst = &ObjEqComparator[ATNState]{}
	aConfEqInst  = &ObjEqComparator[*ATNConfig]{}

	// aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
	aConfCompInst   = &ATNConfigComparator[*ATNConfig]{}
	atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
	dfaStateEqInst  = &ObjEqComparator[*DFAState]{}
	semctxEqInst    = &ObjEqComparator[SemanticContext]{}
	atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
	atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
	pContextEqInst  = &ObjEqComparator[*PredictionContext]{}
)

// Equals2 delegates to the Equals() method of type T
@ -44,14 +47,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int {

type SemCComparator[T Collectable[T]] struct{}

// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {

	// Same pointer, must be equal, even if both nil
	//
@ -72,7 +75,8 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
}

// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {

	hash := 7
	hash = 31*hash + o.GetState().GetStateNumber()
	hash = 31*hash + o.GetAlt()
@ -85,7 +89,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {

	// Same pointer, must be equal, even if both nil
	//
@ -105,21 +109,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
}

// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
	h := murmurInit(7)
	h = murmurUpdate(h, o.GetState().GetStateNumber())
	h = murmurUpdate(h, o.GetContext().Hash())
	return murmurFinish(h, 2)
}

// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}

// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {

	// Same pointer, must be equal, even if both nil
	//
@ -141,7 +145,6 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {

// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {

func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
	return o.Hash()
}
214 vendor/github.com/antlr4-go/antlr/v4/configuration.go generated vendored Normal file
@ -0,0 +1,214 @@
package antlr
|
||||
|
||||
type runtimeConfiguration struct {
|
||||
statsTraceStacks bool
|
||||
lexerATNSimulatorDebug bool
|
||||
lexerATNSimulatorDFADebug bool
|
||||
parserATNSimulatorDebug bool
|
||||
parserATNSimulatorTraceATNSim bool
|
||||
parserATNSimulatorDFADebug bool
|
||||
parserATNSimulatorRetryDebug bool
|
||||
lRLoopEntryBranchOpt bool
|
||||
memoryManager bool
|
||||
}
|
||||
|
||||
// Global runtime configuration
|
||||
var runtimeConfig = runtimeConfiguration{
|
||||
lRLoopEntryBranchOpt: true,
|
||||
}
|
||||
|
||||
type runtimeOption func(*runtimeConfiguration) error
|
||||
|
||||
// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options.
|
||||
// It uses the functional options pattern for go. This is a package global function as it operates on the runtime
|
||||
// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
|
||||
// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
|
||||
// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
|
||||
// memory allocation type used by the runtime such as sync.Pool or not.
|
||||
//
|
||||
// The options are applied in the order they are passed in, so the last option will override any previous options.
|
||||
//
|
||||
// For example, if you want to turn on the collection create point stack flag to true, you can do:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
|
||||
//
|
||||
// If you want to turn it off, you can do:
|
||||
//
|
||||
// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
|
||||
func ConfigureRuntime(options ...runtimeOption) error {
|
||||
for _, option := range options {
|
||||
err := option(&runtimeConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
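A hedged usage sketch of the ConfigureRuntime API defined above. It assumes the vendored import path shown in the file header and relies on the documented ordering rule that later options override earlier ones:

	package main

	import (
		"log"

		"github.com/antlr4-go/antlr/v4"
	)

	func main() {
		// Later options win, so stats-trace stacks end up disabled here.
		err := antlr.ConfigureRuntime(
			antlr.WithStatsTraceStacks(true),
			antlr.WithStatsTraceStacks(false),
		)
		if err != nil {
			log.Fatal(err)
		}
	}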

// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
// certain structs, such as collections, or the use point of certain methods such as Put().
// Because this can be expensive, it is turned off by default. However, it
// can be useful to track down exactly where memory is being created and used.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
func WithStatsTraceStacks(trace bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.statsTraceStacks = trace
		return nil
	}
}

// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.lexerATNSimulatorDebug = debug
		return nil
	}
}

// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.lexerATNSimulatorDFADebug = debug
		return nil
	}
}

// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
func WithParserATNSimulatorDebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.parserATNSimulatorDebug = debug
		return nil
	}
}

// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.parserATNSimulatorTraceATNSim = trace
		return nil
	}
}

// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
// to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.parserATNSimulatorDFADebug = debug
		return nil
	}
}

// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
// Only useful to the runtime maintainers.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.parserATNSimulatorRetryDebug = debug
		return nil
	}
}

// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
//
// Note that the default is to use this optimization.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
//
// You can turn it off at any time using:
//
//	antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.lRLoopEntryBranchOpt = off
		return nil
	}
}

// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
// grammar, this may help make it more practical.
//
// Note that the default is to use normal Go memory allocation and not pool memory.
//
// Use:
//
//	antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
//
// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
// and should remember to nil out any references to the parser or lexer when you are done with them.
func WithMemoryManager(use bool) runtimeOption {
	return func(config *runtimeConfiguration) error {
		config.memoryManager = use
		return nil
	}
}
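Given the guidance above to pick one memory strategy and keep it for the life of the process, a plausible pattern is to enable the pooled allocator once at startup, before any lexer or parser is built. A sketch under that assumption:

	// Hedged sketch: turn the memory manager on once, process-wide.
	func init() {
		if err := antlr.ConfigureRuntime(antlr.WithMemoryManager(true)); err != nil {
			panic(err) // the options above currently never fail, but the API allows it
		}
	}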
@@ -4,6 +4,8 @@

package antlr

// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
// reach and the transitions between them.
type DFA struct {
	// atnStartState is the ATN state in which this was created
	atnStartState DecisionState
@@ -12,10 +14,9 @@ type DFA struct {

	// states is all the DFA states. Use Map to get the old state back; Set can only
	// indicate whether it is there. Go maps implement key hash collisions and so on and are very
	// good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
	// good, but the DFAState is an object and can't be used directly as the key as it can in say Java
	// and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
	// to see if they really are the same object.
	//
	// to see if they really are the same object. Hence, we have our own map storage.
	//
	states *JStore[*DFAState, *ObjEqComparator[*DFAState]]

@@ -32,11 +33,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
	dfa := &DFA{
		atnStartState: atnStartState,
		decision:      decision,
		states:        NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
		states:        nil, // Lazy initialize
	}
	if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
		dfa.precedenceDfa = true
		dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
		dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
		dfa.s0.isAcceptState = false
		dfa.s0.requiresFullContext = false
	}
@@ -95,12 +96,11 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
	if d.getPrecedenceDfa() != precedenceDfa {
		d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
		d.states = nil // Lazy initialize
		d.numstates = 0

		if precedenceDfa {
			precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))

			precedenceState := NewDFAState(-1, NewATNConfigSet(false))
			precedenceState.setEdges(make([]*DFAState, 0))
			precedenceState.isAcceptState = false
			precedenceState.requiresFullContext = false
@@ -113,6 +113,31 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
	}
}

// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
// instantiation of the states JMap.
func (d *DFA) Len() int {
	if d.states == nil {
		return 0
	}
	return d.states.Len()
}

// Get returns a state that matches s if it is present in the DFA state set. We defer to this
// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap.
func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
	if d.states == nil {
		return nil, false
	}
	return d.states.Get(s)
}

func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
	if d.states == nil {
		d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
	}
	return d.states.Put(s)
}
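Len, Get, and Put above funnel every access through one place so the backing JStore can be allocated only on the first write; reads against an empty DFA never allocate. The same lazy-initialization pattern in miniature, with hypothetical names that are not part of the runtime:

	// lazyStates is an illustrative stand-in for the DFA's lazily created store.
	type lazyStates struct {
		m map[int]string
	}

	func (l *lazyStates) Len() int {
		if l.m == nil {
			return 0 // reads never force an allocation
		}
		return len(l.m)
	}

	func (l *lazyStates) Put(k int, v string) {
		if l.m == nil {
			l.m = make(map[int]string) // allocate on first write only
		}
		l.m[k] = v
	}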

func (d *DFA) getS0() *DFAState {
	return d.s0
}
@@ -121,9 +146,11 @@ func (d *DFA) setS0(s *DFAState) {
	d.s0 = s
}

// sortedStates returns the states in d sorted by their state number.
// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
func (d *DFA) sortedStates() []*DFAState {

	if d.states == nil {
		return []*DFAState{}
	}
	vs := d.states.SortedSlice(func(i, j *DFAState) bool {
		return i.stateNumber < j.stateNumber
	})
@@ -10,7 +10,7 @@ import (
	"strings"
)

// DFASerializer is a DFA walker that knows how to dump them to serialized
// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
// strings.
type DFASerializer struct {
	dfa *DFA
@@ -22,30 +22,31 @@ func (p *PredPrediction) String() string {
	return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}

// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
// after reading input a1a2..an, the DFA is in a state that represents the
// after reading input a1, a2,..an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
// state along some path labeled a1a2..an." In conventional NFA-to-DFA
// conversion, therefore, the subset T would be a bitset representing the set of
// states the ATN could be in. We need to track the alt predicted by each state
// state along some path labeled a1a2..an."
//
// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
// states the [ATN] could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
// state (ala normal conversion) and a RuleContext describing the chain of rules
// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
// state (ala normal conversion) and a [RuleContext] describing the chain of rules
// (if any) followed to arrive at that state.
//
// A DFAState may have multiple references to a particular state, but with
// different ATN contexts (with same or different alts) meaning that state was
// A [DFAState] may have multiple references to a particular state, but with
// different [ATN] contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
	stateNumber int
	configs     ATNConfigSet
	configs     *ATNConfigSet

	// edges elements point to the target of the symbol. Shift up by 1 so (-1)
	// Token.EOF maps to the first element.
@@ -53,7 +54,7 @@ type DFAState struct {

	isAcceptState bool

	// prediction is the ttype we match or alt we predict if the state is accept.
	// prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
	// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
	// requiresFullContext.
	prediction int
@@ -81,9 +82,9 @@ type DFAState struct {
	predicates []*PredPrediction
}

func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
	if configs == nil {
		configs = NewBaseATNConfigSet(false)
		configs = NewATNConfigSet(false)
	}

	return &DFAState{configs: configs, stateNumber: stateNumber}
@@ -94,7 +95,7 @@ func (d *DFAState) GetAltSet() []int {
	var alts []int

	if d.configs != nil {
		for _, c := range d.configs.GetItems() {
		for _, c := range d.configs.configs {
			alts = append(alts, c.GetAlt())
		}
	}
@@ -33,6 +33,7 @@ type DiagnosticErrorListener struct {
	exactOnly bool
}

//goland:noinspection GoUnusedExportedFunction
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {

	n := new(DiagnosticErrorListener)
@@ -42,7 +43,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
	return n
}

func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
	if d.exactOnly && !exact {
		return
	}
@@ -55,7 +56,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s
	recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {

	msg := "reportAttemptingFullContext d=" +
		d.getDecisionDescription(recognizer, dfa) +
@@ -64,7 +65,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser,
	recognizer.NotifyErrorListeners(msg, nil, nil)
}

func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
	msg := "reportContextSensitivity d=" +
		d.getDecisionDescription(recognizer, dfa) +
		", input='" +
@@ -96,12 +97,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
	if ReportedAlts != nil {
		return ReportedAlts
	}
	result := NewBitSet()
	for _, c := range set.GetItems() {
	for _, c := range set.configs {
		result.add(c.GetAlt())
}
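A DiagnosticErrorListener is typically attached to a generated parser so that ambiguity reports flow through NotifyErrorListeners as shown above. A hedged sketch; p is a hypothetical generated parser, and AddErrorListener comes from the embedded *antlr.BaseParser:

	// enableDiagnostics is illustrative, not part of the runtime.
	func enableDiagnostics(p *antlr.BaseParser) {
		// exactOnly=true limits reports to exact ambiguities, per the field above.
		p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))
	}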

@@ -16,28 +16,29 @@ import (

type ErrorListener interface {
	SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
	ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
	ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
	ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
	ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
	ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
	ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
}

type DefaultErrorListener struct {
}

//goland:noinspection GoUnusedExportedFunction
func NewDefaultErrorListener() *DefaultErrorListener {
	return new(DefaultErrorListener)
}

func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
}

func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
}

func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
}

func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
}
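Because DefaultErrorListener supplies no-op implementations of every ErrorListener method above, custom listeners normally embed it and override only what they need. A hedged sketch; collectingListener is an illustrative name, not part of the runtime:

	// collectingListener gathers syntax errors instead of printing them.
	type collectingListener struct {
		*antlr.DefaultErrorListener // inherit the no-ops above
		errors []string
	}

	func (c *collectingListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, _ antlr.RecognitionException) {
		c.errors = append(c.errors, fmt.Sprintf("line %d:%d %s", line, column, msg))
	}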

type ConsoleErrorListener struct {
@@ -48,21 +49,16 @@ func NewConsoleErrorListener() *ConsoleErrorListener {
	return new(ConsoleErrorListener)
}

// Provides a default instance of {@link ConsoleErrorListener}.
// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}.
var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()

// {@inheritDoc}
// SyntaxError prints messages to System.err containing the
// values of line, charPositionInLine, and msg using
// the following format:
//
// <p>
// This implementation prints messages to {@link System//err} containing the
// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
// the following format.</p>
//
// <pre>
// line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
// </pre>
func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
	fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
//
//	line <line>:<charPositionInLine> <msg>
func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
	_, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
}
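Since ConsoleErrorListenerINSTANCE is installed on recognizers by default, swapping in a custom listener usually means removing the defaults first. A hedged two-liner reusing the collectingListener sketched earlier, with p again a hypothetical generated parser:

	p.RemoveErrorListeners() // drop ConsoleErrorListenerINSTANCE
	p.AddErrorListener(&collectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()})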

type ProxyErrorListener struct {
@@ -85,19 +81,19 @@ func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol
	}
}

func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
	for _, d := range p.delegates {
		d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
	}
}

func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
	for _, d := range p.delegates {
		d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
	}
}

func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
	for _, d := range p.delegates {
		d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
	}
@@ -21,8 +21,8 @@ type ErrorStrategy interface {
	ReportMatch(Parser)
}

// This is the default implementation of {@link ANTLRErrorStrategy} used for
// error Reporting and recovery in ANTLR parsers.
// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
// error reporting and recovery in ANTLR parsers.
type DefaultErrorStrategy struct {
	errorRecoveryMode bool
	lastErrorIndex    int
@@ -46,7 +46,7 @@ func NewDefaultErrorStrategy() *DefaultErrorStrategy {
	// The index into the input stream where the last error occurred.
	// This is used to prevent infinite loops where an error is found
	// but no token is consumed during recovery...another error is found,
	// ad nauseum. This is a failsafe mechanism to guarantee that at least
	// ad nauseam. This is a failsafe mechanism to guarantee that at least
	// one token/tree node is consumed for two errors.
	//
	d.lastErrorIndex = -1
@@ -62,50 +62,37 @@ func (d *DefaultErrorStrategy) reset(recognizer Parser) {

// This method is called to enter error recovery mode when a recognition
// exception is Reported.
//
// @param recognizer the parser instance
func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
	d.errorRecoveryMode = true
}

func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
	return d.errorRecoveryMode
}

// This method is called to leave error recovery mode after recovering from
// a recognition exception.
//
// @param recognizer
func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
	d.errorRecoveryMode = false
	d.lastErrorStates = nil
	d.lastErrorIndex = -1
}

// {@inheritDoc}
//
// <p>The default implementation simply calls {@link //endErrorCondition}.</p>
// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
	d.endErrorCondition(recognizer)
}

// {@inheritDoc}
// ReportError is the default implementation of error reporting.
// It returns immediately if the handler is already
// in error recovery mode. Otherwise, it calls [beginErrorCondition]
// and dispatches the Reporting task based on the runtime type of e
// according to the following table.
//
// <p>The default implementation returns immediately if the handler is already
// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
// and dispatches the Reporting task based on the runtime type of {@code e}
// according to the following table.</p>
//
// <ul>
// <li>{@link NoViableAltException}: Dispatches the call to
// {@link //ReportNoViableAlternative}</li>
// <li>{@link InputMisMatchException}: Dispatches the call to
// {@link //ReportInputMisMatch}</li>
// <li>{@link FailedPredicateException}: Dispatches the call to
// {@link //ReportFailedPredicate}</li>
// <li>All other types: calls {@link Parser//NotifyErrorListeners} to Report
// the exception</li>
// </ul>
//
//	[NoViableAltException]: Dispatches the call to [ReportNoViableAlternative]
//	[InputMisMatchException]: Dispatches the call to [ReportInputMisMatch]
//	[FailedPredicateException]: Dispatches the call to [ReportFailedPredicate]
//	All other types: Calls [NotifyErrorListeners] to Report the exception
func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
	// if we've already Reported an error and have not Matched a token
	// yet successfully, don't Report any errors.
@@ -128,12 +115,10 @@ func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionExcep
	}
}
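In Go terms, the dispatch table that ReportError documents above is a type switch on the concrete exception type. An illustrative restatement of its shape (the authoritative body is ReportError itself, and the accessor methods used here are assumptions about the RecognitionException interface):

	// dispatchReport mirrors the documented dispatch; illustrative only.
	func dispatchReport(d *antlr.DefaultErrorStrategy, recognizer antlr.Parser, e antlr.RecognitionException) {
		switch t := e.(type) {
		case *antlr.NoViableAltException:
			d.ReportNoViableAlternative(recognizer, t)
		case *antlr.InputMisMatchException:
			d.ReportInputMisMatch(recognizer, t)
		case *antlr.FailedPredicateException:
			d.ReportFailedPredicate(recognizer, t)
		default:
			recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
		}
	}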

// {@inheritDoc}
//
// <p>The default implementation reSynchronizes the parser by consuming tokens
// until we find one in the reSynchronization set--loosely the set of tokens
// that can follow the current rule.</p>
func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
// Recover is the default recovery implementation.
// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
// loosely the set of tokens that can follow the current rule.
func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {

	if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
		d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
@@ -148,54 +133,58 @@ func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException
		d.lastErrorStates = NewIntervalSet()
	}
	d.lastErrorStates.addOne(recognizer.GetState())
	followSet := d.getErrorRecoverySet(recognizer)
	followSet := d.GetErrorRecoverySet(recognizer)
	d.consumeUntil(recognizer, followSet)
}

// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
// that the current lookahead symbol is consistent with what were expecting
// at d point in the ATN. You can call d anytime but ANTLR only
// generates code to check before subrules/loops and each iteration.
// Sync is the default implementation of error strategy synchronization.
//
// <p>Implements Jim Idle's magic Sync mechanism in closures and optional
// subrules. E.g.,</p>
// This Sync makes sure that the current lookahead symbol is consistent with what we were expecting
// at this point in the [ATN]. You can call this anytime but ANTLR only
// generates code to check before sub-rules/loops and each iteration.
//
// <pre>
// a : Sync ( stuff Sync )*
// Sync : {consume to what can follow Sync}
// </pre>
// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
// sub-rules. E.g.:
//
// At the start of a sub rule upon error, {@link //Sync} performs single
//	a : Sync ( stuff Sync )*
//	Sync : {consume to what can follow Sync}
//
// At the start of a sub-rule upon error, Sync performs single
// token deletion, if possible. If it can't do that, it bails on the current
// rule and uses the default error recovery, which consumes until the
// reSynchronization set of the current rule.
//
// <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
// with an empty alternative), then the expected set includes what follows
// the subrule.</p>
// If the sub-rule is optional
//
// <p>During loop iteration, it consumes until it sees a token that can start a
// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
// stay in the loop as long as possible.</p>
// ({@code (...)?}, {@code (...)*},
//
// <p><strong>ORIGINS</strong></p>
// or a block with an empty alternative), then the expected set includes what follows
// the sub-rule.
//
// <p>Previous versions of ANTLR did a poor job of their recovery within loops.
// During loop iteration, it consumes until it sees a token that can start a
// sub-rule or what follows a loop. Yes, that is pretty aggressive. We opt to
// stay in the loop as long as possible.
//
// # Origins
//
// Previous versions of ANTLR did a poor job of their recovery within loops.
// A single mismatch token or missing token would force the parser to bail
// out of the entire rules surrounding the loop. So, for rule</p>
// out of the entire rules surrounding the loop. So, for rule:
//
// <pre>
// classfunc : 'class' ID '{' member* '}'
// </pre>
//	classfunc : 'class' ID '{' member* '}'
//
// input with an extra token between members would force the parser to
// consume until it found the next class definition rather than the next
// member definition of the current class.
//
// <p>This functionality cost a little bit of effort because the parser has to
// compare token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off d
// functionality by simply overriding d method as a blank { }.</p>
// This functionality cost a bit of effort because the parser has to
// compare the token set at the start of the loop and at each iteration. If for
// some reason speed is suffering for you, you can turn off this
// functionality by simply overriding this method as empty:
//
//	{ }
//
// [Jim Idle]: https://github.com/jimidle
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
	// If already recovering, don't try to Sync
	if d.InErrorRecoveryMode(recognizer) {
@@ -217,25 +206,21 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
		if d.SingleTokenDeletion(recognizer) != nil {
			return
		}
		panic(NewInputMisMatchException(recognizer))
		recognizer.SetError(NewInputMisMatchException(recognizer))
	case ATNStatePlusLoopBack, ATNStateStarLoopBack:
		d.ReportUnwantedToken(recognizer)
		expecting := NewIntervalSet()
		expecting.addSet(recognizer.GetExpectedTokens())
		whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
		whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
		d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
	default:
		// do nothing if we can't identify the exact kind of ATN state
	}
}
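The comment above notes that the per-loop Sync checks can be disabled by overriding Sync as an empty method. In Go that is normally done by embedding the default strategy; noSyncStrategy below is an illustrative name, not part of the runtime:

	// noSyncStrategy inherits all recovery behavior but skips Sync,
	// trading recovery quality inside loops for a little speed.
	type noSyncStrategy struct {
		*antlr.DefaultErrorStrategy
	}

	func (s *noSyncStrategy) Sync(_ antlr.Parser) {}

A parser p would adopt it with p.SetErrorHandler(&noSyncStrategy{antlr.NewDefaultErrorStrategy()}), assuming the stock SetErrorHandler setter.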

// This is called by {@link //ReportError} when the exception is a
// {@link NoViableAltException}.
// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
// See also [ReportError]
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
	tokens := recognizer.GetTokenStream()
	var input string
@@ -252,48 +237,38 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException].
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
	msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
// See also: [ReportError]
func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
	msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
		" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
//
// @see //ReportError
//
// @param recognizer the parser instance
// @param e the recognition exception
// See also: [ReportError]
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
	ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
	msg := "rule " + ruleName + " " + e.message
	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}

// This method is called to Report a syntax error which requires the removal
// ReportUnwantedToken is called to report a syntax error that requires the removal
// of a token from the input stream. At the time d method is called, the
// erroneous symbol is current {@code LT(1)} symbol and has not yet been
// removed from the input stream. When d method returns,
// {@code recognizer} is in error recovery mode.
// erroneous symbol is the current LT(1) symbol and has not yet been
// removed from the input stream. When this method returns,
// recognizer is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenDeletion} identifies
// This method is called when singleTokenDeletion identifies
// single-token deletion as a viable recovery strategy for a mismatched
// input error.</p>
// input error.
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls beginErrorCondition to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
// [NotifyErrorListeners].
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
	if d.InErrorRecoveryMode(recognizer) {
		return
@@ -307,21 +282,18 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
	recognizer.NotifyErrorListeners(msg, t, nil)
}

// This method is called to Report a syntax error which requires the
// insertion of a missing token into the input stream. At the time d
// method is called, the missing token has not yet been inserted. When d
// method returns, {@code recognizer} is in error recovery mode.
// ReportMissingToken is called to report a syntax error which requires the
// insertion of a missing token into the input stream. At the time this
// method is called, the missing token has not yet been inserted. When this
// method returns, recognizer is in error recovery mode.
//
// <p>This method is called when {@link //singleTokenInsertion} identifies
// This method is called when singleTokenInsertion identifies
// single-token insertion as a viable recovery strategy for a mismatched
// input error.</p>
// input error.
//
// <p>The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
// enter error recovery mode, followed by calling
// {@link Parser//NotifyErrorListeners}.</p>
//
// @param recognizer the parser instance
// The default implementation simply returns if the handler is already in
// error recovery mode. Otherwise, it calls beginErrorCondition to
// enter error recovery mode, followed by calling [NotifyErrorListeners].
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
	if d.InErrorRecoveryMode(recognizer) {
		return
@@ -334,54 +306,48 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
	recognizer.NotifyErrorListeners(msg, t, nil)
}

// <p>The default implementation attempts to recover from the mismatched input
// The RecoverInline default implementation attempts to recover from the mismatched input
// by using single token insertion and deletion as described below. If the
// recovery attempt fails, d method panics an
// {@link InputMisMatchException}.</p>
// recovery attempt fails, this method panics with [InputMisMatchException].
// TODO: Not sure that panic() is the right thing to do here - JI
//
// <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
// # EXTRA TOKEN (single token deletion)
//
// <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
// right token, however, then assume {@code LA(1)} is some extra spurious
// LA(1) is not what we are looking for. If LA(2) has the
// right token, however, then assume LA(1) is some extra spurious
// token and delete it. Then consume and return the next token (which was
// the {@code LA(2)} token) as the successful result of the Match operation.</p>
// the LA(2) token) as the successful result of the Match operation.
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenDeletion}.</p>
// This recovery strategy is implemented by [SingleTokenDeletion].
//
// <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
// # MISSING TOKEN (single token insertion)
//
// <p>If current token (at {@code LA(1)}) is consistent with what could come
// after the expected {@code LA(1)} token, then assume the token is missing
// and use the parser's {@link TokenFactory} to create it on the fly. The
// "insertion" is performed by returning the created token as the successful
// result of the Match operation.</p>
// If the current token - at LA(1) - is consistent with what could come
// after the expected LA(1) token, then assume the token is missing
// and use the parser's [TokenFactory] to create it on the fly. The
// “insertion” is performed by returning the created token as the successful
// result of the Match operation.
//
// <p>This recovery strategy is implemented by {@link
// //singleTokenInsertion}.</p>
// This recovery strategy is implemented by [SingleTokenInsertion].
//
// <p><strong>EXAMPLE</strong></p>
// # Example
//
// <p>For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
// the parser returns from the nested call to {@code expr}, it will have
// call chain:</p>
// For example, input i=(3 is clearly missing the ')'. When
// the parser returns from the nested call to expr, it will have
// the call chain:
//
// <pre>
// stat &rarr expr &rarr atom
// </pre>
//	stat → expr → atom
//
// and it will be trying to Match the {@code ')'} at d point in the
// and it will be trying to Match the ')' at this point in the
// derivation:
//
// <pre>
// => ID '=' '(' INT ')' ('+' atom)* ”
// ^
// </pre>
//	: ID '=' '(' INT ')' ('+' atom)* ';'
//	                     ^
//
// The attempt to Match {@code ')'} will fail when it sees ” and
// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
// The attempt to [Match] ')' will fail when it sees ';' and
// call [RecoverInline]. To recover, it sees that LA(1)==';'
// is in the set of tokens that can follow the ')' token reference
// in rule atom. It can assume that you forgot the ')'.
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
	// SINGLE TOKEN DELETION
	MatchedSymbol := d.SingleTokenDeletion(recognizer)
@@ -396,24 +362,24 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
		return d.GetMissingSymbol(recognizer)
	}
	// even that didn't work; must panic the exception
	panic(NewInputMisMatchException(recognizer))
	recognizer.SetError(NewInputMisMatchException(recognizer))
	return nil
}

// This method implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token
// SingleTokenInsertion implements the single-token insertion inline error recovery
// strategy. It is called by [RecoverInline] if the single-token
// deletion strategy fails to recover from the mismatched input. If this
// method returns {@code true}, {@code recognizer} will be in error recovery
// mode.
//
// <p>This method determines whether or not single-token insertion is viable by
// checking if the {@code LA(1)} input symbol could be successfully Matched
// if it were instead the {@code LA(2)} symbol. If d method returns
// This method determines whether single-token insertion is viable by
// checking if the LA(1) input symbol could be successfully Matched
// if it were instead the LA(2) symbol. If this method returns
// {@code true}, the caller is responsible for creating and inserting a
// token with the correct type to produce d behavior.</p>
// token with the correct type to produce this behavior.
//
// @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false}
// This func returns true if single-token insertion is a viable recovery
// strategy for the current mismatched input.
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
	currentSymbolType := recognizer.GetTokenStream().LA(1)
	// if current token is consistent with what could come after current
@@ -431,23 +397,21 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
	return false
}

// This method implements the single-token deletion inline error recovery
// strategy. It is called by {@link //recoverInline} to attempt to recover
// SingleTokenDeletion implements the single-token deletion inline error recovery
// strategy. It is called by [RecoverInline] to attempt to recover
// from mismatched input. If this method returns nil, the parser and error
// handler state will not have changed. If this method returns non-nil,
// {@code recognizer} will <em>not</em> be in error recovery mode since the
// the recognizer will not be in error recovery mode since the
// returned token was a successful Match.
//
// <p>If the single-token deletion is successful, d method calls
// {@link //ReportUnwantedToken} to Report the error, followed by
// {@link Parser//consume} to actually "delete" the extraneous token. Then,
// before returning {@link //ReportMatch} is called to signal a successful
// Match.</p>
// If the single-token deletion is successful, this method calls
// [ReportUnwantedToken] to Report the error, followed by
// [Consume] to actually “delete” the extraneous token. Then,
// before returning, [ReportMatch] is called to signal a successful
// Match.
//
// @param recognizer the parser instance
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise
// {@code nil}
// The func returns the successfully Matched [Token] instance if single-token
// deletion successfully recovers from the mismatched input, otherwise nil.
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
	NextTokenType := recognizer.GetTokenStream().LA(2)
	expecting := d.GetExpectedTokens(recognizer)
@@ -467,24 +431,28 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
	return nil
}
|
||||
|
||||
// Conjure up a missing token during error recovery.
|
||||
// GetMissingSymbol conjures up a missing token during error recovery.
|
||||
//
|
||||
// The recognizer attempts to recover from single missing
|
||||
// symbols. But, actions might refer to that missing symbol.
|
||||
// For example, x=ID {f($x)}. The action clearly assumes
|
||||
// For example:
|
||||
//
|
||||
// x=ID {f($x)}.
|
||||
//
|
||||
// The action clearly assumes
|
||||
// that there has been an identifier Matched previously and that
|
||||
// $x points at that token. If that token is missing, but
|
||||
// the next token in the stream is what we want we assume that
|
||||
// d token is missing and we keep going. Because we
|
||||
// this token is missing, and we keep going. Because we
|
||||
// have to return some token to replace the missing token,
|
||||
// we have to conjure one up. This method gives the user control
|
||||
// over the tokens returned for missing tokens. Mostly,
|
||||
// you will want to create something special for identifier
|
||||
// tokens. For literals such as '{' and ',', the default
|
||||
// action in the parser or tree parser works. It simply creates
|
||||
// a CommonToken of the appropriate type. The text will be the token.
|
||||
// If you change what tokens must be created by the lexer,
|
||||
// override d method to create the appropriate tokens.
|
||||
// a [CommonToken] of the appropriate type. The text will be the token name.
|
||||
// If you need to change which tokens must be created by the lexer,
|
||||
// override this method to create the appropriate tokens.
|
||||
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
|
||||
currentSymbol := recognizer.GetCurrentToken()
|
||||
expecting := d.GetExpectedTokens(recognizer)
|
||||
@ -498,7 +466,7 @@ func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
|
||||
if expectedTokenType > 0 && expectedTokenType < len(ln) {
|
||||
tokenText = "<missing " + recognizer.GetLiteralNames()[expectedTokenType] + ">"
|
||||
} else {
|
||||
tokenText = "<missing undefined>" // TODO matches the JS impl
|
||||
tokenText = "<missing undefined>" // TODO: matches the JS impl
|
||||
}
|
||||
}
|
||||
current := currentSymbol
|
||||
@ -516,13 +484,13 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
|
||||
return recognizer.GetExpectedTokens()
|
||||
}
|
||||
|
||||
// How should a token be displayed in an error message? The default
|
||||
// is to display just the text, but during development you might
|
||||
// want to have a lot of information spit out. Override in that case
|
||||
// to use t.String() (which, for CommonToken, dumps everything about
|
||||
// GetTokenErrorDisplay determines how a token should be displayed in an error message.
|
||||
// The default is to display just the text, but during development you might
|
||||
// want to have a lot of information spit out. Override this func in that case
|
||||
// to use t.String() (which, for [CommonToken], dumps everything about
|
||||
// the token). This is better than forcing you to override a method in
|
||||
// your token objects because you don't have to go modify your lexer
|
||||
// so that it creates a NewJava type.
|
||||
// so that it creates a new type.
|
||||
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
|
||||
if t == nil {
|
||||
return "<no token>"
|
||||
@ -545,52 +513,57 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
|
||||
return "'" + s + "'"
|
||||
}
|
||||
|
||||
// Compute the error recovery set for the current rule. During
|
||||
// GetErrorRecoverySet computes the error recovery set for the current rule. During
|
||||
// rule invocation, the parser pushes the set of tokens that can
|
||||
// follow that rule reference on the stack d amounts to
|
||||
// follow that rule reference on the stack. This amounts to
|
||||
// computing FIRST of what follows the rule reference in the
|
||||
// enclosing rule. See LinearApproximator.FIRST().
|
||||
//
|
||||
// This local follow set only includes tokens
|
||||
// from within the rule i.e., the FIRST computation done by
|
||||
// ANTLR stops at the end of a rule.
|
||||
//
|
||||
// # EXAMPLE
|
||||
// # Example
|
||||
//
|
||||
// When you find a "no viable alt exception", the input is not
|
||||
// consistent with any of the alternatives for rule r. The best
|
||||
// thing to do is to consume tokens until you see something that
|
||||
// can legally follow a call to r//or* any rule that called r.
|
||||
// can legally follow a call to r or any rule that called r.
|
||||
// You don't want the exact set of viable next tokens because the
|
||||
// input might just be missing a token--you might consume the
|
||||
// rest of the input looking for one of the missing tokens.
|
||||
//
|
||||
// Consider grammar:
|
||||
// Consider the grammar:
|
||||
//
|
||||
// a : '[' b ']'
|
||||
// | '(' b ')'
|
||||
// a : '[' b ']'
|
||||
// | '(' b ')'
|
||||
// ;
|
||||
//
|
||||
// b : c '^' INT
|
||||
// c : ID
|
||||
// | INT
|
||||
// b : c '^' INT
|
||||
// ;
|
||||
//
|
||||
// c : ID
|
||||
// | INT
|
||||
// ;
|
||||
//
|
||||
// At each rule invocation, the set of tokens that could follow
|
||||
// that rule is pushed on a stack. Here are the various
|
||||
// context-sensitive follow sets:
|
||||
//
|
||||
// FOLLOW(b1_in_a) = FIRST(']') = ']'
|
||||
// FOLLOW(b2_in_a) = FIRST(')') = ')'
|
||||
// FOLLOW(c_in_b) = FIRST('^') = '^'
|
||||
// FOLLOW(b1_in_a) = FIRST(']') = ']'
|
||||
// FOLLOW(b2_in_a) = FIRST(')') = ')'
|
||||
// FOLLOW(c_in_b) = FIRST('^') = '^'
|
||||
//
|
||||
// Upon erroneous input "[]", the call chain is
|
||||
// Upon erroneous input “[]”, the call chain is
|
||||
//
|
||||
// a -> b -> c
|
||||
// a → b → c
|
||||
//
|
||||
// and, hence, the follow context stack is:
|
||||
//
|
||||
// depth follow set start of rule execution
|
||||
// 0 <EOF> a (from main())
|
||||
// 1 ']' b
|
||||
// 2 '^' c
|
||||
// Depth Follow set Start of rule execution
|
||||
// 0 <EOF> a (from main())
|
||||
// 1 ']' b
|
||||
// 2 '^' c
|
||||
//
|
||||
// Notice that ')' is not included, because b would have to have
|
||||
// been called from a different context in rule a for ')' to be
|
||||
@ -598,11 +571,14 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// For error recovery, we cannot consider FOLLOW(c)
// (context-sensitive or otherwise). We need the combined set of
// all context-sensitive FOLLOW sets--the set of all tokens that
// all context-sensitive FOLLOW sets - the set of all tokens that
// could follow any reference in the call chain. We need to
// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
// we reSync'd to that token, we'd consume until EOF. We need to
// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
// Sync to context-sensitive FOLLOWs for a, b, and c:
//
// {']','^'}
//
// In this case, for input "[]", LA(1) is ']' and in the set, so we would
// not consume anything. After printing an error, rule c would
// return normally. Rule b would not find the required '^' though.
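The prose above maps directly onto a short walk up the parser's rule invocation stack. A minimal sketch, using the runtime's own names and helpers but not quoting the patched function verbatim:

func errorRecoverySetSketch(recognizer Parser) *IntervalSet {
    atn := recognizer.GetInterpreter().atn
    ctx := recognizer.GetParserRuleContext()
    recoverSet := NewIntervalSet()
    for ctx != nil && ctx.GetInvokingState() >= 0 {
        // Find the rule transition that invoked us and take FIRST of
        // whatever follows that rule reference in the enclosing rule.
        invokingState := atn.states[ctx.GetInvokingState()]
        rt := invokingState.GetTransitions()[0].(*RuleTransition)
        follow := atn.NextTokens(rt.followState, nil)
        recoverSet.addSet(follow)
        ctx = ctx.GetParent().(ParserRuleContext)
    }
    recoverSet.removeOne(TokenEpsilon)
    return recoverSet
}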
@ -620,22 +596,19 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// ANTLR's error recovery mechanism is based upon original ideas:
//
// "Algorithms + Data Structures = Programs" by Niklaus Wirth
// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
// [A note on error recovery in recursive descent parsers].
//
// and
// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
// Parsers]
//
// "A note on error recovery in recursive descent parsers":
// http://portal.acm.org/citation.cfm?id=947902.947905
// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs
//
// Later, Josef Grosch had some good ideas:
//
// "Efficient and Comfortable Error Recovery in Recursive Descent
// Parsers":
// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
//
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
// at run-time upon error to avoid overhead during parsing.
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
    atn := recognizer.GetInterpreter().atn
    ctx := recognizer.GetParserRuleContext()
    recoverSet := NewIntervalSet()
@ -660,40 +633,36 @@ func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet)
    }
}

//
// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
// by immediately canceling the parse operation with a
// {@link ParseCancellationException}. The implementation ensures that the
// {@link ParserRuleContext//exception} field is set for all parse tree nodes
// [ParseCancellationException]. The implementation ensures that the
// [ParserRuleContext//exception] field is set for all parse tree nodes
// that were not completed prior to encountering the error.
//
// <p>
// This error strategy is useful in the following scenarios.</p>
// This error strategy is useful in the following scenarios.
//
// <ul>
// <li><strong>Two-stage parsing:</strong> This error strategy allows the first
// stage of two-stage parsing to immediately terminate if an error is
// encountered, and immediately fall back to the second stage. In addition to
// avoiding wasted work by attempting to recover from errors here, the empty
// implementation of {@link BailErrorStrategy//Sync} improves the performance of
// the first stage.</li>
// <li><strong>Silent validation:</strong> When syntax errors are not being
// Reported or logged, and the parse result is simply ignored if errors occur,
// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
// when the result will be ignored either way.</li>
// </ul>
// - Two-stage parsing: This error strategy allows the first
// stage of two-stage parsing to immediately terminate if an error is
// encountered, and immediately fall back to the second stage. In addition to
// avoiding wasted work by attempting to recover from errors here, the empty
// implementation of [BailErrorStrategy.Sync] improves the performance of
// the first stage.
//
// <p>
// {@code myparser.setErrorHandler(NewBailErrorStrategy())}</p>
// - Silent validation: When syntax errors are not being
// Reported or logged, and the parse result is simply ignored if errors occur,
// the [BailErrorStrategy] avoids wasting work on recovering from errors
// when the result will be ignored either way.
//
// @see Parser//setErrorHandler(ANTLRErrorStrategy)

// myparser.SetErrorHandler(NewBailErrorStrategy())
//
// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)]
type BailErrorStrategy struct {
    *DefaultErrorStrategy
}

var _ ErrorStrategy = &BailErrorStrategy{}

//goland:noinspection GoUnusedExportedFunction
func NewBailErrorStrategy() *BailErrorStrategy {

    b := new(BailErrorStrategy)
@ -703,10 +672,10 @@ func NewBailErrorStrategy() *BailErrorStrategy {
    return b
}

// Instead of recovering from exception {@code e}, re-panic it wrapped
// in a {@link ParseCancellationException} so it is not caught by the
// rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
// Recover Instead of recovering from exception e, re-panic it wrapped
// in a [ParseCancellationException] so it is not caught by the
// rule func catches. Use Exception.GetCause() to get the
// original [RecognitionException].
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
    context := recognizer.GetParserRuleContext()
    for context != nil {
@ -717,10 +686,10 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
            context = nil
        }
    }
    panic(NewParseCancellationException()) // TODO we don't emit e properly
    recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
}

// Make sure we don't attempt to recover inline if the parser
// RecoverInline makes sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
    b.Recover(recognizer, NewInputMisMatchException(recognizer))
@ -728,7 +697,6 @@ func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
    return nil
}

// Make sure we don't attempt to recover from problems in subrules.//
func (b *BailErrorStrategy) Sync(recognizer Parser) {
    // pass
// Sync makes sure we don't attempt to recover from problems in sub-rules.
func (b *BailErrorStrategy) Sync(_ Parser) {
}
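A sketch of the two-stage pattern described above. MyParser and its entry rule Prog are placeholders for a generated parser, and how the bail is surfaced differs across runtime versions (older ones panicked with ParseCancellationException; this change routes it through recognizer.SetError), so the recover() below is illustrative rather than canonical:

func twoStageParse(tokens *CommonTokenStream, p *MyParser) (tree ParseTree) {
    defer func() {
        if recover() != nil {
            // Stage 1 failed fast: rewind and re-parse with full recovery.
            tokens.Seek(0)
            p.SetErrorHandler(NewDefaultErrorStrategy())
            tree = p.Prog()
        }
    }()
    p.SetErrorHandler(NewBailErrorStrategy()) // stage 1: bail on first error
    tree = p.Prog()
    return tree
}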
@ -35,7 +35,7 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In
    // } else {
    // stack := NewError().stack
    // }
    // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
    // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int

    t := new(BaseRecognitionException)

@ -43,15 +43,17 @@ func NewBaseRecognitionException(message string, recognizer Recognizer, input In
    t.recognizer = recognizer
    t.input = input
    t.ctx = ctx
    // The current {@link Token} when an error occurred. Since not all streams

    // The current Token when an error occurred. Since not all streams
    // support accessing symbols by index, we have to track the {@link Token}
    // instance itself.
    //
    t.offendingToken = nil

    // Get the ATN state number the parser was in at the time the error
    // occurred. For {@link NoViableAltException} and
    // {@link LexerNoViableAltException} exceptions, this is the
    // {@link DecisionState} number. For others, it is the state whose outgoing
    // edge we couldn't Match.
    // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
    // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
    //
    t.offendingState = -1
    if t.recognizer != nil {
        t.offendingState = t.recognizer.GetState()
@ -74,15 +76,15 @@ func (b *BaseRecognitionException) GetInputStream() IntStream {

// <p>If the state number is not known, b method returns -1.</p>

// Gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time b exception was panicn.
// getExpectedTokens gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time this exception was raised.
//
// <p>If the set of expected tokens is not known and could not be computed,
// b method returns {@code nil}.</p>
// If the set of expected tokens is not known and could not be computed,
// this method returns nil.
//
// @return The set of token types that could potentially follow the current
// state in the ATN, or {@code nil} if the information is not available.
// /
// The func returns the set of token types that could potentially follow the current
// state in the {ATN}, or nil if the information is not available.

func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
    if b.recognizer != nil {
        return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
@ -99,10 +101,10 @@ type LexerNoViableAltException struct {
    *BaseRecognitionException

    startIndex int
    deadEndConfigs ATNConfigSet
    deadEndConfigs *ATNConfigSet
}

func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {

    l := new(LexerNoViableAltException)

@ -128,14 +130,16 @@ type NoViableAltException struct {
    startToken Token
    offendingToken Token
    ctx ParserRuleContext
    deadEndConfigs ATNConfigSet
    deadEndConfigs *ATNConfigSet
}

// Indicates that the parser could not decide which of two or more paths
// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
// in the various paths when the error. Reported by ReportNoViableAlternative()
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
// in the various paths when the error.
//
// Reported by [ReportNoViableAlternative]
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {

    if ctx == nil {
        ctx = recognizer.GetParserRuleContext()
@ -157,12 +161,14 @@ func NewNoViableAltException(recognizer Parser, input TokenStream, startToken To
    n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)

    // Which configurations did we try at input.Index() that couldn't Match
    // input.LT(1)?//
    // input.LT(1)
    n.deadEndConfigs = deadEndConfigs

    // The token object at the start index the input stream might
    // not be buffering tokens so get a reference to it. (At the
    // time the error occurred, of course the stream needs to keep a
    // buffer all of the tokens but later we might not have access to those.)
    // not be buffering tokens so get a reference to it.
    //
    // At the time the error occurred, of course the stream needs to keep a
    // buffer of all the tokens, but later we might not have access to those.
    n.startToken = startToken
    n.offendingToken = offendingToken

@ -173,7 +179,7 @@ type InputMisMatchException struct {
    *BaseRecognitionException
}

// This signifies any kind of mismatched input exceptions such as
// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as
// when the current input does not Match the expected token.
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {

@ -186,11 +192,10 @@ func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {

}

// A semantic predicate failed during validation. Validation of predicates
// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
// occurs when normally parsing the alternative just like Matching a token.
// Disambiguating predicate evaluation occurs when we test a predicate during
// prediction.

type FailedPredicateException struct {
    *BaseRecognitionException

@ -199,6 +204,7 @@ type FailedPredicateException struct {
    predicate string
}

//goland:noinspection GoUnusedExportedFunction
func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {

    f := new(FailedPredicateException)
@ -231,6 +237,21 @@ func (f *FailedPredicateException) formatMessage(predicate, message string) stri
type ParseCancellationException struct {
}

func (p ParseCancellationException) GetOffendingToken() Token {
    //TODO implement me
    panic("implement me")
}

func (p ParseCancellationException) GetMessage() string {
    //TODO implement me
    panic("implement me")
}

func (p ParseCancellationException) GetInputStream() IntStream {
    //TODO implement me
    panic("implement me")
}

func NewParseCancellationException() *ParseCancellationException {
    // Error.call(this)
    // Error.captureStackTrace(this, ParseCancellationException)
@ -5,8 +5,7 @@
package antlr

import (
    "bytes"
    "io"
    "bufio"
    "os"
)

@ -14,34 +13,53 @@ import (
// when you construct the object.

type FileStream struct {
    *InputStream

    InputStream
    filename string
}

//goland:noinspection GoUnusedExportedFunction
func NewFileStream(fileName string) (*FileStream, error) {

    buf := bytes.NewBuffer(nil)

    f, err := os.Open(fileName)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    _, err = io.Copy(buf, f)

    defer func(f *os.File) {
        errF := f.Close()
        if errF != nil {
        }
    }(f)

    reader := bufio.NewReader(f)
    fInfo, err := f.Stat()
    if err != nil {
        return nil, err
    }

    fs := new(FileStream)
    fs := &FileStream{
        InputStream: InputStream{
            index: 0,
            name:  fileName,
        },
        filename: fileName,
    }

    fs.filename = fileName
    s := string(buf.Bytes())

    fs.InputStream = NewInputStream(s)
    // Pre-build the buffer and read runes efficiently
    //
    fs.data = make([]rune, 0, fInfo.Size())
    for {
        r, _, err := reader.ReadRune()
        if err != nil {
            break
        }
        fs.data = append(fs.data, r)
    }
    fs.size = len(fs.data) // Size in runes

    // All done.
    //
    return fs, nil

}
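For orientation, a hypothetical caller of the rewritten constructor might look like the following (fmt is assumed to be imported; the path is a placeholder). Because the whole file is decoded into a []rune up front, Size() reports runes, not bytes:

func printFileInfo(path string) error {
    fs, err := NewFileStream(path) // decodes the entire file into a rune buffer
    if err != nil {
        return err
    }
    fmt.Printf("%s: %d runes\n", fs.GetSourceName(), fs.Size())
    return nil
}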
func (f *FileStream) GetSourceName() string {
157 vendor/github.com/antlr4-go/antlr/v4/input_stream.go generated vendored Normal file
@ -0,0 +1,157 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "bufio"
    "io"
)

type InputStream struct {
    name  string
    index int
    data  []rune
    size  int
}

// NewIoStream creates a new input stream from the given io.Reader reader.
// Note that the reader is read completely into memory and so it must actually
// have a stopping point - you cannot pass in a reader on an open-ended source such
// as a socket for instance.
func NewIoStream(reader io.Reader) *InputStream {

    rReader := bufio.NewReader(reader)

    is := &InputStream{
        name:  "<empty>",
        index: 0,
    }

    // Pre-build the buffer and read runes reasonably efficiently given that
    // we don't exactly know how big the input is.
    //
    is.data = make([]rune, 0, 512)
    for {
        r, _, err := rReader.ReadRune()
        if err != nil {
            break
        }
        is.data = append(is.data, r)
    }
    is.size = len(is.data) // number of runes
    return is
}

// NewInputStream creates a new input stream from the given string
func NewInputStream(data string) *InputStream {

    is := &InputStream{
        name:  "<empty>",
        index: 0,
        data:  []rune(data), // This is actually the most efficient way
    }
    is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too
    return is
}

func (is *InputStream) reset() {
    is.index = 0
}

// Consume moves the input pointer to the next character in the input stream
func (is *InputStream) Consume() {
    if is.index >= is.size {
        // assert is.LA(1) == TokenEOF
        panic("cannot consume EOF")
    }
    is.index++
}

// LA returns the character at the given offset from the start of the input stream
func (is *InputStream) LA(offset int) int {

    if offset == 0 {
        return 0 // nil
    }
    if offset < 0 {
        offset++ // e.g., translate LA(-1) to use offset=0
    }
    pos := is.index + offset - 1

    if pos < 0 || pos >= is.size { // invalid
        return TokenEOF
    }

    return int(is.data[pos])
}
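The offset arithmetic is easiest to see with a tiny input (assuming fmt is imported): LA(1) is the next character, LA(-1) the one just consumed, and anything out of range collapses to TokenEOF:

func demoLA() {
    is := NewInputStream("ab")
    fmt.Println(is.LA(1))  // 97 ('a'): the lookahead character
    is.Consume()
    fmt.Println(is.LA(1))  // 98 ('b')
    fmt.Println(is.LA(-1)) // 97: the character just consumed
    fmt.Println(is.LA(2))  // TokenEOF: past the end of the input
}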
// LT returns the character at the given offset from the start of the input stream
func (is *InputStream) LT(offset int) int {
    return is.LA(offset)
}

// Index returns the current offset in to the input stream
func (is *InputStream) Index() int {
    return is.index
}

// Size returns the total number of characters in the input stream
func (is *InputStream) Size() int {
    return is.size
}

// Mark does nothing here as we have entire buffer
func (is *InputStream) Mark() int {
    return -1
}

// Release does nothing here as we have entire buffer
func (is *InputStream) Release(_ int) {
}

// Seek the input point to the provided index offset
func (is *InputStream) Seek(index int) {
    if index <= is.index {
        is.index = index // just jump don't update stream state (line,...)
        return
    }
    // seek forward
    is.index = intMin(index, is.size)
}

// GetText returns the text from the input stream from the start to the stop index
func (is *InputStream) GetText(start int, stop int) string {
    if stop >= is.size {
        stop = is.size - 1
    }
    if start >= is.size {
        return ""
    }

    return string(is.data[start : stop+1])
}
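Note that the stop index here is inclusive, and out-of-range indexes are clamped rather than panicking. A quick illustration (assuming fmt):

func demoGetText() {
    is := NewInputStream("hello")
    fmt.Println(is.GetText(0, 2))  // "hel": stop index 2 is included
    fmt.Println(is.GetText(1, 99)) // "ello": stop is clamped to the final rune
    fmt.Println(is.GetText(9, 12)) // "": start past the end yields empty
}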
// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
// character of the stop token
func (is *InputStream) GetTextFromTokens(start, stop Token) string {
    if start != nil && stop != nil {
        return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
    }

    return ""
}

func (is *InputStream) GetTextFromInterval(i Interval) string {
    return is.GetText(i.Start, i.Stop)
}

func (*InputStream) GetSourceName() string {
    return ""
}

// String returns the entire input stream as a string
func (is *InputStream) String() string {
    return string(is.data)
}
@ -14,20 +14,21 @@ type Interval struct {
    Stop int
}

/* stop is not included! */
func NewInterval(start, stop int) *Interval {
    i := new(Interval)

    i.Start = start
    i.Stop = stop
    return i
// NewInterval creates a new interval with the given start and stop values.
func NewInterval(start, stop int) Interval {
    return Interval{
        Start: start,
        Stop:  stop,
    }
}

func (i *Interval) Contains(item int) bool {
// Contains returns true if the given item is contained within the interval.
func (i Interval) Contains(item int) bool {
    return item >= i.Start && item < i.Stop
}

func (i *Interval) String() string {
// String generates a string representation of the interval.
func (i Interval) String() string {
    if i.Start == i.Stop-1 {
        return strconv.Itoa(i.Start)
    }
@ -35,15 +36,18 @@ func (i *Interval) String() string {
    return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}

func (i *Interval) length() int {
// Length returns the length of the interval.
func (i Interval) Length() int {
    return i.Stop - i.Start
}
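The value-type rewrite keeps the half-open semantics: Stop is excluded by Contains and Length, while String prints an inclusive upper bound. For example (assuming fmt):

func demoInterval() {
    iv := NewInterval(3, 6)     // covers 3, 4, 5
    fmt.Println(iv.Contains(5)) // true
    fmt.Println(iv.Contains(6)) // false: Stop is exclusive here
    fmt.Println(iv.Length())    // 3
    fmt.Println(iv.String())    // "3..5": printed with an inclusive upper bound
}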
// IntervalSet represents a collection of [Intervals], which may be read-only.
type IntervalSet struct {
    intervals []*Interval
    intervals []Interval
    readOnly  bool
}

// NewIntervalSet creates a new empty, writable, interval set.
func NewIntervalSet() *IntervalSet {

    i := new(IntervalSet)
@ -54,6 +58,20 @@ func NewIntervalSet() *IntervalSet {
    return i
}

func (i *IntervalSet) Equals(other *IntervalSet) bool {
    if len(i.intervals) != len(other.intervals) {
        return false
    }

    for k, v := range i.intervals {
        if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
            return false
        }
    }

    return true
}

func (i *IntervalSet) first() int {
    if len(i.intervals) == 0 {
        return TokenInvalidType
@ -70,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) {
    i.addInterval(NewInterval(l, h+1))
}

func (i *IntervalSet) addInterval(v *Interval) {
func (i *IntervalSet) addInterval(v Interval) {
    if i.intervals == nil {
        i.intervals = make([]*Interval, 0)
        i.intervals = make([]Interval, 0)
        i.intervals = append(i.intervals, v)
    } else {
        // find insert pos
        for k, interval := range i.intervals {
            // distinct range -> insert
            if v.Stop < interval.Start {
                i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
                i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
                return
            } else if v.Stop == interval.Start {
                i.intervals[k].Start = v.Start
@ -139,16 +157,16 @@ func (i *IntervalSet) contains(item int) bool {
}

func (i *IntervalSet) length() int {
    len := 0
    iLen := 0

    for _, v := range i.intervals {
        len += v.length()
        iLen += v.Length()
    }

    return len
    return iLen
}

func (i *IntervalSet) removeRange(v *Interval) {
func (i *IntervalSet) removeRange(v Interval) {
    if v.Start == v.Stop-1 {
        i.removeOne(v.Start)
    } else if i.intervals != nil {
@ -162,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) {
        i.intervals[k] = NewInterval(ni.Start, v.Start)
        x := NewInterval(v.Stop, ni.Stop)
        // i.intervals.splice(k, 0, x)
        i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
        i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
        return
    } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
        // i.intervals.splice(k, 1)
@ -199,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) {
        x := NewInterval(ki.Start, v)
        ki.Start = v + 1
        // i.intervals.splice(k, 0, x)
        i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
        i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
        return
    }
}
@ -223,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
    return i.toIndexString()
}

func (i *IntervalSet) GetIntervals() []*Interval {
func (i *IntervalSet) GetIntervals() []Interval {
    return i.intervals
}
685 vendor/github.com/antlr4-go/antlr/v4/jcollect.go generated vendored Normal file
@ -0,0 +1,685 @@
package antlr

// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

import (
    "container/list"
    "runtime/debug"
    "sort"
    "sync"
)

// Collectable is an interface that a struct should implement if it is to be
// usable as a key in these collections.
type Collectable[T any] interface {
    Hash() int
    Equals(other Collectable[T]) bool
}

type Comparator[T any] interface {
    Hash1(o T) int
    Equals2(T, T) bool
}

type CollectionSource int
type CollectionDescriptor struct {
    SybolicName string
    Description string
}

const (
    UnknownCollection CollectionSource = iota
    ATNConfigLookupCollection
    ATNStateCollection
    DFAStateCollection
    ATNConfigCollection
    PredictionContextCollection
    SemanticContextCollection
    ClosureBusyCollection
    PredictionVisitedCollection
    MergeCacheCollection
    PredictionContextCacheCollection
    AltSetCollection
    ReachSetCollection
)

var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
    UnknownCollection: {
        SybolicName: "UnknownCollection",
        Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
    },
    ATNConfigCollection: {
        SybolicName: "ATNConfigCollection",
        Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
            "For instance, it is used to store the results of the closure() operation in the ATN.",
    },
    ATNConfigLookupCollection: {
        SybolicName: "ATNConfigLookupCollection",
        Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
            "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
    },
    ATNStateCollection: {
        SybolicName: "ATNStateCollection",
        Description: "ATNState collection. This is used to store the states of the ATN.",
    },
    DFAStateCollection: {
        SybolicName: "DFAStateCollection",
        Description: "DFAState collection. This is used to store the states of the DFA.",
    },
    PredictionContextCollection: {
        SybolicName: "PredictionContextCollection",
        Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.",
    },
    SemanticContextCollection: {
        SybolicName: "SemanticContextCollection",
        Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
    },
    ClosureBusyCollection: {
        SybolicName: "ClosureBusyCollection",
        Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." +
            "It stores ATNConfigs that are currently being processed in the closure() operation.",
    },
    PredictionVisitedCollection: {
        SybolicName: "PredictionVisitedCollection",
        Description: "A map that records whether we have visited a particular context when searching through cached entries.",
    },
    MergeCacheCollection: {
        SybolicName: "MergeCacheCollection",
        Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
    },
    PredictionContextCacheCollection: {
        SybolicName: "PredictionContextCacheCollection",
        Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
    },
    AltSetCollection: {
        SybolicName: "AltSetCollection",
        Description: "Used to eliminate duplicate alternatives in an ATN config set.",
    },
    ReachSetCollection: {
        SybolicName: "ReachSetCollection",
        Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
    },
}

// JStore implements a container that allows the use of a struct to calculate the key
// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
// serve the needs of the ANTLR Go runtime.
//
// For ease of porting the logic of the runtime from the master target (Java), this collection
// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
// function as the key. The values are stored in a standard go map which internally is a form of hashmap
// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
// we understand the requirements, then this is fine - this is not a general purpose collection.
type JStore[T any, C Comparator[T]] struct {
    store      map[int][]T
    len        int
    comparator Comparator[T]
    stats      *JStatRec
}

func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {

    if comparator == nil {
        panic("comparator cannot be nil")
    }

    s := &JStore[T, C]{
        store:      make(map[int][]T, 1),
        comparator: comparator,
    }
    if collectStats {
        s.stats = &JStatRec{
            Source:      cType,
            Description: desc,
        }

        // Track where we created it from if we are being asked to do so
        if runtimeConfig.statsTraceStacks {
            s.stats.CreateStack = debug.Stack()
        }
        Statistics.AddJStatRec(s.stats)
    }
    return s
}

// Put will store given value in the collection. Note that the key for storage is generated from
// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
// as any kind of general collection.
//
// If the key has a hash conflict, then the value will be added to the slice of values associated with the
// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
// tested by calling the equals() method on the key.
//
// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
//
// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
func (s *JStore[T, C]) Put(value T) (v T, exists bool) {

    if collectStats {
        s.stats.Puts++
    }
    kh := s.comparator.Hash1(value)

    var hClash bool
    for _, v1 := range s.store[kh] {
        hClash = true
        if s.comparator.Equals2(value, v1) {
            if collectStats {
                s.stats.PutHits++
                s.stats.PutHashConflicts++
            }
            return v1, true
        }
        if collectStats {
            s.stats.PutMisses++
        }
    }
    if collectStats && hClash {
        s.stats.PutHashConflicts++
    }
    s.store[kh] = append(s.store[kh], value)

    if collectStats {
        if len(s.store[kh]) > s.stats.MaxSlotSize {
            s.stats.MaxSlotSize = len(s.store[kh])
        }
    }
    s.len++
    if collectStats {
        s.stats.CurSize = s.len
        if s.len > s.stats.MaxSize {
            s.stats.MaxSize = s.len
        }
    }
    return value, false
}
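To make the Put contract concrete, here is a hypothetical key type and comparator (the runtime's real comparators follow the same Hash1/Equals2 shape). The second Put of an equal value returns the already-stored instance with exists == true:

type point struct{ x, y int }

type pointComparator struct{}

func (pointComparator) Hash1(p point) int       { return p.x*31 + p.y }
func (pointComparator) Equals2(a, b point) bool { return a == b }

func demoJStore() {
    s := NewJStore[point, pointComparator](pointComparator{}, UnknownCollection, "demo")
    _, existed := s.Put(point{1, 2})   // stored: existed == false
    v, existed2 := s.Put(point{1, 2})  // deduplicated: returns the stored value
    fmt.Println(existed, existed2, v, s.Len()) // false true {1 2} 1
}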
// Get will return the value associated with the key - the type of the key is the same type as the value
// which would not generally be useful, but this is a specific thing for ANTLR where the key is
// generated using the object we are going to store.
func (s *JStore[T, C]) Get(key T) (T, bool) {
    if collectStats {
        s.stats.Gets++
    }
    kh := s.comparator.Hash1(key)
    var hClash bool
    for _, v := range s.store[kh] {
        hClash = true
        if s.comparator.Equals2(key, v) {
            if collectStats {
                s.stats.GetHits++
                s.stats.GetHashConflicts++
            }
            return v, true
        }
        if collectStats {
            s.stats.GetMisses++
        }
    }
    if collectStats {
        if hClash {
            s.stats.GetHashConflicts++
        }
        s.stats.GetNoEnt++
    }
    return key, false
}

// Contains returns true if the given key is present in the store
func (s *JStore[T, C]) Contains(key T) bool {
    _, present := s.Get(key)
    return present
}

func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
    vs := make([]T, 0, len(s.store))
    for _, v := range s.store {
        vs = append(vs, v...)
    }
    sort.Slice(vs, func(i, j int) bool {
        return less(vs[i], vs[j])
    })

    return vs
}

func (s *JStore[T, C]) Each(f func(T) bool) {
    for _, e := range s.store {
        for _, v := range e {
            f(v)
        }
    }
}

func (s *JStore[T, C]) Len() int {
    return s.len
}

func (s *JStore[T, C]) Values() []T {
    vs := make([]T, 0, len(s.store))
    for _, e := range s.store {
        vs = append(vs, e...)
    }
    return vs
}

type entry[K, V any] struct {
    key K
    val V
}

type JMap[K, V any, C Comparator[K]] struct {
    store      map[int][]*entry[K, V]
    len        int
    comparator Comparator[K]
    stats      *JStatRec
}

func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
    m := &JMap[K, V, C]{
        store:      make(map[int][]*entry[K, V], 1),
        comparator: comparator,
    }
    if collectStats {
        m.stats = &JStatRec{
            Source:      cType,
            Description: desc,
        }
        // Track where we created it from if we are being asked to do so
        if runtimeConfig.statsTraceStacks {
            m.stats.CreateStack = debug.Stack()
        }
        Statistics.AddJStatRec(m.stats)
    }
    return m
}

func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
    if collectStats {
        m.stats.Puts++
    }
    kh := m.comparator.Hash1(key)

    var hClash bool
    for _, e := range m.store[kh] {
        hClash = true
        if m.comparator.Equals2(e.key, key) {
            if collectStats {
                m.stats.PutHits++
                m.stats.PutHashConflicts++
            }
            return e.val, true
        }
        if collectStats {
            m.stats.PutMisses++
        }
    }
    if collectStats {
        if hClash {
            m.stats.PutHashConflicts++
        }
    }
    m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
    if collectStats {
        if len(m.store[kh]) > m.stats.MaxSlotSize {
            m.stats.MaxSlotSize = len(m.store[kh])
        }
    }
    m.len++
    if collectStats {
        m.stats.CurSize = m.len
        if m.len > m.stats.MaxSize {
            m.stats.MaxSize = m.len
        }
    }
    return val, false
}

func (m *JMap[K, V, C]) Values() []V {
    vs := make([]V, 0, len(m.store))
    for _, e := range m.store {
        for _, v := range e {
            vs = append(vs, v.val)
        }
    }
    return vs
}

func (m *JMap[K, V, C]) Get(key K) (V, bool) {
    if collectStats {
        m.stats.Gets++
    }
    var none V
    kh := m.comparator.Hash1(key)
    var hClash bool
    for _, e := range m.store[kh] {
        hClash = true
        if m.comparator.Equals2(e.key, key) {
            if collectStats {
                m.stats.GetHits++
                m.stats.GetHashConflicts++
            }
            return e.val, true
        }
        if collectStats {
            m.stats.GetMisses++
        }
    }
    if collectStats {
        if hClash {
            m.stats.GetHashConflicts++
        }
        m.stats.GetNoEnt++
    }
    return none, false
}

func (m *JMap[K, V, C]) Len() int {
    return m.len
}

func (m *JMap[K, V, C]) Delete(key K) {
    kh := m.comparator.Hash1(key)
    for i, e := range m.store[kh] {
        if m.comparator.Equals2(e.key, key) {
            m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
            m.len--
            return
        }
    }
}

func (m *JMap[K, V, C]) Clear() {
    m.store = make(map[int][]*entry[K, V])
}
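JMap follows the same bucket-of-entries pattern with a separate value type. Reusing the hypothetical point comparator from the JStore sketch above:

func demoJMap() {
    m := NewJMap[point, string, pointComparator](pointComparator{}, UnknownCollection, "demo")
    m.Put(point{1, 2}, "near origin")
    if v, ok := m.Get(point{1, 2}); ok {
        fmt.Println(v) // "near origin": equality via the comparator, not ==
    }
    m.Delete(point{1, 2})
    fmt.Println(m.Len()) // 0
}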
type JPCMap struct {
    store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
    size  int
    stats *JStatRec
}

func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
    m := &JPCMap{
        store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
    }
    if collectStats {
        m.stats = &JStatRec{
            Source:      cType,
            Description: desc,
        }
        // Track where we created it from if we are being asked to do so
        if runtimeConfig.statsTraceStacks {
            m.stats.CreateStack = debug.Stack()
        }
        Statistics.AddJStatRec(m.stats)
    }
    return m
}

func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
    if collectStats {
        pcm.stats.Gets++
    }
    // Do we have a map stored by k1?
    //
    m2, present := pcm.store.Get(k1)
    if present {
        if collectStats {
            pcm.stats.GetHits++
        }
        // We found a map of values corresponding to k1, so now we need to look up k2 in that map
        //
        return m2.Get(k2)
    }
    if collectStats {
        pcm.stats.GetMisses++
    }
    return nil, false
}

func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {

    if collectStats {
        pcm.stats.Puts++
    }
    // First does a map already exist for k1?
    //
    if m2, present := pcm.store.Get(k1); present {
        if collectStats {
            pcm.stats.PutHits++
        }
        _, present = m2.Put(k2, v)
        if !present {
            pcm.size++
            if collectStats {
                pcm.stats.CurSize = pcm.size
                if pcm.size > pcm.stats.MaxSize {
                    pcm.stats.MaxSize = pcm.size
                }
            }
        }
    } else {
        // No map found for k1, so we create it, add in our value, then store is
        //
        if collectStats {
            pcm.stats.PutMisses++
            m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
        } else {
            m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
        }

        m2.Put(k2, v)
        pcm.store.Put(k1, m2)
        pcm.size++
    }
}

type JPCMap2 struct {
    store map[int][]JPCEntry
    size  int
    stats *JStatRec
}

type JPCEntry struct {
    k1, k2, v *PredictionContext
}

func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
    m := &JPCMap2{
        store: make(map[int][]JPCEntry, 1000),
    }
    if collectStats {
        m.stats = &JStatRec{
            Source:      cType,
            Description: desc,
        }
        // Track where we created it from if we are being asked to do so
        if runtimeConfig.statsTraceStacks {
            m.stats.CreateStack = debug.Stack()
        }
        Statistics.AddJStatRec(m.stats)
    }
    return m
}

func dHash(k1, k2 *PredictionContext) int {
    return k1.cachedHash*31 + k2.cachedHash
}

func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
    if collectStats {
        pcm.stats.Gets++
    }

    h := dHash(k1, k2)
    var hClash bool
    for _, e := range pcm.store[h] {
        hClash = true
        if e.k1.Equals(k1) && e.k2.Equals(k2) {
            if collectStats {
                pcm.stats.GetHits++
                pcm.stats.GetHashConflicts++
            }
            return e.v, true
        }
        if collectStats {
            pcm.stats.GetMisses++
        }
    }
    if collectStats {
        if hClash {
            pcm.stats.GetHashConflicts++
        }
        pcm.stats.GetNoEnt++
    }
    return nil, false
}

func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
    if collectStats {
        pcm.stats.Puts++
    }
    h := dHash(k1, k2)
    var hClash bool
    for _, e := range pcm.store[h] {
        hClash = true
        if e.k1.Equals(k1) && e.k2.Equals(k2) {
            if collectStats {
                pcm.stats.PutHits++
                pcm.stats.PutHashConflicts++
            }
            return e.v, true
        }
        if collectStats {
            pcm.stats.PutMisses++
        }
    }
    if collectStats {
        if hClash {
            pcm.stats.PutHashConflicts++
        }
    }
    pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
    pcm.size++
    if collectStats {
        pcm.stats.CurSize = pcm.size
        if pcm.size > pcm.stats.MaxSize {
            pcm.stats.MaxSize = pcm.size
        }
    }
    return nil, false
}
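JPCMap2 flattens the two-level map into one bucket table keyed on both operands at once; dHash is the usual multiply-and-add combination of the two precomputed context hashes. The idea, shown generically (illustrative only, not a runtime API):

// Combine two precomputed hashes the way dHash does; the odd multiplier
// keeps (a, b) and (b, a) in different buckets in most cases.
func pairHash(h1, h2 int) int {
    return h1*31 + h2
}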
type VisitEntry struct {
    k *PredictionContext
    v *PredictionContext
}
type VisitRecord struct {
    store map[*PredictionContext]*PredictionContext
    len   int
    stats *JStatRec
}

type VisitList struct {
    cache *list.List
    lock  sync.RWMutex
}

var visitListPool = VisitList{
    cache: list.New(),
    lock:  sync.RWMutex{},
}

// NewVisitRecord returns a new VisitRecord instance from the pool if available.
// Note that this "map" uses a pointer as a key because we are emulating the behavior of
// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal,
// which means is the key the same reference to an object rather than is it .equals() to another
// object.
func NewVisitRecord() *VisitRecord {
    visitListPool.lock.Lock()
    el := visitListPool.cache.Front()
    defer visitListPool.lock.Unlock()
    var vr *VisitRecord
    if el == nil {
        vr = &VisitRecord{
            store: make(map[*PredictionContext]*PredictionContext),
        }
        if collectStats {
            vr.stats = &JStatRec{
                Source:      PredictionContextCacheCollection,
                Description: "VisitRecord",
            }
            // Track where we created it from if we are being asked to do so
            if runtimeConfig.statsTraceStacks {
                vr.stats.CreateStack = debug.Stack()
            }
        }
    } else {
        vr = el.Value.(*VisitRecord)
        visitListPool.cache.Remove(el)
        vr.store = make(map[*PredictionContext]*PredictionContext)
    }
    if collectStats {
        Statistics.AddJStatRec(vr.stats)
    }
    return vr
}

func (vr *VisitRecord) Release() {
    vr.len = 0
    vr.store = nil
    if collectStats {
        vr.stats.MaxSize = 0
        vr.stats.CurSize = 0
        vr.stats.Gets = 0
        vr.stats.GetHits = 0
        vr.stats.GetMisses = 0
        vr.stats.GetHashConflicts = 0
        vr.stats.GetNoEnt = 0
        vr.stats.Puts = 0
        vr.stats.PutHits = 0
        vr.stats.PutMisses = 0
        vr.stats.PutHashConflicts = 0
        vr.stats.MaxSlotSize = 0
    }
    visitListPool.lock.Lock()
    visitListPool.cache.PushBack(vr)
    visitListPool.lock.Unlock()
}

func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
    if collectStats {
        vr.stats.Gets++
    }
    v := vr.store[k]
    if v != nil {
        if collectStats {
            vr.stats.GetHits++
        }
        return v, true
    }
    if collectStats {
        vr.stats.GetNoEnt++
    }
    return nil, false
}

func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
    if collectStats {
        vr.stats.Puts++
    }
    vr.store[k] = v
    vr.len++
    if collectStats {
        vr.stats.CurSize = vr.len
        if vr.len > vr.stats.MaxSize {
            vr.stats.MaxSize = vr.len
        }
    }
    return v, false
}
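A typical lifecycle for the pooled record, assuming two *PredictionContext values are already in hand; note that lookups are by pointer identity, per the IdentityHashMap note above:

func withVisitRecord(a, b *PredictionContext) {
    vr := NewVisitRecord() // reuses a pooled record when one is available
    defer vr.Release()     // hands the record back to the pool

    vr.Put(a, b)
    if v, ok := vr.Get(a); ok {
        _ = v // found: the key matched by reference, not by Equals
    }
}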
@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer {
    // create a single token. NextToken will return l object after
    // Matching lexer rule(s). If you subclass to allow multiple token
    // emissions, then set l to the last token to be Matched or
    // something nonnil so that the auto token emit mechanism will not
    // something non nil so that the auto token emit mechanism will not
    // emit another token.
    lexer.token = nil

@ -111,6 +111,7 @@ const (
    LexerSkip = -3
)

//goland:noinspection GoUnusedConst
const (
    LexerDefaultTokenChannel = TokenDefaultChannel
    LexerHidden              = TokenHiddenChannel
@ -118,7 +119,7 @@ const (
    LexerMaxCharValue = 0x10FFFF
)

func (b *BaseLexer) reset() {
func (b *BaseLexer) Reset() {
    // wack Lexer state variables
    if b.input != nil {
        b.input.Seek(0) // rewind the input
@ -176,7 +177,7 @@ func (b *BaseLexer) safeMatch() (ret int) {
    return b.Interpreter.Match(b.input, b.mode)
}

// Return a token from l source i.e., Match a token on the char stream.
// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream.
func (b *BaseLexer) NextToken() Token {
    if b.input == nil {
        panic("NextToken requires a non-nil input stream.")
@ -205,9 +206,8 @@ func (b *BaseLexer) NextToken() Token {
        continueOuter := false
        for {
            b.thetype = TokenInvalidType
            ttype := LexerSkip

            ttype = b.safeMatch()
            ttype := b.safeMatch()

            if b.input.LA(1) == TokenEOF {
                b.hitEOF = true
@ -234,12 +234,11 @@ func (b *BaseLexer) NextToken() Token {
    }
}

// Instruct the lexer to Skip creating a token for current lexer rule
// and look for another token. NextToken() knows to keep looking when
// a lexer rule finishes with token set to SKIPTOKEN. Recall that
// Skip instructs the lexer to Skip creating a token for current lexer rule
// and look for another token. [NextToken] knows to keep looking when
// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
// if token==nil at end of any token rule, it creates one for you
// and emits it.
// /
func (b *BaseLexer) Skip() {
    b.thetype = LexerSkip
}
@ -248,23 +247,29 @@ func (b *BaseLexer) More() {
    b.thetype = LexerMore
}

// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode
// will be in force.
func (b *BaseLexer) SetMode(m int) {
    b.mode = m
}

// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the
// current lexer mode to the supplied mode m.
func (b *BaseLexer) PushMode(m int) {
    if LexerATNSimulatorDebug {
    if runtimeConfig.lexerATNSimulatorDebug {
        fmt.Println("pushMode " + strconv.Itoa(m))
    }
    b.modeStack.Push(b.mode)
    b.mode = m
}

// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
// return to.
func (b *BaseLexer) PopMode() int {
    if len(b.modeStack) == 0 {
        panic("Empty Stack")
    }
    if LexerATNSimulatorDebug {
    if runtimeConfig.lexerATNSimulatorDebug {
        fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
    }
    i, _ := b.modeStack.Pop()
@ -280,7 +285,7 @@ func (b *BaseLexer) inputStream() CharStream {
func (b *BaseLexer) SetInputStream(input CharStream) {
    b.input = nil
    b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
    b.reset()
    b.Reset()
    b.input = input
    b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}
@ -289,20 +294,19 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
    return b.tokenFactorySourcePair
}

// By default does not support multiple emits per NextToken invocation
// for efficiency reasons. Subclass and override l method, NextToken,
// and GetToken (to push tokens into a list and pull from that list
// rather than a single variable as l implementation does).
// /
// EmitToken by default does not support multiple emits per [NextToken] invocation
// for efficiency reasons. Subclass and override this func, [NextToken],
// and [GetToken] (to push tokens into a list and pull from that list
// rather than a single variable as this implementation does).
func (b *BaseLexer) EmitToken(token Token) {
    b.token = token
}

// The standard method called to automatically emit a token at the
// Emit is the standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
// use that to set the token's text. Override l method to emit
// custom Token objects or provide a Newfactory.
// use that to set the token's text. Override this method to emit
// custom [Token] objects or provide a new factory.
// /
func (b *BaseLexer) Emit() Token {
    t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
@ -310,6 +314,7 @@ func (b *BaseLexer) Emit() Token {
    return t
}

// EmitEOF emits an EOF token. By default, this is the last token emitted
func (b *BaseLexer) EmitEOF() Token {
    cpos := b.GetCharPositionInLine()
    lpos := b.GetLine()
@ -318,6 +323,7 @@ func (b *BaseLexer) EmitEOF() Token {
    return eof
}

// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
func (b *BaseLexer) GetCharPositionInLine() int {
    return b.Interpreter.GetCharPositionInLine()
}
@ -334,13 +340,12 @@ func (b *BaseLexer) SetType(t int) {
    b.thetype = t
}

// What is the index of the current character of lookahead?///
// GetCharIndex returns the index of the current character of lookahead
func (b *BaseLexer) GetCharIndex() int {
    return b.input.Index()
}

// Return the text Matched so far for the current token or any text override.
// Set the complete text of l token it wipes any previous changes to the text.
// GetText returns the text Matched so far for the current token or any text override.
func (b *BaseLexer) GetText() string {
    if b.text != "" {
        return b.text
@ -349,17 +354,20 @@ func (b *BaseLexer) GetText() string {
    return b.Interpreter.GetText(b.input)
}

// SetText sets the complete text of this token; it wipes any previous changes to the text.
func (b *BaseLexer) SetText(text string) {
    b.text = text
}

// GetATN returns the ATN used by the lexer.
func (b *BaseLexer) GetATN() *ATN {
    return b.Interpreter.ATN()
}

// Return a list of all Token objects in input char stream.
// Forces load of all tokens. Does not include EOF token.
// /
// GetAllTokens returns a list of all [Token] objects in input char stream.
// Forces a load of all tokens that can be made from the input char stream.
//
// Does not include EOF token.
func (b *BaseLexer) GetAllTokens() []Token {
    vl := b.Virt
    tokens := make([]Token, 0)
@ -398,11 +406,13 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string {
    return "'" + b.getErrorDisplayForChar(c) + "'"
}

// Lexers can normally Match any char in it's vocabulary after Matching
// a token, so do the easy thing and just kill a character and hope
// Recover can normally Match any char in its vocabulary after Matching
// a token, so here we do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
// /
//
// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
// a character that makes no sense to the recognizer.
func (b *BaseLexer) Recover(re RecognitionException) {
    if b.input.LA(1) != TokenEOF {
        if _, ok := re.(*LexerNoViableAltException); ok {
@ -7,14 +7,29 @@ package antlr

import "strconv"

const (
    LexerActionTypeChannel  = 0 //The type of a {@link LexerChannelAction} action.
    LexerActionTypeCustom   = 1 //The type of a {@link LexerCustomAction} action.
    LexerActionTypeMode     = 2 //The type of a {@link LexerModeAction} action.
    LexerActionTypeMore     = 3 //The type of a {@link LexerMoreAction} action.
    LexerActionTypePopMode  = 4 //The type of a {@link LexerPopModeAction} action.
    LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
    LexerActionTypeSkip     = 6 //The type of a {@link LexerSkipAction} action.
    LexerActionTypeType     = 7 //The type of a {@link LexerTypeAction} action.
    // LexerActionTypeChannel represents a [LexerChannelAction] action.
    LexerActionTypeChannel = 0

    // LexerActionTypeCustom represents a [LexerCustomAction] action.
    LexerActionTypeCustom = 1

    // LexerActionTypeMode represents a [LexerModeAction] action.
    LexerActionTypeMode = 2

    // LexerActionTypeMore represents a [LexerMoreAction] action.
    LexerActionTypeMore = 3

    // LexerActionTypePopMode represents a [LexerPopModeAction] action.
    LexerActionTypePopMode = 4

    // LexerActionTypePushMode represents a [LexerPushModeAction] action.
    LexerActionTypePushMode = 5

    // LexerActionTypeSkip represents a [LexerSkipAction] action.
    LexerActionTypeSkip = 6

    // LexerActionTypeType represents a [LexerTypeAction] action.
    LexerActionTypeType = 7
)
|
||||
|
||||
type LexerAction interface {
|
||||
@ -39,7 +54,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction {
|
||||
return la
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) execute(lexer Lexer) {
|
||||
func (b *BaseLexerAction) execute(_ Lexer) {
|
||||
panic("Not implemented")
|
||||
}
|
||||
|
||||
@ -52,17 +67,19 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) Hash() int {
|
||||
return b.actionType
|
||||
h := murmurInit(0)
|
||||
h = murmurUpdate(h, b.actionType)
|
||||
return murmurFinish(h, 1)
|
||||
}
|
||||
|
||||
func (b *BaseLexerAction) Equals(other LexerAction) bool {
|
||||
return b == other
|
||||
return b.actionType == other.getActionType()
|
||||
}
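The Hash and Equals hunks above move BaseLexerAction away from Go pointer identity: actions now hash and compare by their actionType value, so structurally identical actions can collapse into the same cached entries. A minimal, self-contained sketch of the same accumulate-and-finish shape (the mixing below is a placeholder; the runtime's murmurInit/murmurUpdate/murmurFinish implement real MurmurHash3):

package main

import "fmt"

// Illustrative stand-ins for the runtime's murmur helpers; only the
// shape (init, per-value update, finish with a count) matches.
func murmurInit(seed int) int          { return seed }
func murmurUpdate(h, value int) int    { return h*31 + value }
func murmurFinish(h, count int) int    { return h ^ count }

const lexerActionTypeSkip = 6 // mirrors LexerActionTypeSkip above

func main() {
	// Two skip actions hash identically, keeping Hash consistent with
	// the new Equals-by-actionType, which hash-based stores require.
	h := murmurInit(0)
	h = murmurUpdate(h, lexerActionTypeSkip)
	fmt.Println(murmurFinish(h, 1))
}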

// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
//
// <p>The {@code Skip} command does not have any parameters, so l action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
// The Skip command does not have any parameters, so this action is
// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE].
type LexerSkipAction struct {
*BaseLexerAction
}
@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction {
return la
}

// Provides a singleton instance of l parameterless lexer action.
// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()

func (l *LexerSkipAction) execute(lexer Lexer) {
lexer.Skip()
}

// String returns a string representation of the current [LexerSkipAction].
func (l *LexerSkipAction) String() string {
return "skip"
}

func (b *LexerSkipAction) Equals(other LexerAction) bool {
return other.getActionType() == LexerActionTypeSkip
}

// Implements the {@code type} lexer action by calling {@link Lexer//setType}
//
// with the assigned type.
@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string {
return "actionType(" + strconv.Itoa(l.thetype) + ")"
}

// Implements the {@code pushMode} lexer action by calling
// {@link Lexer//pushMode} with the assigned mode.
// LexerPushModeAction implements the pushMode lexer action by calling
// [Lexer.pushMode] with the assigned mode.
type LexerPushModeAction struct {
*BaseLexerAction

mode int
}

@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string {
return "pushMode(" + strconv.Itoa(l.mode) + ")"
}

// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
//
// <p>The {@code popMode} command does not have any parameters, so l action is
// implemented as a singleton instance exposed by {@link //INSTANCE}.</p>
// The popMode command does not have any parameters, so this action is
// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE]
type LexerPopModeAction struct {
*BaseLexerAction
}
@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string {
return "more"
}

// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
// the assigned mode.
type LexerModeAction struct {
*BaseLexerAction

mode int
}

@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool {
}
}

// Implements the {@code channel} lexer action by calling
// {@link Lexer//setChannel} with the assigned channel.
// Constructs a New{@code channel} action with the specified channel value.
// @param channel The channel value to pass to {@link Lexer//setChannel}.
// LexerChannelAction implements the channel lexer action by calling
// [Lexer.setChannel] with the assigned channel.
//
// Constructs a new channel action with the specified channel value.
type LexerChannelAction struct {
*BaseLexerAction

channel int
}

// NewLexerChannelAction creates a channel lexer action by calling
// [Lexer.setChannel] with the assigned channel.
//
// Constructs a new channel action with the specified channel value.
func NewLexerChannelAction(channel int) *LexerChannelAction {
l := new(LexerChannelAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string {
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.</p>

// Constructs a Newindexed custom action by associating a character offset
// with a {@link LexerAction}.
//
// <p>Note: This class is only required for lexer actions for which
// {@link LexerAction//isPositionDependent} returns {@code true}.</p>
//
// @param offset The offset into the input {@link CharStream}, relative to
// the token start index, at which the specified lexer action should be
// executed.
// @param action The lexer action to execute at a particular offset in the
// input {@link CharStream}.
type LexerIndexedCustomAction struct {
*BaseLexerAction

offset int
lexerAction LexerAction
isPositionDependent bool
}

// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
// with a [LexerAction].
//
// Note: This class is only required for lexer actions for which
// [LexerAction.isPositionDependent] returns true.
//
// The offset points into the input [CharStream], relative to
// the token start index, at which the specified lexerAction should be
// executed.
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {

l := new(LexerIndexedCustomAction)
@ -29,28 +29,20 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
l.lexerActions = lexerActions

// Caches the result of {@link //hashCode} since the hash code is an element
// of the performance-critical {@link LexerATNConfig//hashCode} operation.
l.cachedHash = murmurInit(57)
// of the performance-critical {@link ATNConfig//hashCode} operation.
l.cachedHash = murmurInit(0)
for _, a := range lexerActions {
l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
}
l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))

return l
}
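The constructor above now seeds the cached hash with murmurInit(0) and folds in every action's hash once at construction, because executor hashes sit on the hot ATNConfig comparison path. A small sketch of the caching idea, using the standard library's FNV in place of the runtime's murmur code:

package main

import (
	"fmt"
	"hash/fnv"
)

type executor struct {
	actionHashes []int
	cachedHash   uint32 // computed once, reused on every comparison
}

func newExecutor(actionHashes []int) *executor {
	h := fnv.New32a()
	for _, a := range actionHashes {
		h.Write([]byte{byte(a)})
	}
	return &executor{actionHashes: actionHashes, cachedHash: h.Sum32()}
}

func main() {
	// Same action sequence, same hash: two such executors can share a
	// DFA state instead of forcing re-simulation.
	fmt.Println(newExecutor([]int{6, 7}).cachedHash == newExecutor([]int{6, 7}).cachedHash)
}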

// Creates a {@link LexerActionExecutor} which executes the actions for
// the input {@code lexerActionExecutor} followed by a specified
// {@code lexerAction}.
//
// @param lexerActionExecutor The executor for actions already traversed by
// the lexer while Matching a token within a particular
// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
// though it were an empty executor.
// @param lexerAction The lexer action to execute after the actions
// specified in {@code lexerActionExecutor}.
//
// @return A {@link LexerActionExecutor} for executing the combine actions
// of {@code lexerActionExecutor} and {@code lexerAction}.
// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
// the input [LexerActionExecutor] followed by a specified
// [LexerAction].
// TODO: This does not match the Java code
func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
if lexerActionExecutor == nil {
return NewLexerActionExecutor([]LexerAction{lexerAction})
@ -59,47 +51,42 @@ func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAc
return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
}

// Creates a {@link LexerActionExecutor} which encodes the current offset
// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
// for position-dependent lexer actions.
//
// <p>Normally, when the executor encounters lexer actions where
// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
// {@link IntStream//seek} on the input {@link CharStream} to set the input
// position to the <em>end</em> of the current token. This behavior provides
// for efficient DFA representation of lexer actions which appear at the end
// Normally, when the executor encounters lexer actions where
// [LexerAction.isPositionDependent] returns true, it calls
// [IntStream.Seek] on the input [CharStream] to set the input
// position to the end of the current token. This behavior provides
// for efficient [DFA] representation of lexer actions which appear at the end
// of a lexer rule, even when the lexer rule Matches a variable number of
// characters.</p>
// characters.
//
// <p>Prior to traversing a Match transition in the ATN, the current offset
// Prior to traversing a Match transition in the [ATN], the current offset
// from the token start index is assigned to all position-dependent lexer
// actions which have not already been assigned a fixed offset. By storing
// the offsets relative to the token start index, the DFA representation of
// the offsets relative to the token start index, the [DFA] representation of
// lexer actions which appear in the middle of tokens remains efficient due
// to sharing among tokens of the same length, regardless of their absolute
// position in the input stream.</p>
// to sharing among tokens of the same Length, regardless of their absolute
// position in the input stream.
//
// <p>If the current executor already has offsets assigned to all
// position-dependent lexer actions, the method returns {@code this}.</p>
// If the current executor already has offsets assigned to all
// position-dependent lexer actions, the method returns this instance.
//
// @param offset The current offset to assign to all position-dependent
// The offset is assigned to all position-dependent
// lexer actions which do not already have offsets assigned.
//
// @return A {@link LexerActionExecutor} which stores input stream offsets
// The func returns a [LexerActionExecutor] that stores input stream offsets
// for all position-dependent lexer actions.
// /
func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
var updatedLexerActions []LexerAction
for i := 0; i < len(l.lexerActions); i++ {
_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
if l.lexerActions[i].getIsPositionDependent() && !ok {
if updatedLexerActions == nil {
updatedLexerActions = make([]LexerAction, 0)

for _, a := range l.lexerActions {
updatedLexerActions = append(updatedLexerActions, a)
}
updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
}

updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
}
}
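The hunk above replaces an element-by-element copy loop with a sized allocation plus a single bulk append, the idiomatic Go copy-on-write step. A runnable sketch of just that idiom:

package main

import "fmt"

// cloneActions sizes the destination once and bulk-appends, as the
// updated fixOffsetBeforeMatch does for lexerActions.
func cloneActions(src []int) []int {
	dst := make([]int, 0, len(src))
	dst = append(dst, src...)
	return dst
}

func main() {
	orig := []int{6, 7, 0}
	cp := cloneActions(orig)
	cp[0] = 99            // mutating the copy leaves the original intact
	fmt.Println(orig, cp) // [6 7 0] [99 7 0]
}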
@ -10,10 +10,8 @@ import (
"strings"
)

//goland:noinspection GoUnusedGlobalVariable
var (
LexerATNSimulatorDebug = false
LexerATNSimulatorDFADebug = false

LexerATNSimulatorMinDFAEdge = 0
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN

@ -32,11 +30,11 @@ type ILexerATNSimulator interface {
}

type LexerATNSimulator struct {
*BaseATNSimulator
BaseATNSimulator

recog Lexer
predictionMode int
mergeCache DoubleDict
mergeCache *JPCMap2
startIndex int
Line int
CharPositionInLine int
@ -46,27 +44,35 @@ type LexerATNSimulator struct {
}

func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
l := new(LexerATNSimulator)

l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
l := &LexerATNSimulator{
BaseATNSimulator: BaseATNSimulator{
atn: atn,
sharedContextCache: sharedContextCache,
},
}

l.decisionToDFA = decisionToDFA
l.recog = recog

// The current token's starting index into the character stream.
// Shared across DFA to ATN simulation in case the ATN fails and the
// DFA did not have a previous accept state. In l case, we use the
// ATN-generated exception object.
l.startIndex = -1
// line number 1..n within the input///

// line number 1..n within the input
l.Line = 1

// The index of the character relative to the beginning of the line
// 0..n-1///
// 0..n-1
l.CharPositionInLine = 0

l.mode = LexerDefaultMode

// Used during DFA/ATN exec to record the most recent accept configuration
// info
l.prevAccept = NewSimState()
// done

return l
}
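The constructor above switches BaseATNSimulator from a pointer field to a value-embedded struct filled in one composite literal, saving an allocation and an indirection on every field access. A stand-in sketch of that initialization pattern (types here are illustrative, not the runtime's):

package main

import "fmt"

type base struct{ atn string }

type lexerSim struct {
	base       // embedded by value: one allocation, promoted fields
	startIndex int
}

func newLexerSim(atn string) *lexerSim {
	return &lexerSim{
		base:       base{atn: atn},
		startIndex: -1, // no accept state captured yet
	}
}

func main() {
	fmt.Println(newLexerSim("demo").atn) // field promoted from the embedded base
}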

@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() {
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
startState := l.atn.modeToStartState[l.mode]

if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
}
oldMode := l.mode
@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {

predict := l.execATN(input, next)

if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
}
return predict
@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {

func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {

if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("start state closure=" + ds0.configs.String())
}
if ds0.isAcceptState {
// allow zero-length tokens
// allow zero-Length tokens
l.captureSimState(l.prevAccept, input, ds0)
}
t := input.LA(1)
s := ds0 // s is current/from DFA state

for { // while more work
if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("execATN loop starting closure: " + s.configs.String())
}

@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
}
}
t = input.LA(1)
s = target // flip current DFA target becomes Newsrc/from state
s = target // flip current DFA target becomes new src/from state
}

return l.failOrAccept(l.prevAccept, input, s.configs, t)
@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState
return nil
}
target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
if LexerATNSimulatorDebug && target != nil {
if runtimeConfig.lexerATNSimulatorDebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
}
return target
}

// Compute a target state for an edge in the DFA, and attempt to add the
// computed state and corresponding edge to the DFA.
// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the
// computed state and corresponding edge to the [DFA].
//
// @param input The input stream
// @param s The current DFA state
// @param t The next input symbol
//
// @return The computed target DFA state for the given input symbol
// {@code t}. If {@code t} does not lead to a valid DFA state, l method
// returns {@link //ERROR}.
// The func returns the computed target [DFA] state for the given input symbol t.
// If this does not lead to a valid [DFA] state, this method
// returns ATNSimulatorError.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
reach := NewOrderedATNConfigSet()

// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
l.getReachableConfigSet(input, s.configs, reach, t)

if len(reach.configs) == 0 { // we got nowhere on t from s
if !reach.hasSemanticContext {
// we got nowhere on t, don't panic out l knowledge it'd
// cause a failover from DFA later.
// cause a fail-over from DFA later.
l.addDFAEdge(s, t, ATNSimulatorError, nil)
}
// stop when we can't Match any more char
return ATNSimulatorError
}
// Add an edge from s to target DFA found/created for reach
return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
return l.addDFAEdge(s, t, nil, reach)
}
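computeTargetState above is the slow half of a memoization pair with getExistingTargetState: a hit on a cached DFA edge skips ATN simulation entirely, and even the failure edge (ATNSimulatorError) is cached. A toy sketch of that lookup-then-compute-then-record pattern, with illustrative types only:

package main

import "fmt"

type dfaState struct {
	id    int
	edges map[rune]*dfaState
}

// errState mirrors ATNSimulatorError: a remembered "leads nowhere" edge,
// so a failing input prefix is not re-simulated either.
var errState = &dfaState{id: -1}

func getExisting(s *dfaState, t rune) *dfaState { return s.edges[t] }

func compute(s *dfaState, t rune, viable bool) *dfaState {
	to := errState // stand-in for the reach-set computation
	if viable {
		to = &dfaState{id: s.id + 1, edges: map[rune]*dfaState{}}
	}
	s.edges[t] = to // record the edge, success or failure
	return to
}

func main() {
	s0 := &dfaState{id: 0, edges: map[rune]*dfaState{}}
	if getExisting(s0, 'a') == nil {
		compute(s0, 'a', true)
	}
	fmt.Println(getExisting(s0, 'a').id) // 1: the second match reuses the cache
}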

func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
if l.prevAccept.dfaState != nil {
lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream,
panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}

// Given a starting configuration set, figure out all ATN configurations
// we can reach upon input {@code t}. Parameter {@code reach} is a return
// parameter.
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations
// we can reach upon input t.
//
// Parameter reach is a return parameter.
func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
// l is used to Skip processing for configs which have a lower priority
// than a config that already reached an accept state for the same rule
// than a runtimeConfig that already reached an accept state for the same rule
SkipAlt := ATNInvalidAltNumber

for _, cfg := range closure.GetItems() {
currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
for _, cfg := range closure.configs {
currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
continue
}

if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {

fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
}

for _, trans := range cfg.GetState().GetTransitions() {
target := l.getReachableTarget(trans, t)
if target != nil {
lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
lexerActionExecutor := cfg.lexerActionExecutor
if lexerActionExecutor != nil {
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
}
treatEOFAsEpsilon := (t == TokenEOF)
config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
treatEOFAsEpsilon := t == TokenEOF
config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
if l.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
// any remaining configs for l alt have a lower priority
@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC
}

func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {
fmt.Printf("ACTION %v\n", lexerActionExecutor)
}
// seek to after last char in token
@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState
return nil
}

func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
configs := NewOrderedATNConfigSet()
for i := 0; i < len(p.GetTransitions()); i++ {
target := p.GetTransitions()[i].getTarget()
@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord
return configs
}

// Since the alternatives within any lexer decision are ordered by
// preference, l method stops pursuing the closure as soon as an accept
// closure since the alternatives within any lexer decision are ordered by
// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
// search from {@code config}, all other (potentially reachable) states for
// l rule would have a lower priority.
// search from runtimeConfig, all other (potentially reachable) states for
// this rule would have a lower priority.
//
// @return {@code true} if an accept state is reached, otherwise
// {@code false}.
func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
// The func returns true if an accept state is reached.
func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {

if LexerATNSimulatorDebug {
fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("closure(" + config.String() + ")")
}

_, ok := config.state.(*RuleStopState)
if ok {

if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {
if l.recog != nil {
fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
} else {
@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co
}

// side-effect: can alter configs.hasSemanticContext
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {

var cfg *LexerATNConfig
var cfg *ATNConfig

if trans.getSerializationType() == TransitionRULE {

@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC

pt := trans.(*PredicateTransition)

if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
}
configs.SetHasSemanticContext(true)
configs.hasSemanticContext = true
if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
// TODO: if the entry rule is invoked recursively, some
// actions may be executed during the recursive call. The
// problem can appear when hasEmptyPath() is true but
// isEmpty() is false. In l case, the config needs to be
// isEmpty() is false. In this case, the config needs to be
// split into two contexts - one with just the empty path
// and another with everything but the empty path.
// Unfortunately, the current algorithm does not allow
@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
return cfg
}

// Evaluate a predicate specified in the lexer.
// evaluatePredicate eEvaluates a predicate specified in the lexer.
//
// <p>If {@code speculative} is {@code true}, l method was called before
// {@link //consume} for the Matched character. This method should call
// {@link //consume} before evaluating the predicate to ensure position
// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
// and {@link Lexer//getcolumn}, properly reflect the current
// lexer state. This method should restore {@code input} and the simulator
// to the original state before returning (i.e. undo the actions made by the
// call to {@link //consume}.</p>
// If speculative is true, this method was called before
// [consume] for the Matched character. This method should call
// [consume] before evaluating the predicate to ensure position
// sensitive values, including [GetText], [GetLine],
// and [GetColumn], properly reflect the current
// lexer state. This method should restore input and the simulator
// to the original state before returning, i.e. undo the actions made by the
// call to [Consume].
//
// @param input The input stream.
// @param ruleIndex The rule containing the predicate.
// @param predIndex The index of the predicate within the rule.
// @param speculative {@code true} if the current index in {@code input} is
// one character before the predicate's location.
//
// @return {@code true} if the specified predicate evaluates to
// {@code true}.
// /
// The func returns true if the specified predicate evaluates to true.
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
// assume true if no recognizer was provided
if l.recog == nil {
@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream
settings.dfaState = dfaState
}

func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
if to == nil && cfgs != nil {
// leading to l call, ATNConfigSet.hasSemanticContext is used as a
// marker indicating dynamic predicate evaluation makes l edge
@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// TJP notes: next time through the DFA, we see a pred again and eval.
// If that gets us to a previously created (but dangling) DFA
// state, we can continue in pure DFA mode from there.
// /
suppressEdge := cfgs.HasSemanticContext()
cfgs.SetHasSemanticContext(false)

//
suppressEdge := cfgs.hasSemanticContext
cfgs.hasSemanticContext = false
to = l.addDFAState(cfgs, true)

if suppressEdge {
@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// Only track edges within the DFA bounds
return to
}
if LexerATNSimulatorDebug {
if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
}
l.atn.edgeMu.Lock()
@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {

proposed := NewDFAState(-1, configs)
var firstConfigWithRuleStopState ATNConfig

for _, cfg := range configs.GetItems() {
var firstConfigWithRuleStopState *ATNConfig

for _, cfg := range configs.configs {
_, ok := cfg.GetState().(*RuleStopState)

if ok {
@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
}
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
dfa := l.decisionToDFA[l.mode]

l.atn.stateMu.Lock()
defer l.atn.stateMu.Unlock()
existing, present := dfa.states.Get(proposed)
existing, present := dfa.Get(proposed)
if present {

// This state was already present, so just return it.
@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)

// We need to add the new state
//
proposed.stateNumber = dfa.states.Len()
configs.SetReadOnly(true)
proposed.stateNumber = dfa.Len()
configs.readOnly = true
configs.configLookup = nil // Not needed now
proposed.configs = configs
dfa.states.Put(proposed)
dfa.Put(proposed)
}
if !suppressEdge {
dfa.setS0(proposed)
@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA {
return l.decisionToDFA[mode]
}

// Get the text Matched so far for the current token.
// GetText returns the text [Match]ed so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include.
return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
@ -14,11 +14,11 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
return la
}

// - Special value added to the lookahead sets to indicate that we hit
// a predicate during analysis if {@code seeThruPreds==false}.
//
// /
const (
// LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
// a predicate during analysis if
//
// seeThruPreds==false
LL1AnalyzerHitPred = TokenInvalidType
)

@ -38,11 +38,12 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
count := len(s.GetTransitions())
look := make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {

look[alt] = NewIntervalSet()
lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
seeThruPreds := false // fail to get lookahead upon pred
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
// Wipe out lookahead for la alternative if we found nothing
lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)

// Wipe out lookahead for la alternative if we found nothing,
// or we had a predicate when we !seeThruPreds
if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
look[alt] = nil
@ -51,32 +52,31 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
return look
}

// *
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
// Look computes the set of tokens that can follow s in the [ATN] in the
// specified ctx.
//
// <p>If {@code ctx} is {@code nil} and the end of the rule containing
// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
// If {@code ctx} is not {@code nil} and the end of the outermost rule is
// reached, {@link Token//EOF} is added to the result set.</p>
// If ctx is nil and the end of the rule containing
// s is reached, [EPSILON] is added to the result set.
//
// @param s the ATN state
// @param stopState the ATN state to stop at. This can be a
// {@link BlockEndState} to detect epsilon paths through a closure.
// @param ctx the complete parser context, or {@code nil} if the context
// If ctx is not nil and the end of the outermost rule is
// reached, [EOF] is added to the result set.
//
// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a
// [BlockEndState] to detect epsilon paths through a closure.
//
// Parameter ctx is the complete parser context, or nil if the context
// should be ignored
//
// @return The set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
// /
// The func returns the set of tokens that can follow s in the [ATN] in the
// specified ctx.
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
r := NewIntervalSet()
seeThruPreds := true // ignore preds get all lookahead
var lookContext PredictionContext
var lookContext *PredictionContext
if ctx != nil {
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
}
la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
NewBitSet(), true, true)
return r
}
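The Look and getDecisionLookahead hunks above move lookBusy from interface values to *ATNConfig pointers in a generic JStore keyed by a Comparator, with a collection label for the optional statistics build. A hypothetical sketch of that comparator-keyed store pattern (names below are illustrative, not the runtime's API):

package main

import "fmt"

type comparator[T any] interface {
	Hash1(t T) int
	Equals2(a, b T) bool
}

type store[T any, C comparator[T]] struct {
	cmp  C
	data map[int][]T
}

func newStore[T any, C comparator[T]](cmp C) *store[T, C] {
	return &store[T, C]{cmp: cmp, data: map[int][]T{}}
}

// Put returns the previously stored equal value when one exists, so
// callers deduplicate by structural equality, not pointer identity.
func (s *store[T, C]) Put(t T) (T, bool) {
	h := s.cmp.Hash1(t)
	for _, e := range s.data[h] {
		if s.cmp.Equals2(e, t) {
			return e, true
		}
	}
	s.data[h] = append(s.data[h], t)
	return t, false
}

type intCmp struct{}

func (intCmp) Hash1(i int) int       { return i % 8 }
func (intCmp) Equals2(a, b int) bool { return a == b }

func main() {
	st := newStore[int, intCmp](intCmp{})
	st.Put(3)
	_, existed := st.Put(3)
	fmt.Println(existed) // true: the duplicate was folded away
}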

@ -110,16 +110,17 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.

func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {

returnState := la.atn.states[ctx.getReturnState(i)]
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)

}

func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {

c := NewBaseATNConfig6(s, 0, ctx)
c := NewATNConfig6(s, 0, ctx)

if lookBusy.Contains(c) {
return
@ -151,7 +152,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
return
}

if ctx != BasePredictionContextEMPTY {
if ctx.pcType != PredictionContextEmpty {
removed := calledRuleStack.contains(s.GetRuleIndex())
defer func() {
if removed {
@ -202,7 +203,8 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
}
}

func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {

newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())

47 vendor/github.com/antlr4-go/antlr/v4/nostatistics.go generated vendored Normal file
@ -0,0 +1,47 @@
//go:build !antlr.stats

package antlr

// This file is compiled when the build configuration antlr.stats is not enabled.
// which then allows the compiler to optimize out all the code that is not used.
const collectStats = false

// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
type goRunStats struct {
}

var Statistics = &goRunStats{}

func (s *goRunStats) AddJStatRec(_ *JStatRec) {
// Do nothing - compiler will optimize this out (hopefully)
}

func (s *goRunStats) CollectionAnomalies() {
// Do nothing - compiler will optimize this out (hopefully)
}

func (s *goRunStats) Reset() {
// Do nothing - compiler will optimize this out (hopefully)
}

func (s *goRunStats) Report(dir string, prefix string) error {
// Do nothing - compiler will optimize this out (hopefully)
return nil
}

func (s *goRunStats) Analyze() {
// Do nothing - compiler will optimize this out (hopefully)
}

type statsOption func(*goRunStats) error

func (s *goRunStats) Configure(options ...statsOption) error {
// Do nothing - compiler will optimize this out (hopefully)
return nil
}

func WithTopN(topN int) statsOption {
return func(s *goRunStats) error {
return nil
}
}
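nostatistics.go above is the no-op half of a build-tag pair: it is compiled only when the antlr.stats tag is absent, so collectStats is a false constant and guarded call sites compile away. A hypothetical sketch of the mirror file's shape when building with go build -tags antlr.stats (the runtime's real statistics file carries the collecting method bodies):

// Hypothetical counterpart file, selected instead of nostatistics.go
// when the tag is set. Sketch only, not the runtime's source.

//go:build antlr.stats

package antlr

// collectStats flips on, so paths guarded by "if collectStats" stay in
// the build and record collection statistics.
const collectStats = true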
@ -48,8 +48,10 @@ type BaseParser struct {
_SyntaxErrors int
}

// p.is all the parsing support code essentially most of it is error
// recovery stuff.//
// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error
// recovery stuff.
//
//goland:noinspection GoUnusedExportedFunction
func NewBaseParser(input TokenStream) *BaseParser {

p := new(BaseParser)
@ -58,39 +60,46 @@ func NewBaseParser(input TokenStream) *BaseParser {

// The input stream.
p.input = nil

// The error handling strategy for the parser. The default value is a new
// instance of {@link DefaultErrorStrategy}.
p.errHandler = NewDefaultErrorStrategy()
p.precedenceStack = make([]int, 0)
p.precedenceStack.Push(0)
// The {@link ParserRuleContext} object for the currently executing rule.

// The ParserRuleContext object for the currently executing rule.
// p.is always non-nil during the parsing process.
p.ctx = nil
// Specifies whether or not the parser should construct a parse tree during

// Specifies whether the parser should construct a parse tree during
// the parsing process. The default value is {@code true}.
p.BuildParseTrees = true
// When {@link //setTrace}{@code (true)} is called, a reference to the
// {@link TraceListener} is stored here so it can be easily removed in a
// later call to {@link //setTrace}{@code (false)}. The listener itself is

// When setTrace(true) is called, a reference to the
// TraceListener is stored here, so it can be easily removed in a
// later call to setTrace(false). The listener itself is
// implemented as a parser listener so p.field is not directly used by
// other parser methods.
p.tracer = nil
// The list of {@link ParseTreeListener} listeners registered to receive

// The list of ParseTreeListener listeners registered to receive
// events during the parse.
p.parseListeners = nil

// The number of syntax errors Reported during parsing. p.value is
// incremented each time {@link //NotifyErrorListeners} is called.
// incremented each time NotifyErrorListeners is called.
p._SyntaxErrors = 0
p.SetInputStream(input)

return p
}

// p.field maps from the serialized ATN string to the deserialized {@link
// ATN} with
// This field maps from the serialized ATN string to the deserialized [ATN] with
// bypass alternatives.
//
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
// [ATNDeserializationOptions.isGenerateRuleBypassTransitions]
//
//goland:noinspection GoUnusedGlobalVariable
var bypassAltsAtnCache = make(map[string]int)

// reset the parser's state//
@ -143,10 +152,13 @@ func (p *BaseParser) Match(ttype int) Token {
p.Consume()
} else {
t = p.errHandler.RecoverInline(p)
if p.HasError() {
return nil
}
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
// we must have conjured up a Newtoken during single token
// insertion
// if it's not the current symbol

// we must have conjured up a new token during single token
// insertion if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
@ -178,9 +190,8 @@ func (p *BaseParser) MatchWildcard() Token {
} else {
t = p.errHandler.RecoverInline(p)
if p.BuildParseTrees && t.GetTokenIndex() == -1 {
// we must have conjured up a Newtoken during single token
// insertion
// if it's not the current symbol
// we must have conjured up a new token during single token
// insertion if it's not the current symbol
p.ctx.AddErrorNode(t)
}
}
@ -202,33 +213,27 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener {
return p.parseListeners
}

// Registers {@code listener} to receive events during the parsing process.
// AddParseListener registers listener to receive events during the parsing process.
//
// <p>To support output-preserving grammar transformations (including but not
// To support output-preserving grammar transformations (including but not
// limited to left-recursion removal, automated left-factoring, and
// optimized code generation), calls to listener methods during the parse
// may differ substantially from calls made by
// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parser. In addition, calls to certain
// rule entry methods may be omitted.</p>
// rule entry methods may be omitted.
//
// <p>With the following specific exceptions, calls to listener events are
// <em>deterministic</em>, i.e. for identical input the calls to listener
// methods will be the same.</p>
// With the following specific exceptions, calls to listener events are
// deterministic, i.e. for identical input the calls to listener
// methods will be the same.
//
// <ul>
// <li>Alterations to the grammar used to generate code may change the
// behavior of the listener calls.</li>
// <li>Alterations to the command line options passed to ANTLR 4 when
// generating the parser may change the behavior of the listener calls.</li>
// <li>Changing the version of the ANTLR Tool used to generate the parser
// may change the behavior of the listener calls.</li>
// </ul>
//
// @param listener the listener to add
//
// @panics nilPointerException if {@code} listener is {@code nil}
// - Alterations to the grammar used to generate code may change the
// behavior of the listener calls.
// - Alterations to the command line options passed to ANTLR 4 when
// generating the parser may change the behavior of the listener calls.
// - Changing the version of the ANTLR Tool used to generate the parser
// may change the behavior of the listener calls.
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
if listener == nil {
panic("listener")
@ -239,11 +244,10 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
p.parseListeners = append(p.parseListeners, listener)
}

// Remove {@code listener} from the list of parse listeners.
// RemoveParseListener removes listener from the list of parse listeners.
//
// <p>If {@code listener} is {@code nil} or has not been added as a parse
// listener, p.method does nothing.</p>
// @param listener the listener to remove
// If listener is nil or has not been added as a parse
// listener, this func does nothing.
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {

if p.parseListeners != nil {
@ -274,7 +278,7 @@ func (p *BaseParser) removeParseListeners() {
p.parseListeners = nil
}

// Notify any parse listeners of an enter rule event.
// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
func (p *BaseParser) TriggerEnterRuleEvent() {
if p.parseListeners != nil {
ctx := p.ctx
@ -285,9 +289,7 @@ func (p *BaseParser) TriggerEnterRuleEvent() {
}
}

// Notify any parse listeners of an exit rule event.
//
// @see //addParseListener
// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
func (p *BaseParser) TriggerExitRuleEvent() {
if p.parseListeners != nil {
// reverse order walk of listeners
@ -314,19 +316,16 @@ func (p *BaseParser) GetTokenFactory() TokenFactory {
return p.input.GetTokenSource().GetTokenFactory()
}

// Tell our token source and error strategy about a Newway to create tokens.//
// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
func (p *BaseParser) setTokenFactory(factory TokenFactory) {
p.input.GetTokenSource().setTokenFactory(factory)
}

// The ATN with bypass alternatives is expensive to create so we create it
// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
// lazily.
//
// @panics UnsupportedOperationException if the current parser does not
// implement the {@link //getSerializedATN()} method.
func (p *BaseParser) GetATNWithBypassAlts() {

// TODO
// TODO - Implement this?
panic("Not implemented!")

// serializedAtn := p.getSerializedATN()
@ -354,6 +353,7 @@ func (p *BaseParser) GetATNWithBypassAlts() {
// String id = m.Get("ID")
// </pre>

//goland:noinspection GoUnusedParameter
func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {

panic("NewParseTreePatternMatcher not implemented!")
@ -386,14 +386,16 @@ func (p *BaseParser) GetTokenStream() TokenStream {
return p.input
}

// Set the token stream and reset the parser.//
// SetTokenStream installs input as the token stream and resets the parser.
func (p *BaseParser) SetTokenStream(input TokenStream) {
p.input = nil
p.reset()
p.input = input
}

// Match needs to return the current input symbol, which gets put
// GetCurrentToken returns the current token at LT(1).
//
// [Match] needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
func (p *BaseParser) GetCurrentToken() Token {
return p.input.LT(1)
@ -446,7 +448,7 @@ func (p *BaseParser) addContextToParseTree() {
}
}

func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
p.SetState(state)
p.ctx = localctx
p.ctx.SetStart(p.input.LT(1))
@ -474,7 +476,7 @@ func (p *BaseParser) ExitRule() {

func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
localctx.SetAltNumber(altNum)
// if we have Newlocalctx, make sure we replace existing ctx
// if we have a new localctx, make sure we replace existing ctx
// that is previous child of parse tree
if p.BuildParseTrees && p.ctx != localctx {
if p.ctx.GetParent() != nil {
@ -498,7 +500,7 @@ func (p *BaseParser) GetPrecedence() int {
return p.precedenceStack[len(p.precedenceStack)-1]
}

func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
p.SetState(state)
p.precedenceStack.Push(precedence)
p.ctx = localctx
@ -512,7 +514,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleI
//
// Like {@link //EnterRule} but for recursive rules.

func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
previous := p.ctx
previous.SetParent(localctx)
previous.SetInvokingState(state)
@ -530,7 +532,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state,
}

func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
p.precedenceStack.Pop()
_, _ = p.precedenceStack.Pop()
p.ctx.SetStop(p.input.LT(-1))
retCtx := p.ctx // save current ctx (return value)
// unroll so ctx is as it was before call to recursive method
@ -561,29 +563,22 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
return nil
}

func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}
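Precpred above is the guard generated parsers call for left-recursive alternatives: an alternative survives only if its precedence is at least the value on top of the precedence stack pushed by EnterRecursionRule. A runnable sketch of that check:

package main

import "fmt"

type parser struct{ precedenceStack []int }

// precpred mirrors BaseParser.Precpred: compare against the innermost
// pushed precedence.
func (p *parser) precpred(precedence int) bool {
	return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
}

func main() {
	p := &parser{precedenceStack: []int{0, 2}} // inside a rule entered at precedence 2
	fmt.Println(p.precpred(3)) // true: binds tighter, alternative viable
	fmt.Println(p.precpred(1)) // false: pruned
}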
|
||||
|
||||
//goland:noinspection GoUnusedParameter
|
||||
func (p *BaseParser) inContext(context ParserRuleContext) bool {
|
||||
// TODO: useful in parser?
|
||||
return false
|
||||
}
|
||||
|
||||
//
|
||||
// Checks whether or not {@code symbol} can follow the current state in the
|
||||
// ATN. The behavior of p.method is equivalent to the following, but is
|
||||
// IsExpectedToken checks whether symbol can follow the current state in the
|
||||
// {ATN}. The behavior of p.method is equivalent to the following, but is
|
||||
// implemented such that the complete context-sensitive follow set does not
|
||||
// need to be explicitly constructed.
|
||||
//
|
||||
// <pre>
|
||||
// return getExpectedTokens().contains(symbol)
|
||||
// </pre>
|
||||
//
|
||||
// @param symbol the symbol type to check
|
||||
// @return {@code true} if {@code symbol} can follow the current state in
|
||||
// the ATN, otherwise {@code false}.
|
||||
|
||||
// return getExpectedTokens().contains(symbol)
|
||||
func (p *BaseParser) IsExpectedToken(symbol int) bool {
|
||||
atn := p.Interpreter.atn
|
||||
ctx := p.ctx
|
||||
@ -611,11 +606,9 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Computes the set of input symbols which could follow the current parser
|
||||
// state and context, as given by {@link //GetState} and {@link //GetContext},
|
||||
// GetExpectedTokens and returns the set of input symbols which could follow the current parser
|
||||
// state and context, as given by [GetState] and [GetContext],
|
||||
// respectively.
|
||||
//
|
||||
// @see ATN//getExpectedTokens(int, RuleContext)
|
||||
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
|
||||
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
|
||||
}
|
||||
@ -626,7 +619,7 @@ func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
|
||||
return atn.NextTokens(s, nil)
|
||||
}
|
||||
|
||||
// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
|
||||
// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found.
|
||||
func (p *BaseParser) GetRuleIndex(ruleName string) int {
|
||||
var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
|
||||
if ok {
|
||||
@ -636,13 +629,10 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Return List<String> of the rule names in your parser instance
|
||||
// GetRuleInvocationStack returns a list of the rule names in your parser instance
|
||||
// leading up to a call to the current rule. You could override if
|
||||
// you want more details such as the file/line info of where
|
||||
// in the ATN a rule is invoked.
|
||||
//
|
||||
// this very useful for error messages.
|
||||
|
||||
func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
|
||||
if c == nil {
|
||||
c = p.ctx
|
||||
@ -668,16 +658,16 @@ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
|
||||
return stack
|
||||
}
|
||||
|
||||
// For debugging and other purposes.//
|
||||
// GetDFAStrings returns a list of all DFA states used for debugging purposes
|
||||
func (p *BaseParser) GetDFAStrings() string {
|
||||
return fmt.Sprint(p.Interpreter.decisionToDFA)
|
||||
}
|
||||
|
||||
// For debugging and other purposes.//
|
||||
// DumpDFA prints the whole of the DFA for debugging
|
||||
func (p *BaseParser) DumpDFA() {
|
||||
seenOne := false
|
||||
for _, dfa := range p.Interpreter.decisionToDFA {
|
||||
if dfa.states.Len() > 0 {
|
||||
if dfa.Len() > 0 {
|
||||
if seenOne {
|
||||
fmt.Println()
|
||||
}
|
||||
@ -692,8 +682,10 @@ func (p *BaseParser) GetSourceName() string {
|
||||
return p.GrammarFileName
|
||||
}
|
||||
|
||||
// During a parse is sometimes useful to listen in on the rule entry and exit
|
||||
// events as well as token Matches. p.is for quick and dirty debugging.
|
||||
// SetTrace installs a trace listener for the parse.
|
||||
//
|
||||
// During a parse it is sometimes useful to listen in on the rule entry and exit
|
||||
// events as well as token Matches. This is for quick and dirty debugging.
|
||||
func (p *BaseParser) SetTrace(trace *TraceListener) {
|
||||
if trace == nil {
|
||||
p.RemoveParseListener(p.tracer)
|
||||
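The parser.go hunks above follow one pattern: parameters that a method accepts but never reads are renamed to the blank identifier. A minimal sketch of the idiom, using a hypothetical enterRule function rather than the runtime's own API:

package main

import "fmt"

// Before the rename, a parameter like ruleIndex was accepted but never
// read, which unused-parameter linters flag. The blank identifier keeps
// the signature (and every call site) intact while making the non-use
// explicit.
func enterRule(name string, _ int) {
    fmt.Println("entering", name)
}

func main() {
    enterRule("expr", 42) // callers are unaffected by the rename
}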
File diff suppressed because it is too large
@ -31,7 +31,9 @@ type ParserRuleContext interface {
}

type BaseParserRuleContext struct {
    *BaseRuleContext
    parentCtx     RuleContext
    invokingState int
    RuleIndex     int

    start, stop Token
    exception   RecognitionException
@ -40,8 +42,22 @@ type BaseParserRuleContext struct {

func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
    prc := new(BaseParserRuleContext)
    InitBaseParserRuleContext(prc, parent, invokingStateNumber)
    return prc
}

    prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
    // What context invoked b rule?
    prc.parentCtx = parent

    // What state invoked the rule associated with b context?
    // The "return address" is the followState of invokingState
    // If parent is nil, b should be -1.
    if parent == nil {
        prc.invokingState = -1
    } else {
        prc.invokingState = invokingStateNumber
    }

    prc.RuleIndex = -1
    // * If we are debugging or building a parse tree for a Visitor,
@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int)
    // The exception that forced prc rule to return. If the rule successfully
    // completed, prc is {@code nil}.
    prc.exception = nil

    return prc
}

func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string {
    return s
}

// Double dispatch methods for listeners
func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
// EnterRule is called when any rule is entered.
func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
}

func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
// ExitRule is called when any rule is exited.
func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
}

// * Does not set parent link other add methods do that///
// * Does not set parent link other add methods do that
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
    if prc.children == nil {
        prc.children = make([]Tree, 0)
@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
    return child
}

// * Used by EnterOuterAlt to toss out a RuleContext previously added as
// we entered a rule. If we have // label, we will need to remove
// generic ruleContext object.
// /
// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
// we entered a rule. If we have a label, we will need to remove
// the generic ruleContext object.
func (prc *BaseParserRuleContext) RemoveLastChild() {
    if prc.children != nil && len(prc.children) > 0 {
        prc.children = prc.children[0 : len(prc.children)-1]
@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int {
    return len(prc.children)
}

func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
    if prc.start == nil || prc.stop == nil {
        return TreeInvalidInterval
    }
@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
    return s
}

func (prc *BaseParserRuleContext) SetParent(v Tree) {
    if v == nil {
        prc.parentCtx = nil
    } else {
        prc.parentCtx = v.(RuleContext)
    }
}

func (prc *BaseParserRuleContext) GetInvokingState() int {
    return prc.invokingState
}

func (prc *BaseParserRuleContext) SetInvokingState(t int) {
    prc.invokingState = t
}

func (prc *BaseParserRuleContext) GetRuleIndex() int {
    return prc.RuleIndex
}

func (prc *BaseParserRuleContext) GetAltNumber() int {
    return ATNInvalidAltNumber
}

func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}

// IsEmpty returns true if the context of b is empty.
//
// A context is empty if there is no invoking state, meaning nobody calls
// current context.
func (prc *BaseParserRuleContext) IsEmpty() bool {
    return prc.invokingState == -1
}

// GetParent returns the combined text of all child nodes. This method only considers
// tokens which have been added to the parse tree.
//
// Since tokens on hidden channels (e.g. whitespace or comments) are not
// added to the parse trees, they will not appear in the output of this
// method.
func (prc *BaseParserRuleContext) GetParent() Tree {
    return prc.parentCtx
}

var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)

type InterpreterRuleContext interface {
@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct {
    *BaseParserRuleContext
}

//goland:noinspection GoUnusedExportedFunction
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {

    prc := new(BaseInterpreterRuleContext)
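The hunk above splits NewBaseParserRuleContext into a thin constructor plus an exported InitBaseParserRuleContext. That is a common Go pattern for types designed to be embedded: a wrapping type can allocate its own struct once and initialize the embedded part in place, avoiding a second heap allocation. A small sketch of the pattern with hypothetical Node and FancyNode types, not the ANTLR ones:

package main

import "fmt"

type Node struct {
    parent *Node
    state  int
}

// NewNode allocates and initializes a standalone Node.
func NewNode(parent *Node, state int) *Node {
    n := new(Node)
    InitNode(n, parent, state)
    return n
}

// InitNode initializes an already-allocated Node, so types that embed
// Node can reuse it without a separate allocation.
func InitNode(n *Node, parent *Node, state int) {
    n.parent = parent
    if parent == nil {
        n.state = -1 // no invoking state
    } else {
        n.state = state
    }
}

type FancyNode struct {
    Node  // embedded; initialized in place by InitNode
    label string
}

func main() {
    f := &FancyNode{label: "expr"}
    InitNode(&f.Node, nil, 7)
    fmt.Println(f.state, f.label) // -1 expr
}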
727
vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
generated
vendored
Normal file
@ -0,0 +1,727 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

import (
    "fmt"
    "golang.org/x/exp/slices"
    "strconv"
)

var _emptyPredictionContextHash int

func init() {
    _emptyPredictionContextHash = murmurInit(1)
    _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
}

func calculateEmptyHash() int {
    return _emptyPredictionContextHash
}

const (
    // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $
    // doesn't mean wildcard:
    //
    // $ + x = [$,x]
    //
    // Here,
    //
    // $ = EmptyReturnState
    BasePredictionContextEmptyReturnState = 0x7FFFFFFF
)

// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
//
//goland:noinspection GoUnusedGlobalVariable
var (
    BasePredictionContextglobalNodeCount = 1
    BasePredictionContextid             = BasePredictionContextglobalNodeCount
)

const (
    PredictionContextEmpty = iota
    PredictionContextSingleton
    PredictionContextArray
)

// PredictionContext is a go idiomatic implementation of PredictionContext that does not try to
// emulate inheritance from Java, and can be used without an interface definition. An interface
// is not required because no user code will ever need to implement this interface.
type PredictionContext struct {
    cachedHash   int
    pcType       int
    parentCtx    *PredictionContext
    returnState  int
    parents      []*PredictionContext
    returnStates []int
}

func NewEmptyPredictionContext() *PredictionContext {
    nep := &PredictionContext{}
    nep.cachedHash = calculateEmptyHash()
    nep.pcType = PredictionContextEmpty
    nep.returnState = BasePredictionContextEmptyReturnState
    return nep
}

func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
    pc := &PredictionContext{}
    pc.pcType = PredictionContextSingleton
    pc.returnState = returnState
    pc.parentCtx = parent
    if parent != nil {
        pc.cachedHash = calculateHash(parent, returnState)
    } else {
        pc.cachedHash = calculateEmptyHash()
    }
    return pc
}

func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
    if returnState == BasePredictionContextEmptyReturnState && parent == nil {
        // someone can pass in the bits of an array ctx that mean $
        return BasePredictionContextEMPTY
    }
    return NewBaseSingletonPredictionContext(parent, returnState)
}

func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
    // Parent can be nil only if full ctx mode and we make an array
    // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
    // nil parent and
    // returnState == {@link //EmptyReturnState}.
    hash := murmurInit(1)
    for _, parent := range parents {
        hash = murmurUpdate(hash, parent.Hash())
    }
    for _, returnState := range returnStates {
        hash = murmurUpdate(hash, returnState)
    }
    hash = murmurFinish(hash, len(parents)<<1)

    nec := &PredictionContext{}
    nec.cachedHash = hash
    nec.pcType = PredictionContextArray
    nec.parents = parents
    nec.returnStates = returnStates
    return nec
}

func (p *PredictionContext) Hash() int {
    return p.cachedHash
}

func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
    switch p.pcType {
    case PredictionContextEmpty:
        otherP := other.(*PredictionContext)
        return other == nil || otherP == nil || otherP.isEmpty()
    case PredictionContextSingleton:
        return p.SingletonEquals(other)
    case PredictionContextArray:
        return p.ArrayEquals(other)
    }
    return false
}

func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
    if o == nil {
        return false
    }
    other := o.(*PredictionContext)
    if other == nil || other.pcType != PredictionContextArray {
        return false
    }
    if p.cachedHash != other.Hash() {
        return false // can't be same if hash is different
    }

    // Must compare the actual array elements and not just the array address
    //
    return slices.Equal(p.returnStates, other.returnStates) &&
        slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
            return x.Equals(y)
        })
}

func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
    if other == nil {
        return false
    }
    otherP := other.(*PredictionContext)
    if otherP == nil {
        return false
    }

    if p.cachedHash != otherP.Hash() {
        return false // Can't be same if hash is different
    }

    if p.returnState != otherP.getReturnState(0) {
        return false
    }

    // Both parents must be nil if one is
    if p.parentCtx == nil {
        return otherP.parentCtx == nil
    }

    return p.parentCtx.Equals(otherP.parentCtx)
}

func (p *PredictionContext) GetParent(i int) *PredictionContext {
    switch p.pcType {
    case PredictionContextEmpty:
        return nil
    case PredictionContextSingleton:
        return p.parentCtx
    case PredictionContextArray:
        return p.parents[i]
    }
    return nil
}

func (p *PredictionContext) getReturnState(i int) int {
    switch p.pcType {
    case PredictionContextArray:
        return p.returnStates[i]
    default:
        return p.returnState
    }
}

func (p *PredictionContext) GetReturnStates() []int {
    switch p.pcType {
    case PredictionContextArray:
        return p.returnStates
    default:
        return []int{p.returnState}
    }
}

func (p *PredictionContext) length() int {
    switch p.pcType {
    case PredictionContextArray:
        return len(p.returnStates)
    default:
        return 1
    }
}

func (p *PredictionContext) hasEmptyPath() bool {
    switch p.pcType {
    case PredictionContextSingleton:
        return p.returnState == BasePredictionContextEmptyReturnState
    }
    return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
}

func (p *PredictionContext) String() string {
    switch p.pcType {
    case PredictionContextEmpty:
        return "$"
    case PredictionContextSingleton:
        var up string

        if p.parentCtx == nil {
            up = ""
        } else {
            up = p.parentCtx.String()
        }

        if len(up) == 0 {
            if p.returnState == BasePredictionContextEmptyReturnState {
                return "$"
            }

            return strconv.Itoa(p.returnState)
        }

        return strconv.Itoa(p.returnState) + " " + up
    case PredictionContextArray:
        if p.isEmpty() {
            return "[]"
        }

        s := "["
        for i := 0; i < len(p.returnStates); i++ {
            if i > 0 {
                s = s + ", "
            }
            if p.returnStates[i] == BasePredictionContextEmptyReturnState {
                s = s + "$"
                continue
            }
            s = s + strconv.Itoa(p.returnStates[i])
            if !p.parents[i].isEmpty() {
                s = s + " " + p.parents[i].String()
            } else {
                s = s + "nil"
            }
        }
        return s + "]"

    default:
        return "unknown"
    }
}

func (p *PredictionContext) isEmpty() bool {
    switch p.pcType {
    case PredictionContextEmpty:
        return true
    case PredictionContextArray:
        // since EmptyReturnState can only appear in the last position, we
        // don't need to verify that size==1
        return p.returnStates[0] == BasePredictionContextEmptyReturnState
    default:
        return false
    }
}

func (p *PredictionContext) Type() int {
    return p.pcType
}

func calculateHash(parent *PredictionContext, returnState int) int {
    h := murmurInit(1)
    h = murmurUpdate(h, parent.Hash())
    h = murmurUpdate(h, returnState)
    return murmurFinish(h, 2)
}

// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
// /
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
    if outerContext == nil {
        outerContext = ParserRuleContextEmpty
    }
    // if we are in RuleContext of start rule, s, then BasePredictionContext
    // is EMPTY. Nobody called us. (if we are empty, return empty)
    if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
        return BasePredictionContextEMPTY
    }
    // If we have a parent, convert it to a BasePredictionContext graph
    parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
    state := a.states[outerContext.GetInvokingState()]
    transition := state.GetTransitions()[0]

    return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
}

func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {

    // Share same graph if both same
    //
    if a == b || a.Equals(b) {
        return a
    }

    if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
        return mergeSingletons(a, b, rootIsWildcard, mergeCache)
    }
    // At least one of a or b is array
    // If one is $ and rootIsWildcard, return $ as wildcard
    if rootIsWildcard {
        if a.isEmpty() {
            return a
        }
        if b.isEmpty() {
            return b
        }
    }

    // Convert either Singleton or Empty to arrays, so that we can merge them
    //
    ara := convertToArray(a)
    arb := convertToArray(b)
    return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
}

func convertToArray(pc *PredictionContext) *PredictionContext {
    switch pc.Type() {
    case PredictionContextEmpty:
        return NewArrayPredictionContext([]*PredictionContext{}, []int{})
    case PredictionContextSingleton:
        return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
    default:
        // Already an array
    }
    return pc
}

// mergeSingletons merges two Singleton [PredictionContext] instances.
//
// Stack tops equal, parents merge is same return left graph.
// <embed src="images/SingletonMerge_SameRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Same stack top, parents differ merge parents giving array node, then
// remainders of those graphs. A new root node is created to point to the
// merged parents.<br>
// <embed src="images/SingletonMerge_SameRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to same parent. Make array node for the
// root where both element in the root point to the same (original)
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootSamePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Different stack tops pointing to different parents. Make array node for
// the root where each element points to the corresponding original
// parent.<br>
// <embed src="images/SingletonMerge_DiffRootDiffPar.svg"
// type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// @param mergeCache
// /
func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
    if mergeCache != nil {
        previous, present := mergeCache.Get(a, b)
        if present {
            return previous
        }
        previous, present = mergeCache.Get(b, a)
        if present {
            return previous
        }
    }

    rootMerge := mergeRoot(a, b, rootIsWildcard)
    if rootMerge != nil {
        if mergeCache != nil {
            mergeCache.Put(a, b, rootMerge)
        }
        return rootMerge
    }
    if a.returnState == b.returnState {
        parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
        // if parent is same as existing a or b parent or reduced to a parent,
        // return it
        if parent.Equals(a.parentCtx) {
            return a // ax + bx = ax, if a=b
        }
        if parent.Equals(b.parentCtx) {
            return b // ax + bx = bx, if a=b
        }
        // else: ax + ay = a'[x,y]
        // merge parents x and y, giving array node with x,y then remainders
        // of those graphs. dup a, a' points at merged array.
        // New joined parent so create a new singleton pointing to it, a'
        spc := SingletonBasePredictionContextCreate(parent, a.returnState)
        if mergeCache != nil {
            mergeCache.Put(a, b, spc)
        }
        return spc
    }
    // a != b payloads differ
    // see if we can collapse parents due to $+x parents if local ctx
    var singleParent *PredictionContext
    if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax +
        // bx =
        // [a,b]x
        singleParent = a.parentCtx
    }
    if singleParent != nil { // parents are same
        // sort payloads and use same parent
        payloads := []int{a.returnState, b.returnState}
        if a.returnState > b.returnState {
            payloads[0] = b.returnState
            payloads[1] = a.returnState
        }
        parents := []*PredictionContext{singleParent, singleParent}
        apc := NewArrayPredictionContext(parents, payloads)
        if mergeCache != nil {
            mergeCache.Put(a, b, apc)
        }
        return apc
    }
    // parents differ and can't merge them. Just pack together
    // into array can't merge.
    // ax + by = [ax,by]
    payloads := []int{a.returnState, b.returnState}
    parents := []*PredictionContext{a.parentCtx, b.parentCtx}
    if a.returnState > b.returnState { // sort by payload
        payloads[0] = b.returnState
        payloads[1] = a.returnState
        parents = []*PredictionContext{b.parentCtx, a.parentCtx}
    }
    apc := NewArrayPredictionContext(parents, payloads)
    if mergeCache != nil {
        mergeCache.Put(a, b, apc)
    }
    return apc
}

// Handle case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
//
// <h2>Local-Context Merges</h2>
//
// <p>These local-context merge operations are used when {@code rootIsWildcard}
// is true.</p>
//
// <p>{@link //EMPTY} is superset of any graph return {@link //EMPTY}.<br>
// <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
// {@code //EMPTY} return left graph.<br>
// <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
//
// <p>Special case of last merge if local context.<br>
// <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
//
// <h2>Full-Context Merges</h2>
//
// <p>These full-context merge operations are used when {@code rootIsWildcard}
// is false.</p>
//
// <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
//
// <p>Must keep all contexts {@link //EMPTY} in array is a special value (and
// nil parent).<br>
// <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
//
// <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
//
// @param a the first {@link SingletonBasePredictionContext}
// @param b the second {@link SingletonBasePredictionContext}
// @param rootIsWildcard {@code true} if this is a local-context merge,
// otherwise false to indicate a full-context merge
// /
func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
    if rootIsWildcard {
        if a.pcType == PredictionContextEmpty {
            return BasePredictionContextEMPTY // // + b =//
        }
        if b.pcType == PredictionContextEmpty {
            return BasePredictionContextEMPTY // a +// =//
        }
    } else {
        if a.isEmpty() && b.isEmpty() {
            return BasePredictionContextEMPTY // $ + $ = $
        } else if a.isEmpty() { // $ + x = [$,x]
            payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
            parents := []*PredictionContext{b.GetParent(-1), nil}
            return NewArrayPredictionContext(parents, payloads)
        } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present)
            payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
            parents := []*PredictionContext{a.GetParent(-1), nil}
            return NewArrayPredictionContext(parents, payloads)
        }
    }
    return nil
}

// Merge two {@link ArrayBasePredictionContext} instances.
//
// <p>Different tops, different parents.<br>
// <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, same parents.<br>
// <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, different parents.<br>
// <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
//
// <p>Shared top, all shared parents.<br>
// <embed src="images/ArrayMerge_ShareTopSharePar.svg"
// type="image/svg+xml"/></p>
//
// <p>Equal tops, merge parents and reduce top to
// {@link SingletonBasePredictionContext}.<br>
// <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
//
//goland:noinspection GoBoolExpressions
func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
    if mergeCache != nil {
        previous, present := mergeCache.Get(a, b)
        if present {
            if runtimeConfig.parserATNSimulatorTraceATNSim {
                fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
            }
            return previous
        }
        previous, present = mergeCache.Get(b, a)
        if present {
            if runtimeConfig.parserATNSimulatorTraceATNSim {
                fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
            }
            return previous
        }
    }
    // merge sorted payloads a + b => M
    i := 0 // walks a
    j := 0 // walks b
    k := 0 // walks target M array

    mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
    mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
    // walk and merge to yield mergedParents, mergedReturnStates
    for i < len(a.returnStates) && j < len(b.returnStates) {
        aParent := a.parents[i]
        bParent := b.parents[j]
        if a.returnStates[i] == b.returnStates[j] {
            // same payload (stack tops are equal), must yield merged singleton
            payload := a.returnStates[i]
            // $+$ = $
            bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
            axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax
            // ->
            // ax
            if bothDollars || axAX {
                mergedParents[k] = aParent // choose left
                mergedReturnStates[k] = payload
            } else { // ax+ay -> a'[x,y]
                mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
                mergedParents[k] = mergedParent
                mergedReturnStates[k] = payload
            }
            i++ // hop over left one as usual
            j++ // but also Skip one in right side since we merge
        } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
            mergedParents[k] = aParent
            mergedReturnStates[k] = a.returnStates[i]
            i++
        } else { // b > a, copy b[j] to M
            mergedParents[k] = bParent
            mergedReturnStates[k] = b.returnStates[j]
            j++
        }
        k++
    }
    // copy over any payloads remaining in either array
    if i < len(a.returnStates) {
        for p := i; p < len(a.returnStates); p++ {
            mergedParents[k] = a.parents[p]
            mergedReturnStates[k] = a.returnStates[p]
            k++
        }
    } else {
        for p := j; p < len(b.returnStates); p++ {
            mergedParents[k] = b.parents[p]
            mergedReturnStates[k] = b.returnStates[p]
            k++
        }
    }
    // trim merged if we combined a few that had same stack tops
    if k < len(mergedParents) { // write index < last position trim
        if k == 1 { // for just one merged element, return singleton top
            pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
            if mergeCache != nil {
                mergeCache.Put(a, b, pc)
            }
            return pc
        }
        mergedParents = mergedParents[0:k]
        mergedReturnStates = mergedReturnStates[0:k]
    }

    M := NewArrayPredictionContext(mergedParents, mergedReturnStates)

    // if we created same array as a or b, return that instead
    // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
    if M.Equals(a) {
        if mergeCache != nil {
            mergeCache.Put(a, b, a)
        }
        if runtimeConfig.parserATNSimulatorTraceATNSim {
            fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
        }
        return a
    }
    if M.Equals(b) {
        if mergeCache != nil {
            mergeCache.Put(a, b, b)
        }
        if runtimeConfig.parserATNSimulatorTraceATNSim {
            fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
        }
        return b
    }
    combineCommonParents(&mergedParents)

    if mergeCache != nil {
        mergeCache.Put(a, b, M)
    }
    if runtimeConfig.parserATNSimulatorTraceATNSim {
        fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
    }
    return M
}

// Make pass over all M parents and merge any Equals() ones.
// Note that we pass a pointer to the slice as we want to modify it in place.
//
//goland:noinspection GoUnusedFunction
func combineCommonParents(parents *[]*PredictionContext) {
    uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")

    for p := 0; p < len(*parents); p++ {
        parent := (*parents)[p]
        _, _ = uniqueParents.Put(parent)
    }
    for q := 0; q < len(*parents); q++ {
        pc, _ := uniqueParents.Get((*parents)[q])
        (*parents)[q] = pc
    }
}

func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
    if context.isEmpty() {
        return context
    }
    existing, present := visited.Get(context)
    if present {
        return existing
    }

    existing, present = contextCache.Get(context)
    if present {
        visited.Put(context, existing)
        return existing
    }
    changed := false
    parents := make([]*PredictionContext, context.length())
    for i := 0; i < len(parents); i++ {
        parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
        if changed || !parent.Equals(context.GetParent(i)) {
            if !changed {
                parents = make([]*PredictionContext, context.length())
                for j := 0; j < context.length(); j++ {
                    parents[j] = context.GetParent(j)
                }
                changed = true
            }
            parents[i] = parent
        }
    }
    if !changed {
        contextCache.add(context)
        visited.Put(context, context)
        return context
    }
    var updated *PredictionContext
    if len(parents) == 0 {
        updated = BasePredictionContextEMPTY
    } else if len(parents) == 1 {
        updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
    } else {
        updated = NewArrayPredictionContext(parents, context.GetReturnStates())
    }
    contextCache.add(updated)
    visited.Put(updated, updated)
    visited.Put(context, updated)

    return updated
}
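mergeArrays above reduces to a two-pointer merge over return states sorted in ascending order, collapsing equal states into a single entry. The core loop, isolated into a standalone sketch over plain int slices (a hypothetical helper; parents and caching omitted for clarity):

package main

import "fmt"

// mergeReturnStates merges two ascending lists of return states the way
// mergeArrays does: walk both lists with two cursors, emit the smaller
// head, and collapse equal heads into one entry.
func mergeReturnStates(a, b []int) []int {
    merged := make([]int, 0, len(a)+len(b))
    i, j := 0, 0
    for i < len(a) && j < len(b) {
        switch {
        case a[i] == b[j]: // same payload: yield a single merged entry
            merged = append(merged, a[i])
            i++
            j++
        case a[i] < b[j]:
            merged = append(merged, a[i])
            i++
        default:
            merged = append(merged, b[j])
            j++
        }
    }
    merged = append(merged, a[i:]...) // at most one of these two
    merged = append(merged, b[j:]...) // appends anything
    return merged
}

func main() {
    fmt.Println(mergeReturnStates([]int{3, 7, 9}, []int{3, 8})) // [3 7 8 9]
}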
48
vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
package antlr

var BasePredictionContextEMPTY = &PredictionContext{
    cachedHash:  calculateEmptyHash(),
    pcType:      PredictionContextEmpty,
    returnState: BasePredictionContextEmptyReturnState,
}

// PredictionContextCache is used to cache [PredictionContext] objects. It is used for the shared
// context cache associated with contexts in DFA states. This cache
// can be used for both lexers and parsers.
type PredictionContextCache struct {
    cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
}

func NewPredictionContextCache() *PredictionContextCache {
    return &PredictionContextCache{
        cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
    }
}

// Add a context to the cache and return it. If the context already exists,
// return that one instead and do not add a new context to the cache.
// Protect shared cache from unsafe thread access.
func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
    if ctx.isEmpty() {
        return BasePredictionContextEMPTY
    }

    // Put will return the existing entry if it is present (note this is done via Equals, not whether it is
    // the same pointer), otherwise it will add the new entry and return that.
    //
    existing, present := p.cache.Get(ctx)
    if present {
        return existing
    }
    p.cache.Put(ctx, ctx)
    return ctx
}

func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
    pc, exists := p.cache.Get(ctx)
    return pc, exists
}

func (p *PredictionContextCache) length() int {
    return p.cache.Len()
}
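PredictionContextCache.add is a canonicalizing (hash-consing) cache: when an equal context was stored before, the stored instance is returned and the new one is dropped, so equal contexts collapse to one shared object. The same idea in a standalone sketch over a plain string-keyed map (hypothetical types; the runtime's JMap keys by Hash/Equals instead):

package main

import "fmt"

type canonCache struct {
    seen map[string]*string
}

// intern returns the canonical instance for s: the stored pointer if an
// equal value was added before, otherwise it stores and returns this one.
func (c *canonCache) intern(s string) *string {
    if p, ok := c.seen[s]; ok {
        return p
    }
    v := s
    c.seen[s] = &v
    return &v
}

func main() {
    c := &canonCache{seen: map[string]*string{}}
    a := c.intern("ctx:12->$")
    b := c.intern("ctx:12->$")
    fmt.Println(a == b) // true: equal values collapse to one instance
}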
536
vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
generated
vendored
Normal file
@ -0,0 +1,536 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// This enumeration defines the prediction modes available in ANTLR 4 along with
// utility methods for analyzing configuration sets for conflicts and/or
// ambiguities.

const (
    // PredictionModeSLL represents the SLL(*) prediction mode.
    // This prediction mode ignores the current
    // parser context when making predictions. This is the fastest prediction
    // mode, and provides correct results for many grammars. This prediction
    // mode is more powerful than the prediction mode provided by ANTLR 3, but
    // may result in syntax errors for grammar and input combinations which are
    // not SLL.
    //
    // When using this prediction mode, the parser will either return a correct
    // parse tree (i.e. the same parse tree that would be returned with the
    // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a
    // syntax error is encountered when using the SLL prediction mode,
    // it may be due to either an actual syntax error in the input or indicate
    // that the particular combination of grammar and input requires the more
    // powerful LL prediction abilities to complete successfully.
    //
    // This prediction mode does not provide any guarantees for prediction
    // behavior for syntactically-incorrect inputs.
    //
    PredictionModeSLL = 0

    // PredictionModeLL represents the LL(*) prediction mode.
    // This prediction mode allows the current parser
    // context to be used for resolving SLL conflicts that occur during
    // prediction. This is the fastest prediction mode that guarantees correct
    // parse results for all combinations of grammars with syntactically correct
    // inputs.
    //
    // When using this prediction mode, the parser will make correct decisions
    // for all syntactically-correct grammar and input combinations. However, in
    // cases where the grammar is truly ambiguous this prediction mode might not
    // report a precise answer for exactly which alternatives are
    // ambiguous.
    //
    // This prediction mode does not provide any guarantees for prediction
    // behavior for syntactically-incorrect inputs.
    //
    PredictionModeLL = 1

    // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
    // with exact ambiguity detection.
    //
    // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
    // this prediction mode instructs the prediction algorithm to determine the
    // complete and exact set of ambiguous alternatives for every ambiguous
    // decision encountered while parsing.
    //
    // This prediction mode may be used for diagnosing ambiguities during
    // grammar development. Due to the performance overhead of calculating sets
    // of ambiguous alternatives, this prediction mode should be avoided when
    // the exact results are not necessary.
    //
    // This prediction mode does not provide any guarantees for prediction
    // behavior for syntactically-incorrect inputs.
    //
    PredictionModeLLExactAmbigDetection = 2
)
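Callers pick one of these constants by setting it on the parser's ATN simulator. A sketch of the usual two-stage setup; NewMyLexer, NewMyParser and MyParser stand in for grammar-generated types (hypothetical names), and the GetInterpreter/SetPredictionMode accessors are assumed to be this runtime's:

package main

import "github.com/antlr4-go/antlr/v4"

// newTwoStageParser wires up the standard two-stage prediction setup:
// try fast SLL first; on a syntax error, callers typically rewind the
// token stream and re-parse with antlr.PredictionModeLL.
func newTwoStageParser(src string) *MyParser {
    input := antlr.NewInputStream(src)
    lexer := NewMyLexer(input) // hypothetical generated lexer
    tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    parser := NewMyParser(tokens) // hypothetical generated parser
    parser.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
    return parser
}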
|
||||
// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
|
||||
//
|
||||
// This method computes the SLL prediction termination condition for both of
|
||||
// the following cases:
|
||||
//
|
||||
// - The usual SLL+LL fallback upon SLL conflict
|
||||
// - Pure SLL without LL fallback
|
||||
//
|
||||
// # Combined SLL+LL Parsing
|
||||
//
|
||||
// When LL-fallback is enabled upon SLL conflict, correct predictions are
|
||||
// ensured regardless of how the termination condition is computed by this
|
||||
// method. Due to the substantially higher cost of LL prediction, the
|
||||
// prediction should only fall back to LL when the additional lookahead
|
||||
// cannot lead to a unique SLL prediction.
|
||||
//
|
||||
// Assuming combined SLL+LL parsing, an SLL configuration set with only
|
||||
// conflicting subsets should fall back to full LL, even if the
|
||||
// configuration sets don't resolve to the same alternative, e.g.
|
||||
//
|
||||
// {1,2} and {3,4}
|
||||
//
|
||||
// If there is at least one non-conflicting
|
||||
// configuration, SLL could continue with the hopes that more lookahead will
|
||||
// resolve via one of those non-conflicting configurations.
|
||||
//
|
||||
// Here's the prediction termination rule them: SLL (for SLL+LL parsing)
|
||||
// stops when it sees only conflicting configuration subsets. In contrast,
|
||||
// full LL keeps going when there is uncertainty.
|
||||
//
|
||||
// # Heuristic
|
||||
//
|
||||
// As a heuristic, we stop prediction when we see any conflicting subset
|
||||
// unless we see a state that only has one alternative associated with it.
|
||||
// The single-alt-state thing lets prediction continue upon rules like
|
||||
// (otherwise, it would admit defeat too soon):
|
||||
//
|
||||
// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ;
|
||||
//
|
||||
// When the [ATN] simulation reaches the state before ';', it has a
|
||||
// [DFA] state that looks like:
|
||||
//
|
||||
// [12|1|[], 6|2|[], 12|2|[]]
|
||||
//
|
||||
// Naturally
|
||||
//
|
||||
// 12|1|[] and 12|2|[]
|
||||
//
|
||||
// conflict, but we cannot stop processing this node because alternative to has another way to continue,
|
||||
// via
|
||||
//
|
||||
// [6|2|[]]
|
||||
//
|
||||
// It also let's us continue for this rule:
|
||||
//
|
||||
// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
|
||||
//
|
||||
// After Matching input A, we reach the stop state for rule A, state 1.
|
||||
// State 8 is the state immediately before B. Clearly alternatives 1 and 2
|
||||
// conflict and no amount of further lookahead will separate the two.
|
||||
// However, alternative 3 will be able to continue, and so we do not stop
|
||||
// working on this state. In the previous example, we're concerned with
|
||||
// states associated with the conflicting alternatives. Here alt 3 is not
|
||||
// associated with the conflicting configs, but since we can continue
|
||||
// looking for input reasonably, don't declare the state done.
|
||||
//
|
||||
// # Pure SLL Parsing
|
||||
//
|
||||
// To handle pure SLL parsing, all we have to do is make sure that we
|
||||
// combine stack contexts for configurations that differ only by semantic
|
||||
// predicate. From there, we can do the usual SLL termination heuristic.
|
||||
//
|
||||
// # Predicates in SLL+LL Parsing
|
||||
//
|
||||
// SLL decisions don't evaluate predicates until after they reach [DFA] stop
|
||||
// states because they need to create the [DFA] cache that works in all
|
||||
// semantic situations. In contrast, full LL evaluates predicates collected
|
||||
// during start state computation, so it can ignore predicates thereafter.
|
||||
// This means that SLL termination detection can totally ignore semantic
|
||||
// predicates.
|
||||
//
|
||||
// Implementation-wise, [ATNConfigSet] combines stack contexts but not
|
||||
// semantic predicate contexts, so we might see two configurations like the
|
||||
// following:
|
||||
//
|
||||
// (s, 1, x, {}), (s, 1, x', {p})
|
||||
//
|
||||
// Before testing these configurations against others, we have to merge
|
||||
// x and x' (without modifying the existing configurations).
|
||||
// For example, we test (x+x')==x” when looking for conflicts in
|
||||
// the following configurations:
|
||||
//
|
||||
// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})
|
||||
//
|
||||
// If the configuration set has predicates (as indicated by
|
||||
// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
|
||||
// the configurations to strip out all the predicates so that a standard
|
||||
// [ATNConfigSet] will merge everything ignoring predicates.
|
||||
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
|
||||
|
||||
// Configs in rule stop states indicate reaching the end of the decision
|
||||
// rule (local context) or end of start rule (full context). If all
|
||||
// configs meet this condition, then none of the configurations is able
|
||||
// to Match additional input, so we terminate prediction.
|
||||
//
|
||||
if PredictionModeallConfigsInRuleStopStates(configs) {
|
||||
return true
|
||||
}
|
||||
|
||||
// pure SLL mode parsing
|
||||
if mode == PredictionModeSLL {
|
||||
// Don't bother with combining configs from different semantic
|
||||
// contexts if we can fail over to full LL costs more time
|
||||
// since we'll often fail over anyway.
|
||||
if configs.hasSemanticContext {
|
||||
// dup configs, tossing out semantic predicates
|
||||
dup := NewATNConfigSet(false)
|
||||
for _, c := range configs.configs {
|
||||
|
||||
// NewATNConfig({semanticContext:}, c)
|
||||
c = NewATNConfig2(c, SemanticContextNone)
|
||||
dup.Add(c, nil)
|
||||
}
|
||||
configs = dup
|
||||
}
|
||||
// now we have combined contexts for configs with dissimilar predicates
|
||||
}
|
||||
// pure SLL or combined SLL+LL mode parsing
|
||||
altsets := PredictionModegetConflictingAltSubsets(configs)
|
||||
return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
|
||||
}
|
||||
|
||||
// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
|
||||
// [RuleStopState]. Configurations meeting this condition have reached
|
||||
// the end of the decision rule (local context) or end of start rule (full
|
||||
// context).
|
||||
//
|
||||
// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
|
||||
func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
|
||||
for _, c := range configs.configs {
|
||||
if _, ok := c.GetState().(*RuleStopState); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
|
||||
// [RuleStopState]. Configurations meeting this condition have reached
|
||||
// the end of the decision rule (local context) or end of start rule (full
|
||||
// context).
|
||||
//
|
||||
// the func returns true if all configurations in configs are in a
|
||||
// [RuleStopState]
|
||||
func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
|
||||
|
||||
for _, c := range configs.configs {
|
||||
if _, ok := c.GetState().(*RuleStopState); !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
|
||||
//
|
||||
// Can we stop looking ahead during [ATN] simulation or is there some
|
||||
// uncertainty as to which alternative we will ultimately pick, after
|
||||
// consuming more input? Even if there are partial conflicts, we might know
|
||||
// that everything is going to resolve to the same minimum alternative. That
|
||||
// means we can stop since no more lookahead will change that fact. On the
|
||||
// other hand, there might be multiple conflicts that resolve to different
|
||||
// minimums. That means we need more look ahead to decide which of those
|
||||
// alternatives we should predict.
|
||||
//
|
||||
// The basic idea is to split the set of configurations 'C', into
|
||||
// conflicting subsets (s, _, ctx, _) and singleton subsets with
|
||||
// non-conflicting configurations. Two configurations conflict if they have
|
||||
// identical [ATNConfig].state and [ATNConfig].context values
|
||||
// but a different [ATNConfig].alt value, e.g.
|
||||
//
|
||||
// (s, i, ctx, _)
|
||||
//
|
||||
// and
|
||||
//
|
||||
// (s, j, ctx, _) ; for i != j
|
||||
//
|
||||
// Reduce these configuration subsets to the set of possible alternatives.
|
||||
// You can compute the alternative subsets in one pass as follows:
|
||||
//
|
||||
// A_s,ctx = {i | (s, i, ctx, _)}
|
||||
//
|
||||
// for each configuration in C holding s and ctx fixed.
|
||||
//
|
||||
// Or in pseudo-code:
|
||||
//
|
||||
// for each configuration c in C:
|
||||
// map[c] U = c.ATNConfig.alt alt // map hash/equals uses s and x, not alt and not pred
|
||||
//
|
||||
// The values in map are the set of
|
||||
//
|
||||
// A_s,ctx
|
||||
//
|
||||
// sets.
|
||||
//
|
||||
// If
|
||||
//
|
||||
// |A_s,ctx| = 1
|
||||
//
|
||||
// then there is no conflict associated with s and ctx.
|
||||
//
|
||||
// Reduce the subsets to singletons by choosing a minimum of each subset. If
|
||||
// the union of these alternative subsets is a singleton, then no amount of
|
||||
// further lookahead will help us. We will always pick that alternative. If,
|
||||
// however, there is more than one alternative, then we are uncertain which
|
||||
// alternative to predict and must continue looking for resolution. We may
|
||||
// or may not discover an ambiguity in the future, even if there are no
|
||||
// conflicting subsets this round.
|
||||
//
|
||||
// The biggest sin is to terminate early because it means we've made a
|
||||
// decision but were uncertain as to the eventual outcome. We haven't used
|
||||
// enough lookahead. On the other hand, announcing a conflict too late is no
|
||||
// big deal; you will still have the conflict. It's just inefficient. It
|
||||
// might even look until the end of file.
|
||||
//
|
||||
// No special consideration for semantic predicates is required because
|
||||
// predicates are evaluated on-the-fly for full LL prediction, ensuring that
|
||||
// no configuration contains a semantic context during the termination
|
||||
// check.
|
||||
//
|
||||
// # Conflicting Configs
|
||||
//
|
||||
// Two configurations:
|
||||
//
|
||||
// (s, i, x) and (s, j, x')
|
||||
//
|
||||
// conflict when i != j but x = x'. Because we merge all
|
||||
// (s, i, _) configurations together, that means that there are at
|
||||
// most n configurations associated with state s for
|
||||
// n possible alternatives in the decision. The merged stacks
|
||||
// complicate the comparison of configuration contexts x and x'.
|
||||
//
|
||||
// Sam checks to see if one is a subset of the other by calling
|
||||
// merge and checking to see if the merged result is either x or x'.
|
||||
// If the x associated with lowest alternative i
|
||||
// is the superset, then i is the only possible prediction since the
|
||||
// others resolve to min(i) as well. However, if x is
|
||||
// associated with j > i then at least one stack configuration for
|
||||
// j is not in conflict with alternative i. The algorithm
|
||||
// should keep going, looking for more lookahead due to the uncertainty.
|
||||
//
|
||||
// For simplicity, I'm doing an equality check between x and
|
||||
// x', which lets the algorithm continue to consume lookahead longer
|
||||
// than necessary. The reason I like the equality is of course the
|
||||
// simplicity but also because that is the test you need to detect the
|
||||
// alternatives that are actually in conflict.
|
||||
//
|
||||
// # Continue/Stop Rule
|
||||
//
|
||||
// Continue if the union of resolved alternative sets from non-conflicting and
|
||||
// conflicting alternative subsets has more than one alternative. We are
|
||||
// uncertain about which alternative to predict.
|
||||
//
|
||||
// The complete set of alternatives,
|
||||
//
|
||||
// [i for (_, i, _)]
|
||||
//
|
||||
// tells us which alternatives are still in the running for the amount of input we've
|
||||
// consumed at this point. The conflicting sets let us to strip away
|
||||
// configurations that won't lead to more states because we resolve
|
||||
// conflicts to the configuration with a minimum alternate for the
|
||||
// conflicting set.
|
||||
//
|
||||
// Cases
|
||||
//
|
||||
// - no conflicts and more than 1 alternative in set => continue
|
||||
// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
|
||||
// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
|
||||
// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set
|
||||
// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
|
||||
// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
|
||||
// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
|
||||
// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
|
||||
// {1} ∪ {2} = {1,2} => continue
|
||||
// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
|
||||
// {1} ∪ {2} = {1,2} => continue
|
||||
// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
|
||||
// {1} ∪ {3} = {1,3} => continue
|
||||
//
|
||||
// # Exact Ambiguity Detection
|
||||
//
|
||||
// If all states report the same conflicting set of alternatives, then we
|
||||
// know we have the exact ambiguity set:
|
||||
//
|
||||
// |A_i| > 1
|
||||
//
|
||||
// and
|
||||
//
|
||||
// A_i = A_j ; for all i, j
|
||||
//
|
||||
// In other words, we continue examining lookahead until all A_i
|
||||
// have more than one alternative and all A_i are the same. If
|
||||
//
|
||||
// A={{1,2}, {1,3}}
|
||||
//
|
||||
// then regular LL prediction would terminate because the resolved set is {1}.
|
||||
// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
|
||||
// two or one and three so we keep going. We can only stop prediction when
|
||||
// we need exact ambiguity detection when the sets look like:
|
||||
//
|
||||
// A={{1,2}}
|
||||
//
|
||||
// or
|
||||
//
|
||||
// {{1,2},{1,2}}, etc...
|
||||
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
|
||||
return PredictionModegetSingleViableAlt(altsets)
|
||||
}

// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
// than one alternative.
//
// The func returns true if every [BitSet] in altsets has
// [BitSet].cardinality cardinality > 1
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
    return !PredictionModehasNonConflictingAltSet(altsets)
}

// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
// exactly one alternative.
//
// The func returns true if altsets contains at least one [BitSet] with
// [BitSet].cardinality cardinality 1
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
    for i := 0; i < len(altsets); i++ {
        alts := altsets[i]
        if alts.length() == 1 {
            return true
        }
    }
    return false
}

// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
// more than one alternative.
//
// The func returns true if altsets contains a [BitSet] with
// [BitSet].cardinality cardinality > 1, otherwise false
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
    for i := 0; i < len(altsets); i++ {
        alts := altsets[i]
        if alts.length() > 1 {
            return true
        }
    }
    return false
}

// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
//
// The func returns true if every member of altsets is equal to the others.
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
    var first *BitSet

    for i := 0; i < len(altsets); i++ {
        alts := altsets[i]
        if first == nil {
            first = alts
        } else if alts != first {
            return false
        }
    }

    return true
}

// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in
// altsets. If no such alternative exists, this method returns
// [ATNInvalidAltNumber].
//
// @param altsets a collection of alternative subsets
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
    all := PredictionModeGetAlts(altsets)
    if all.length() == 1 {
        return all.minValue()
    }

    return ATNInvalidAltNumber
}

// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
// alternative subsets. This method returns the union of each [BitSet]
// in altsets, being the set of represented alternatives in altsets.
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
    all := NewBitSet()
    for _, alts := range altsets {
        all.or(alts)
    }
    return all
}

// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
//
// for each configuration c in configs:
// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
    configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")

    for _, c := range configs.configs {

        alts, ok := configToAlts.Get(c)
        if !ok {
            alts = NewBitSet()
            configToAlts.Put(c, alts)
        }
        alts.add(c.GetAlt())
    }

    return configToAlts.Values()
}
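
The pseudocode above groups configurations by (state, context) while deliberately ignoring the alternative. That grouping pattern can be sketched without the runtime's internals; the following stand-alone, hypothetical version uses a simplified stand-in for ATNConfig rather than the real type:

package main

import "fmt"

// cfg is a simplified stand-in for ATNConfig: only state, context and alt matter here.
type cfg struct {
    state, ctx, alt int
}

// conflictingAltSubsets mirrors PredictionModegetConflictingAltSubsets:
// configurations sharing (state, ctx) - but not alt - land in the same subset.
func conflictingAltSubsets(configs []cfg) []map[int]bool {
    type key struct{ state, ctx int }
    groups := map[key]map[int]bool{}
    for _, c := range configs {
        k := key{c.state, c.ctx}
        if groups[k] == nil {
            groups[k] = map[int]bool{}
        }
        groups[k][c.alt] = true
    }
    out := make([]map[int]bool, 0, len(groups))
    for _, g := range groups {
        out = append(out, g)
    }
    return out
}

func main() {
    // (s,1,x), (s,2,x), (s',2,y), (s',3,y) from the examples above.
    sets := conflictingAltSubsets([]cfg{{1, 7, 1}, {1, 7, 2}, {2, 9, 2}, {2, 9, 3}})
    fmt.Println(sets) // two subsets, {1,2} and {2,3}, in unspecified order
}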

// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
//
// for each configuration c in configs:
// map[c.ATNConfig.state] U= c.ATNConfig.alt}
func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
    m := NewAltDict()

    for _, c := range configs.configs {
        alts := m.Get(c.GetState().String())
        if alts == nil {
            alts = NewBitSet()
            m.put(c.GetState().String(), alts)
        }
        alts.(*BitSet).add(c.GetAlt())
    }
    return m
}

func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
    values := PredictionModeGetStateToAltMap(configs).values()
    for i := 0; i < len(values); i++ {
        if values[i].(*BitSet).length() == 1 {
            return true
        }
    }
    return false
}

// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
// if there is one.
//
// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
    result := ATNInvalidAltNumber

    for i := 0; i < len(altsets); i++ {
        alts := altsets[i]
        minAlt := alts.minValue()
        if result == ATNInvalidAltNumber {
            result = minAlt
        } else if result != minAlt { // more than 1 viable alt
            return ATNInvalidAltNumber
        }
    }
    return result
}
@ -26,6 +26,9 @@ type Recognizer interface {
    RemoveErrorListeners()
    GetATN() *ATN
    GetErrorListenerDispatch() ErrorListener
    HasError() bool
    GetError() RecognitionException
    SetError(RecognitionException)
}

type BaseRecognizer struct {
@ -36,6 +39,7 @@ type BaseRecognizer struct {
    LiteralNames []string
    SymbolicNames []string
    GrammarFileName string
    SynErr RecognitionException
}

func NewBaseRecognizer() *BaseRecognizer {
@ -45,7 +49,10 @@ func NewBaseRecognizer() *BaseRecognizer {
    return rec
}

//goland:noinspection GoUnusedGlobalVariable
var tokenTypeMapCache = make(map[string]int)

//goland:noinspection GoUnusedGlobalVariable
var ruleIndexMapCache = make(map[string]int)

func (b *BaseRecognizer) checkVersion(toolVersion string) {
@ -55,7 +62,19 @@ func (b *BaseRecognizer) checkVersion(toolVersion string) {
    }
}

func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
func (b *BaseRecognizer) SetError(err RecognitionException) {
    b.SynErr = err
}

func (b *BaseRecognizer) HasError() bool {
    return b.SynErr != nil
}

func (b *BaseRecognizer) GetError() RecognitionException {
    return b.SynErr
}

func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
    panic("action not implemented on Recognizer!")
}

@ -105,9 +124,11 @@ func (b *BaseRecognizer) SetState(v int) {
//    return result
//}

// Get a map from rule names to rule indexes.
// GetRuleIndexMap Get a map from rule names to rule indexes.
//
// <p>Used for XPath and tree pattern compilation.</p>
// Used for XPath and tree pattern compilation.
//
// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {

    panic("Method not defined!")
@ -124,7 +145,8 @@ func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
    //    return result
}

func (b *BaseRecognizer) GetTokenType(tokenName string) int {
// GetTokenType get the token type based upon its name
func (b *BaseRecognizer) GetTokenType(_ string) int {
    panic("Method not defined!")
    //    var ttype = b.GetTokenTypeMap()[tokenName]
    //    if (ttype !=nil) {
@ -162,26 +184,27 @@ func (b *BaseRecognizer) GetTokenType(tokenName string) int {
//    }
//}

// What is the error header, normally line/character position information?//
// GetErrorHeader returns the error header, normally line/character position information.
//
// Can be overridden in sub structs embedding BaseRecognizer.
func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
    line := e.GetOffendingToken().GetLine()
    column := e.GetOffendingToken().GetColumn()
    return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
}

// How should a token be displayed in an error message? The default
// GetTokenErrorDisplay shows how a token should be displayed in an error message.
//
// is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a NewJava type.
// The default is to display just the text, but during development you might
// want to have a lot of information spit out. Override in that case
// to use t.String() (which, for CommonToken, dumps everything about
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a NewJava type.
//
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
// implementations of [ANTLRErrorStrategy] may provide a similar
// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
    if t == nil {
        return "<no token>"
@ -205,12 +228,14 @@ func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
    return NewProxyErrorListener(b.listeners)
}

// subclass needs to override these if there are sempreds or actions
// that the ATN interp needs to execute
func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
// Sempred embedding structs need to override this if there are sempreds or actions
// that the ATN interpreter needs to execute
func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
    return true
}

func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
// Precpred embedding structs need to override this if there are preceding predicates
// that the ATN interpreter needs to execute
func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
    return true
}
40 vendor/github.com/antlr4-go/antlr/v4/rule_context.go generated vendored Normal file
@ -0,0 +1,40 @@
// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.

package antlr

// RuleContext is a record of a single rule invocation. It knows
// which context invoked it, if any. If there is no parent context, then
// naturally the invoking state is not valid. The parent link
// provides a chain upwards from the current rule invocation to the root
// of the invocation tree, forming a stack.
//
// We actually carry no information about the rule associated with this context (except
// when parsing). We keep only the state number of the invoking state from
// the [ATN] submachine that invoked this. Contrast this with the s
// pointer inside [ParserRuleContext] that tracks the current state
// being "executed" for the current rule.
//
// The parent contexts are useful for computing lookahead sets and
// getting error information.
//
// These objects are used during parsing and prediction.
// For the special case of parsers, we use the struct
// [ParserRuleContext], which embeds a RuleContext.
//
// @see ParserRuleContext
type RuleContext interface {
    RuleNode

    GetInvokingState() int
    SetInvokingState(int)

    GetRuleIndex() int
    IsEmpty() bool

    GetAltNumber() int
    SetAltNumber(altNumber int)

    String([]string, RuleContext) string
}
@ -9,14 +9,13 @@ import (
    "strconv"
)

// A tree structure used to record the semantic context in which
// an ATN configuration is valid. It's either a single predicate,
// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
// SemanticContext is a tree structure used to record the semantic context in which
//
// <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
// {@link SemanticContext} within the scope of this outer class.</p>
// an ATN configuration is valid. It's either a single predicate,
// a conjunction p1 && p2, or a sum of products p1 || p2.
//

// I have scoped the AND, OR, and Predicate subclasses of
// [SemanticContext] within the scope of this outer ``class''
type SemanticContext interface {
    Equals(other Collectable[SemanticContext]) bool
    Hash() int
@ -80,7 +79,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {

var SemanticContextNone = NewPredicate(-1, -1, false)

func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
    return p
}

@ -198,7 +197,7 @@ type AND struct {

func NewAND(a, b SemanticContext) *AND {

    operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
    operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
    if aa, ok := a.(*AND); ok {
        for _, o := range aa.opnds {
            operands.Put(o)
@ -230,9 +229,7 @@ func NewAND(a, b SemanticContext) *AND {

    vs := operands.Values()
    opnds := make([]SemanticContext, len(vs))
    for i, v := range vs {
        opnds[i] = v.(SemanticContext)
    }
    copy(opnds, vs)

    and := new(AND)
    and.opnds = opnds
@ -316,12 +313,12 @@ func (a *AND) Hash() int {
    return murmurFinish(h, len(a.opnds))
}

func (a *OR) Hash() int {
    h := murmurInit(41) // Init with a value different from AND
    for _, op := range a.opnds {
func (o *OR) Hash() int {
    h := murmurInit(41) // Init with o value different from AND
    for _, op := range o.opnds {
        h = murmurUpdate(h, op.Hash())
    }
    return murmurFinish(h, len(a.opnds))
    return murmurFinish(h, len(o.opnds))
}

func (a *AND) String() string {
@ -349,7 +346,7 @@ type OR struct {

func NewOR(a, b SemanticContext) *OR {

    operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
    operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
    if aa, ok := a.(*OR); ok {
        for _, o := range aa.opnds {
            operands.Put(o)
@ -382,9 +379,7 @@ func NewOR(a, b SemanticContext) *OR {
    vs := operands.Values()

    opnds := make([]SemanticContext, len(vs))
    for i, v := range vs {
        opnds[i] = v.(SemanticContext)
    }
    copy(opnds, vs)

    o := new(OR)
    o.opnds = opnds
281 vendor/github.com/antlr4-go/antlr/v4/statistics.go generated vendored Normal file
@ -0,0 +1,281 @@
//go:build antlr.stats

package antlr

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
    "sort"
    "strconv"
    "sync"
)

// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default
// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag.
//

// Tells various components to collect statistics - because it is only true when this file is included, it will
// allow the compiler to completely eliminate all the code that is only used when collecting statistics.
const collectStats = true

// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
// It is exported so that it can be used by others to look for things that are not already looked for in the
// runtime statistics.
type goRunStats struct {

    // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created
    // during a run. It is exported so that it can be used by others to look for things that are not already looked for
    // within this package.
    //
    jStats            []*JStatRec
    jStatsLock        sync.RWMutex
    topN              int
    topNByMax         []*JStatRec
    topNByUsed        []*JStatRec
    unusedCollections map[CollectionSource]int
    counts            map[CollectionSource]int
}

const (
    collectionsFile = "collections"
)

var (
    Statistics = &goRunStats{
        topN: 10,
    }
)

type statsOption func(*goRunStats) error

// Configure allows the statistics system to be configured as the user wants and override the defaults
func (s *goRunStats) Configure(options ...statsOption) error {
    for _, option := range options {
        err := option(s)
        if err != nil {
            return err
        }
    }
    return nil
}

// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
//
// For example, if you want to see the top 20 collections by size, you can do:
//
//    antlr.Statistics.Configure(antlr.WithTopN(20))
func WithTopN(topN int) statsOption {
    return func(s *goRunStats) error {
        s.topN = topN
        return nil
    }
}

// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
//
// The function gathers and analyzes a number of statistics about any particular run of
// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
// especially useful in tracking down bugs or performance problems when an ANTLR user could
// supply the output from this package, but cannot supply the grammar file(s) they are using, even
// privately to the maintainers.
//
// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
// must call this function their selves to analyze the statistics. This is because none of the infrastructure is
// extant unless the calling program is built with the antlr.stats tag like so:
//
//    go build -tags antlr.stats .
//
// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
// [Statistics.Report] function to report the statistics.
//
// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send to them to
// me [Jim Idle] directly at jimi@idle.ws
//
// [Jim Idle]: https:://github.com/jim-idle
func (s *goRunStats) Analyze() {

    // Look for anything that looks strange and record it in our local maps etc for the report to present it
    //
    s.CollectionAnomalies()
    s.TopNCollections()
}
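
The doc comment above already describes the workflow; a minimal, hypothetical driver (file and function names invented for illustration) would look like the sketch below, and only compiles when the build includes the antlr.stats tag:

//go:build antlr.stats

package main

import (
    "log"

    "github.com/antlr4-go/antlr/v4"
)

// dumpStats reports runtime collection statistics after a parse run.
// Build with: go build -tags antlr.stats .
func dumpStats() {
    // Widen the "top N" tables from the default of 10 to 20 entries.
    if err := antlr.Statistics.Configure(antlr.WithTopN(20)); err != nil {
        log.Fatal(err)
    }
    antlr.Statistics.Analyze()

    // Report writes asciidoc files into the given directory and then resets the data.
    if err := antlr.Statistics.Report("./stats", "myparser"); err != nil {
        log.Fatal(err)
    }
}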

// TopNCollections looks through all the statistical records and gathers the top ten collections by size.
func (s *goRunStats) TopNCollections() {

    // Let's sort the stat records by MaxSize
    //
    sort.Slice(s.jStats, func(i, j int) bool {
        return s.jStats[i].MaxSize > s.jStats[j].MaxSize
    })

    for i := 0; i < len(s.jStats) && i < s.topN; i++ {
        s.topNByMax = append(s.topNByMax, s.jStats[i])
    }

    // Sort by the number of times used
    //
    sort.Slice(s.jStats, func(i, j int) bool {
        return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
    })
    for i := 0; i < len(s.jStats) && i < s.topN; i++ {
        s.topNByUsed = append(s.topNByUsed, s.jStats[i])
    }
}

// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output
// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be
// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix.
func (s *goRunStats) Report(dir string, prefix string) error {

    isDir, err := isDirectory(dir)
    switch {
    case err != nil:
        return err
    case !isDir:
        return fmt.Errorf("output directory `%s` is not a directory", dir)
    }
    s.reportCollections(dir, prefix)

    // Clean out any old data in case the user forgets
    //
    s.Reset()
    return nil
}

func (s *goRunStats) Reset() {
    s.jStats = nil
    s.topNByUsed = nil
    s.topNByMax = nil
}

func (s *goRunStats) reportCollections(dir, prefix string) {
    cname := filepath.Join(dir, ".asciidoctor")
    // If the file doesn't exist, create it, or append to the file
    f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        log.Fatal(err)
    }
    _, _ = f.WriteString(`// .asciidoctorconfig
++++
<style>
body {
    font-family: "Quicksand", "Montserrat", "Helvetica";
    background-color: black;
}
</style>
++++`)
    _ = f.Close()

    fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc")
    // If the file doesn't exist, create it, or append to the file
    f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
    if err != nil {
        log.Fatal(err)
    }
    defer func(f *os.File) {
        err := f.Close()
        if err != nil {
            log.Fatal(err)
        }
    }(f)
    _, _ = f.WriteString("= Collections for " + prefix + "\n\n")

    _, _ = f.WriteString("== Summary\n")

    if s.unusedCollections != nil {
        _, _ = f.WriteString("=== Unused Collections\n")
        _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n")
        _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n")
        _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n")
        _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n")
        _, _ = f.WriteString(" actually needed.\n\n")

        _, _ = f.WriteString("\n.Unused collections\n")
        _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
        _, _ = f.WriteString("|===\n")
        _, _ = f.WriteString("| Type | Count\n")

        for k, v := range s.unusedCollections {
            _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
        }
        f.WriteString("|===\n\n")
    }

    _, _ = f.WriteString("\n.Summary of Collections\n")
    _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
    _, _ = f.WriteString("|===\n")
    _, _ = f.WriteString("| Type | Count\n")
    for k, v := range s.counts {
        _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
    }
    _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
    _, _ = f.WriteString("|===\n\n")

    _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
    _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
    _, _ = f.WriteString("|===\n")
    _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
    for _, c := range s.topNByMax {
        _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
        _, _ = f.WriteString("| " + c.Description + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
        _, _ = f.WriteString("\n")
    }
    _, _ = f.WriteString("|===\n\n")

    _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
    _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
    _, _ = f.WriteString("|===\n")
    _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
    for _, c := range s.topNByUsed {
        _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
        _, _ = f.WriteString("| " + c.Description + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
        _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
        _, _ = f.WriteString("\n")
    }
    _, _ = f.WriteString("|===\n\n")
}

// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled.
func (s *goRunStats) AddJStatRec(rec *JStatRec) {
    s.jStatsLock.Lock()
    defer s.jStatsLock.Unlock()
    s.jStats = append(s.jStats, rec)
}

// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
func (s *goRunStats) CollectionAnomalies() {
    s.jStatsLock.RLock()
    defer s.jStatsLock.RUnlock()
    s.counts = make(map[CollectionSource]int, len(s.jStats))
    for _, c := range s.jStats {

        // Accumlate raw counts
        //
        s.counts[c.Source]++

        // Look for allocated but unused collections and count them
        if c.MaxSize == 0 && c.Puts == 0 {
            if s.unusedCollections == nil {
                s.unusedCollections = make(map[CollectionSource]int)
            }
            s.unusedCollections[c.Source]++
        }
        if c.MaxSize > 6000 {
            fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
        }
    }

}
23 vendor/github.com/antlr4-go/antlr/v4/stats_data.go generated vendored Normal file
@ -0,0 +1,23 @@
package antlr

// A JStatRec is a record of a particular use of a [JStore], [JMap] or JPCMap] collection. Typically, it will be
// used to look for unused collections that wre allocated anyway, problems with hash bucket clashes, and anomalies
// such as huge numbers of Gets with no entries found GetNoEnt. You can refer to the CollectionAnomalies() function
// for ideas on what can be gleaned from these statistics about collections.
type JStatRec struct {
    Source           CollectionSource
    MaxSize          int
    CurSize          int
    Gets             int
    GetHits          int
    GetMisses        int
    GetHashConflicts int
    GetNoEnt         int
    Puts             int
    PutHits          int
    PutMisses        int
    PutHashConflicts int
    MaxSlotSize      int
    Description      string
    CreateStack      []byte
}
@ -35,6 +35,8 @@ type Token interface {

    GetTokenSource() TokenSource
    GetInputStream() CharStream

    String() string
}

type BaseToken struct {
@ -53,7 +55,7 @@ type BaseToken struct {
const (
    TokenInvalidType = 0

    // During lookahead operations, this "token" signifies we hit rule end ATN state
    // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
    // and did not follow it despite needing to.
    TokenEpsilon = -2

@ -61,15 +63,16 @@ const (

    TokenEOF = -1

    // All tokens go to the parser (unless Skip() is called in that rule)
    // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
    //
    // All tokens go to the parser (unless [Skip] is called in the lexer rule)
    // on a particular "channel". The parser tunes to a particular channel
    // so that whitespace etc... can go to the parser on a "hidden" channel.

    TokenDefaultChannel = 0

    // Anything on different channel than DEFAULT_CHANNEL is not parsed
    // by parser.

    // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel].
    //
    // Anything on a different channel than TokenDefaultChannel is not parsed by parser.
    TokenHiddenChannel = 1
)

@ -118,21 +121,22 @@ func (b *BaseToken) GetInputStream() CharStream {
}

type CommonToken struct {
    *BaseToken
    BaseToken
}

func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {

    t := new(CommonToken)
    t := &CommonToken{
        BaseToken: BaseToken{
            source:     source,
            tokenType:  tokenType,
            channel:    channel,
            start:      start,
            stop:       stop,
            tokenIndex: -1,
        },
    }

    t.BaseToken = new(BaseToken)

    t.source = source
    t.tokenType = tokenType
    t.channel = channel
    t.start = start
    t.stop = stop
    t.tokenIndex = -1
    if t.source.tokenSource != nil {
        t.line = source.tokenSource.GetLine()
        t.column = source.tokenSource.GetCharPositionInLine()
@ -8,13 +8,14 @@ type TokenStream interface {
    IntStream

    LT(k int) Token
    Reset()

    Get(index int) Token
    GetTokenSource() TokenSource
    SetTokenSource(TokenSource)

    GetAllText() string
    GetTextFromInterval(*Interval) string
    GetTextFromInterval(Interval) string
    GetTextFromRuleContext(RuleContext) string
    GetTextFromTokens(Token, Token) string
}
@ -86,14 +86,15 @@ import (
// first example shows.</p>

const (
    Default_Program_Name = "default"
    Program_Init_Size    = 100
    Min_Token_Index      = 0
    DefaultProgramName = "default"
    ProgramInitSize    = 100
    MinTokenIndex      = 0
)

// Define the rewrite operation hierarchy

type RewriteOperation interface {

    // Execute the rewrite operation by possibly adding to the buffer.
    // Return the index of the next token to operate on.
    Execute(buffer *bytes.Buffer) int
@ -112,19 +113,19 @@ type RewriteOperation interface {

type BaseRewriteOperation struct {
    //Current index of rewrites list
    instruction_index int
    instructionIndex int
    //Token buffer index
    index int
    //Substitution text
    text string
    //Actual operation name
    op_name string
    opName string
    //Pointer to token steam
    tokens TokenStream
}

func (op *BaseRewriteOperation) GetInstructionIndex() int {
    return op.instruction_index
    return op.instructionIndex
}

func (op *BaseRewriteOperation) GetIndex() int {
@ -136,7 +137,7 @@ func (op *BaseRewriteOperation) GetText() string {
}

func (op *BaseRewriteOperation) GetOpName() string {
    return op.op_name
    return op.opName
}

func (op *BaseRewriteOperation) GetTokens() TokenStream {
@ -144,7 +145,7 @@ func (op *BaseRewriteOperation) GetTokens() TokenStream {
}

func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
    op.instruction_index = val
    op.instructionIndex = val
}

func (op *BaseRewriteOperation) SetIndex(val int) {
@ -156,20 +157,20 @@ func (op *BaseRewriteOperation) SetText(val string) {
}

func (op *BaseRewriteOperation) SetOpName(val string) {
    op.op_name = val
    op.opName = val
}

func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
    op.tokens = val
}

func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
    return op.index
}

func (op *BaseRewriteOperation) String() string {
    return fmt.Sprintf("<%s@%d:\"%s\">",
        op.op_name,
        op.opName,
        op.tokens.Get(op.GetIndex()),
        op.text,
    )
@ -182,10 +183,10 @@ type InsertBeforeOp struct {

func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
    return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
        index:   index,
        text:    text,
        op_name: "InsertBeforeOp",
        tokens:  stream,
        index:  index,
        text:   text,
        opName: "InsertBeforeOp",
        tokens: stream,
    }}
}

@ -201,20 +202,21 @@ func (op *InsertBeforeOp) String() string {
    return op.BaseRewriteOperation.String()
}

// Distinguish between insert after/before to do the "insert afters"
// first and then the "insert befores" at same index. Implementation
// of "insert after" is "insert before index+1".

// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions
// first and then the "insert before" instructions at same index. Implementation
// of "insert after" is "insert before index+1".
type InsertAfterOp struct {
    BaseRewriteOperation
}

func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
    return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
        index:  index + 1,
        text:   text,
        tokens: stream,
    }}
    return &InsertAfterOp{
        BaseRewriteOperation: BaseRewriteOperation{
            index:  index + 1,
            text:   text,
            tokens: stream,
        },
    }
}

func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
@ -229,7 +231,7 @@ func (op *InsertAfterOp) String() string {
    return op.BaseRewriteOperation.String()
}

// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp
// instructions.
type ReplaceOp struct {
    BaseRewriteOperation
@ -239,10 +241,10 @@ type ReplaceOp struct {
func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
    return &ReplaceOp{
        BaseRewriteOperation: BaseRewriteOperation{
            index:   from,
            text:    text,
            op_name: "ReplaceOp",
            tokens:  stream,
            index:  from,
            text:   text,
            opName: "ReplaceOp",
            tokens: stream,
        },
        LastIndex: to,
    }
@ -270,17 +272,17 @@ type TokenStreamRewriter struct {
    // You may have multiple, named streams of rewrite operations.
    // I'm calling these things "programs."
    // Maps String (name) → rewrite (List)
    programs                   map[string][]RewriteOperation
    last_rewrite_token_indexes map[string]int
    programs                map[string][]RewriteOperation
    lastRewriteTokenIndexes map[string]int
}

func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
    return &TokenStreamRewriter{
        tokens: tokens,
        programs: map[string][]RewriteOperation{
            Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
            DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
        },
        last_rewrite_token_indexes: map[string]int{},
        lastRewriteTokenIndexes: map[string]int{},
    }
}
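
Everything in the rewriter funnels through the default program unless the caller names its own. A hypothetical usage sketch follows (not part of this change; the tokens argument is assumed to come from elsewhere, e.g. a CommonTokenStream fed by a generated lexer):

package main

import "github.com/antlr4-go/antlr/v4"

// rewriteSketch illustrates the program-based rewriter API above.
func rewriteSketch(tokens antlr.TokenStream) string {
    r := antlr.NewTokenStreamRewriter(tokens)

    // Operations on the *Default methods go to DefaultProgramName.
    r.InsertBeforeDefault(0, "// edited\n")
    r.ReplaceDefaultPos(1, "renamed")

    // A named program keeps an independent instruction list, and
    // DeleteProgram can discard it without touching the default one.
    r.InsertAfter("experiment", 2, " /*tail*/")
    r.DeleteProgram("experiment")

    // Render the original tokens with the default program's edits applied.
    return r.GetTextDefault()
}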

@ -291,110 +293,110 @@ func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
// Rollback the instruction stream for a program so that
// the indicated instruction (via instructionIndex) is no
// longer in the stream. UNTESTED!
func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
    is, ok := tsr.programs[program_name]
func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
    is, ok := tsr.programs[programName]
    if ok {
        tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
        tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
    }
}

func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
    tsr.Rollback(Default_Program_Name, instruction_index)
func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
    tsr.Rollback(DefaultProgramName, instructionIndex)
}

// Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
    tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
// DeleteProgram Reset the program so that no instructions exist
func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
    tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included
}

func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
    tsr.DeleteProgram(Default_Program_Name)
    tsr.DeleteProgram(DefaultProgramName)
}

func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
    // to insert after, just insert before next index (even if past end)
    var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
    rewrites := tsr.GetProgram(program_name)
    rewrites := tsr.GetProgram(programName)
    op.SetInstructionIndex(len(rewrites))
    tsr.AddToProgram(program_name, op)
    tsr.AddToProgram(programName, op)
}

func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
    tsr.InsertAfter(Default_Program_Name, index, text)
    tsr.InsertAfter(DefaultProgramName, index, text)
}

func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
    tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
    tsr.InsertAfter(programName, token.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
    var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
    rewrites := tsr.GetProgram(program_name)
    rewrites := tsr.GetProgram(programName)
    op.SetInstructionIndex(len(rewrites))
    tsr.AddToProgram(program_name, op)
    tsr.AddToProgram(programName, op)
}

func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
    tsr.InsertBefore(Default_Program_Name, index, text)
    tsr.InsertBefore(DefaultProgramName, index, text)
}

func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
    tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
    tsr.InsertBefore(programName, token.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
    if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
        panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
            from, to, tsr.tokens.Size()))
    }
    var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
    rewrites := tsr.GetProgram(program_name)
    rewrites := tsr.GetProgram(programName)
    op.SetInstructionIndex(len(rewrites))
    tsr.AddToProgram(program_name, op)
    tsr.AddToProgram(programName, op)
}

func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
    tsr.Replace(Default_Program_Name, from, to, text)
    tsr.Replace(DefaultProgramName, from, to, text)
}

func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
    tsr.ReplaceDefault(index, index, text)
}

func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
    tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
    tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
}

func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
    tsr.ReplaceToken(Default_Program_Name, from, to, text)
    tsr.ReplaceToken(DefaultProgramName, from, to, text)
}

func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
    tsr.ReplaceTokenDefault(index, index, text)
}

func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
    tsr.Replace(program_name, from, to, "")
func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
    tsr.Replace(programName, from, to, "")
}

func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
    tsr.Delete(Default_Program_Name, from, to)
    tsr.Delete(DefaultProgramName, from, to)
}

func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
    tsr.DeleteDefault(index, index)
}

func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
    tsr.ReplaceToken(program_name, from, to, "")
func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
    tsr.ReplaceToken(programName, from, to, "")
}

func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
    tsr.DeleteToken(Default_Program_Name, from, to)
    tsr.DeleteToken(DefaultProgramName, from, to)
}

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
    i, ok := tsr.last_rewrite_token_indexes[program_name]
func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
    i, ok := tsr.lastRewriteTokenIndexes[programName]
    if !ok {
        return -1
    }
@ -402,15 +404,15 @@ func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) in
}

func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
    return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
    return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
}

func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
    tsr.last_rewrite_token_indexes[program_name] = i
func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
    tsr.lastRewriteTokenIndexes[programName] = i
}

func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
    is := make([]RewriteOperation, 0, Program_Init_Size)
    is := make([]RewriteOperation, 0, ProgramInitSize)
    tsr.programs[name] = is
    return is
}
@ -429,24 +431,24 @@ func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
    return is
}

// Return the text from the original tokens altered per the
// GetTextDefault returns the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetTextDefault() string {
    return tsr.GetText(
        Default_Program_Name,
        DefaultProgramName,
        NewInterval(0, tsr.tokens.Size()-1))
}

// Return the text from the original tokens altered per the
// GetText returns the text from the original tokens altered per the
// instructions given to this rewriter.
func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
    rewrites := tsr.programs[program_name]
func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
    rewrites := tsr.programs[programName]
    start := interval.Start
    stop := interval.Stop
    // ensure start/end are in range
    stop = min(stop, tsr.tokens.Size()-1)
    start = max(start, 0)
    if rewrites == nil || len(rewrites) == 0 {
    if len(rewrites) == 0 {
        return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
    }
    buf := bytes.Buffer{}
@ -482,11 +484,13 @@ func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval)
    return buf.String()
}

// We need to combine operations and report invalid operations (like
// overlapping replaces that are not completed nested). Inserts to
// same index need to be combined etc... Here are the cases:
// reduceToSingleOperationPerIndex combines operations and report invalid operations (like
// overlapping replaces that are not completed nested). Inserts to
// same index need to be combined etc...
//
// I.i.u I.j.v leave alone, nonoverlapping
// Here are the cases:
//
// I.i.u I.j.v leave alone, non-overlapping
// I.i.u I.i.v combine: Iivu
//
// R.i-j.u R.x-y.v | i-j in x-y delete first R
@ -498,38 +502,38 @@ func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval)
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
//
// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
// we're not deleting i)
// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
// we're not deleting i)
// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping
// R.x-y.v I.i.u | i in x-y ERROR
// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping
//
// I.i.u = insert u before op @ index i
// R.x-y.u = replace x-y indexed tokens with u
//
// First we need to examine replaces. For any replace op:
// First we need to examine replaces. For any replace op:
//
// 1. wipe out any insertions before op within that range.
// 2. Drop any replace op before that is contained completely within
// that range.
// 3. Throw exception upon boundary overlap with any previous replace.
// 1. wipe out any insertions before op within that range.
// 2. Drop any replace op before that is contained completely within
// that range.
// 3. Throw exception upon boundary overlap with any previous replace.
//
// Then we can deal with inserts:
// Then we can deal with inserts:
//
// 1. for any inserts to same index, combine even if not adjacent.
// 2. for any prior replace with same left boundary, combine this
// insert with replace and delete this replace.
// 3. throw exception if index in same range as previous replace
// 1. for any inserts to same index, combine even if not adjacent.
// 2. for any prior replace with same left boundary, combine this
// insert with replace and delete this 'replace'.
// 3. throw exception if index in same range as previous replace
//
// Don't actually delete; make op null in list. Easier to walk list.
// Later we can throw as we add to index → op map.
// Don't actually delete; make op null in list. Easier to walk list.
// Later we can throw as we add to index → op map.
//
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
// inserted stuff would be before the replace range. But, if you
// add tokens in front of a method body '{' and then delete the method
// body, I think the stuff before the '{' you added should disappear too.
// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
// inserted stuff would be before the 'replace' range. But, if you
// add tokens in front of a method body '{' and then delete the method
// body, I think the stuff before the '{' you added should disappear too.
//
// Return a map from token index to operation.
// The func returns a map from token index to operation.
func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
    // WALK REPLACES
    for i := 0; i < len(rewrites); i++ {
@ -547,7 +551,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
            if iop.index == rop.index {
                // E.g., insert before 2, delete 2..2; update replace
                // text to include insert before, kill insert
                rewrites[iop.instruction_index] = nil
                rewrites[iop.instructionIndex] = nil
                if rop.text != "" {
                    rop.text = iop.text + rop.text
                } else {
@ -555,7 +559,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
                }
            } else if iop.index > rop.index && iop.index <= rop.LastIndex {
                // delete insert as it's a no-op.
                rewrites[iop.instruction_index] = nil
                rewrites[iop.instructionIndex] = nil
            }
        }
    }
@ -564,7 +568,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
        if prevop, ok := rewrites[j].(*ReplaceOp); ok {
            if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
                // delete replace as it's a no-op.
                rewrites[prevop.instruction_index] = nil
                rewrites[prevop.instructionIndex] = nil
                continue
            }
            // throw exception unless disjoint or identical
@ -572,10 +576,9 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
            // Delete special case of replace (text==null):
            // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
            if prevop.text == "" && rop.text == "" && !disjoint {
                rewrites[prevop.instruction_index] = nil
                rewrites[prevop.instructionIndex] = nil
                rop.index = min(prevop.index, rop.index)
                rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
                println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
            } else if !disjoint {
                panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
            }
@ -607,7 +610,7 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
        if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
            if prevIop.index == iop.GetIndex() {
                iop.SetText(iop.GetText() + prevIop.text)
                rewrites[prevIop.instruction_index] = nil
                rewrites[prevIop.instructionIndex] = nil
            }
        }
    }
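
To make the reduction rules above concrete, here is a hypothetical sequence (same assumptions as the earlier rewriter sketch: package main, the antlr import, and a token stream with at least ten tokens produced elsewhere) that exercises two of them:

package main

import "github.com/antlr4-go/antlr/v4"

// reductionSketch illustrates two reduction rules from
// reduceToSingleOperationPerIndex; tokens is an assumed, pre-built stream.
func reductionSketch(tokens antlr.TokenStream) string {
    r := antlr.NewTokenStreamRewriter(tokens)

    // Two inserts at the same index are folded into a single insert op,
    // even though they are not adjacent in the instruction list.
    r.InsertBeforeDefault(0, "A")
    r.InsertBeforeDefault(0, "B")

    // R.4-5 is completely contained in the later R.3-6, so it is dropped
    // as a no-op; overlapping-but-not-nested replaces would panic instead.
    r.ReplaceDefault(4, 5, "inner")
    r.ReplaceDefault(3, 6, "outer")

    // The reduction itself runs lazily when the text is rendered.
    return r.GetTextDefault()
}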
@ -72,7 +72,7 @@ func (t *BaseTransition) getSerializationType() int {
    return t.serializationType
}

func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *BaseTransition) Matches(_, _, _ int) bool {
    panic("Not implemented")
}

@ -89,6 +89,7 @@ const (
    TransitionPRECEDENCE = 10
)

//goland:noinspection GoUnusedGlobalVariable
var TransitionserializationNames = []string{
    "INVALID",
    "EPSILON",
@ -127,19 +128,22 @@ var TransitionserializationNames = []string{
//    TransitionPRECEDENCE
//}

// AtomTransition
// TODO: make all transitions sets? no, should remove set edges
type AtomTransition struct {
    *BaseTransition
    BaseTransition
}

func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {

    t := new(AtomTransition)
    t.BaseTransition = NewBaseTransition(target)

    t.label = intervalSet // The token type or character value or, signifies special intervalSet.
    t := &AtomTransition{
        BaseTransition: BaseTransition{
            target:            target,
            serializationType: TransitionATOM,
            label:             intervalSet,
            isEpsilon:         false,
        },
    }
    t.intervalSet = t.makeLabel()
    t.serializationType = TransitionATOM

    return t
}
@ -150,7 +154,7 @@ func (t *AtomTransition) makeLabel() *IntervalSet {
    return s
}

func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *AtomTransition) Matches(symbol, _, _ int) bool {
    return t.label == symbol
}

@ -159,48 +163,45 @@ func (t *AtomTransition) String() string {
}

type RuleTransition struct {
    *BaseTransition

    BaseTransition
    followState           ATNState
    ruleIndex, precedence int
}

func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {

    t := new(RuleTransition)
    t.BaseTransition = NewBaseTransition(ruleStart)

    t.ruleIndex = ruleIndex
    t.precedence = precedence
    t.followState = followState
    t.serializationType = TransitionRULE
    t.isEpsilon = true

    return t
    return &RuleTransition{
        BaseTransition: BaseTransition{
            target:            ruleStart,
            isEpsilon:         true,
            serializationType: TransitionRULE,
        },
        ruleIndex:   ruleIndex,
        precedence:  precedence,
        followState: followState,
    }
}

func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *RuleTransition) Matches(_, _, _ int) bool {
    return false
}

type EpsilonTransition struct {
    *BaseTransition

    BaseTransition
    outermostPrecedenceReturn int
}

func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {

    t := new(EpsilonTransition)
    t.BaseTransition = NewBaseTransition(target)

    t.serializationType = TransitionEPSILON
    t.isEpsilon = true
    t.outermostPrecedenceReturn = outermostPrecedenceReturn
    return t
    return &EpsilonTransition{
        BaseTransition: BaseTransition{
            target:            target,
            serializationType: TransitionEPSILON,
            isEpsilon:         true,
        },
        outermostPrecedenceReturn: outermostPrecedenceReturn,
    }
}

func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *EpsilonTransition) Matches(_, _, _ int) bool {
    return false
}

@ -209,19 +210,20 @@ func (t *EpsilonTransition) String() string {
}

type RangeTransition struct {
    *BaseTransition

    BaseTransition
    start, stop int
}

func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {

    t := new(RangeTransition)
    t.BaseTransition = NewBaseTransition(target)

    t.serializationType = TransitionRANGE
    t.start = start
    t.stop = stop
    t := &RangeTransition{
        BaseTransition: BaseTransition{
            target:            target,
            serializationType: TransitionRANGE,
            isEpsilon:         false,
        },
        start: start,
        stop:  stop,
    }
    t.intervalSet = t.makeLabel()
    return t
}
@ -232,7 +234,7 @@ func (t *RangeTransition) makeLabel() *IntervalSet {
    return s
}

func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *RangeTransition) Matches(symbol, _, _ int) bool {
    return symbol >= t.start && symbol <= t.stop
}

@ -252,40 +254,41 @@ type AbstractPredicateTransition interface {
}

type BaseAbstractPredicateTransition struct {
    *BaseTransition
    BaseTransition
}

func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {

    t := new(BaseAbstractPredicateTransition)
    t.BaseTransition = NewBaseTransition(target)

    return t
    return &BaseAbstractPredicateTransition{
        BaseTransition: BaseTransition{
            target: target,
        },
    }
}

func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}

type PredicateTransition struct {
    *BaseAbstractPredicateTransition

    BaseAbstractPredicateTransition
    isCtxDependent       bool
    ruleIndex, predIndex int
}

func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {

    t := new(PredicateTransition)
    t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)

    t.serializationType = TransitionPREDICATE
    t.ruleIndex = ruleIndex
    t.predIndex = predIndex
    t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
    t.isEpsilon = true
    return t
    return &PredicateTransition{
        BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
            BaseTransition: BaseTransition{
                target:            target,
                serializationType: TransitionPREDICATE,
                isEpsilon:         true,
            },
        },
        isCtxDependent: isCtxDependent,
        ruleIndex:      ruleIndex,
        predIndex:      predIndex,
    }
}

func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
func (t *PredicateTransition) Matches(_, _, _ int) bool {
    return false
}

@ -298,26 +301,25 @@ func (t *PredicateTransition) String() string {
}

type ActionTransition struct {
    *BaseTransition

    BaseTransition
    isCtxDependent                    bool
    ruleIndex, actionIndex, predIndex int
}

func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {

    t := new(ActionTransition)
    t.BaseTransition = NewBaseTransition(target)

    t.serializationType = TransitionACTION
    t.ruleIndex = ruleIndex
    t.actionIndex = actionIndex
    t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
    t.isEpsilon = true
    return t
    return &ActionTransition{
        BaseTransition: BaseTransition{
            target:            target,
            serializationType: TransitionACTION,
            isEpsilon:         true,
        },
        isCtxDependent: isCtxDependent,
        ruleIndex:      ruleIndex,
        actionIndex:    actionIndex,
    }
}

func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
||||
func (t *ActionTransition) Matches(_, _, _ int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
@ -326,26 +328,27 @@ func (t *ActionTransition) String() string {
|
||||
}
|
||||
|
||||
type SetTransition struct {
|
||||
*BaseTransition
|
||||
BaseTransition
|
||||
}
|
||||
|
||||
func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
|
||||
t := &SetTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionSET,
|
||||
},
|
||||
}
|
||||
|
||||
t := new(SetTransition)
|
||||
t.BaseTransition = NewBaseTransition(target)
|
||||
|
||||
t.serializationType = TransitionSET
|
||||
if set != nil {
|
||||
t.intervalSet = set
|
||||
} else {
|
||||
t.intervalSet = NewIntervalSet()
|
||||
t.intervalSet.addOne(TokenInvalidType)
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
||||
func (t *SetTransition) Matches(symbol, _, _ int) bool {
|
||||
return t.intervalSet.contains(symbol)
|
||||
}
|
||||
|
||||
@ -354,16 +357,24 @@ func (t *SetTransition) String() string {
|
||||
}
|
||||
|
||||
type NotSetTransition struct {
|
||||
*SetTransition
|
||||
SetTransition
|
||||
}
|
||||
|
||||
func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
|
||||
|
||||
t := new(NotSetTransition)
|
||||
|
||||
t.SetTransition = NewSetTransition(target, set)
|
||||
|
||||
t.serializationType = TransitionNOTSET
|
||||
t := &NotSetTransition{
|
||||
SetTransition: SetTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionNOTSET,
|
||||
},
|
||||
},
|
||||
}
|
||||
if set != nil {
|
||||
t.intervalSet = set
|
||||
} else {
|
||||
t.intervalSet = NewIntervalSet()
|
||||
t.intervalSet.addOne(TokenInvalidType)
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
@ -377,16 +388,16 @@ func (t *NotSetTransition) String() string {
|
||||
}
|
||||
|
||||
type WildcardTransition struct {
|
||||
*BaseTransition
|
||||
BaseTransition
|
||||
}
|
||||
|
||||
func NewWildcardTransition(target ATNState) *WildcardTransition {
|
||||
|
||||
t := new(WildcardTransition)
|
||||
t.BaseTransition = NewBaseTransition(target)
|
||||
|
||||
t.serializationType = TransitionWILDCARD
|
||||
return t
|
||||
return &WildcardTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionWILDCARD,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
||||
@ -398,24 +409,24 @@ func (t *WildcardTransition) String() string {
|
||||
}
|
||||
|
||||
type PrecedencePredicateTransition struct {
|
||||
*BaseAbstractPredicateTransition
|
||||
|
||||
BaseAbstractPredicateTransition
|
||||
precedence int
|
||||
}
|
||||
|
||||
func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
|
||||
|
||||
t := new(PrecedencePredicateTransition)
|
||||
t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
|
||||
|
||||
t.serializationType = TransitionPRECEDENCE
|
||||
t.precedence = precedence
|
||||
t.isEpsilon = true
|
||||
|
||||
return t
|
||||
return &PrecedencePredicateTransition{
|
||||
BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
|
||||
BaseTransition: BaseTransition{
|
||||
target: target,
|
||||
serializationType: TransitionPRECEDENCE,
|
||||
isEpsilon: true,
|
||||
},
|
||||
},
|
||||
precedence: precedence,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
|
||||
func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool {
|
||||
return false
|
||||
}
|
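The refactor running through all of these constructors is the same: pointer embedding (`*BaseTransition` set via `NewBaseTransition` followed by field-by-field mutation) is replaced with value embedding initialized in a single composite literal. A minimal standalone sketch of the two styles, using illustrative names rather than the actual ANTLR runtime types:

```go
package main

import "fmt"

type base struct {
	target    int
	isEpsilon bool
}

// Old style: pointer embedding; fields assigned one by one after
// construction, with a separate heap allocation for the embedded struct.
type oldT struct {
	*base
}

func newOldT(target int) *oldT {
	t := &oldT{base: &base{}}
	t.target = target // promoted field on the embedded *base
	return t
}

// New style: value embedding; everything is set in one composite literal
// and the embedded struct lives inline in the outer one.
type newT struct {
	base
}

func newNewT(target int) *newT {
	return &newT{base: base{target: target, isEpsilon: false}}
}

func main() {
	fmt.Println(newOldT(1).target, newNewT(1).target) // 1 1
}
```

Besides saving an allocation per transition, the literal form makes it harder to forget a field such as `serializationType`, which the old code set in a trailing assignment.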
@@ -21,29 +21,23 @@ type Tree interface {

type SyntaxTree interface {
	Tree

	GetSourceInterval() *Interval
	GetSourceInterval() Interval
}

type ParseTree interface {
	SyntaxTree

	Accept(Visitor ParseTreeVisitor) interface{}
	GetText() string

	ToStringTree([]string, Recognizer) string
}

type RuleNode interface {
	ParseTree

	GetRuleContext() RuleContext
	GetBaseRuleContext() *BaseRuleContext
}

type TerminalNode interface {
	ParseTree

	GetSymbol() Token
}

@@ -64,12 +58,12 @@ type BaseParseTreeVisitor struct{}

var _ ParseTreeVisitor = &BaseParseTreeVisitor{}

func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}            { return tree.Accept(v) }
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}     { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}   { return nil }
func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}         { return tree.Accept(v) }
func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{}     { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{}   { return nil }

// TODO
// TODO: Implement this?
//func (this ParseTreeVisitor) Visit(ctx) {
//	if (Utils.isArray(ctx)) {
//		self := this
@@ -101,15 +95,14 @@ type BaseParseTreeListener struct{}

var _ ParseTreeListener = &BaseParseTreeListener{}

func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)      {}
func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)        {}
func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)  {}
func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode)       {}
func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode)         {}
func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext)  {}

type TerminalNodeImpl struct {
	parentCtx RuleContext

	symbol Token
	symbol Token
}

var _ TerminalNode = &TerminalNodeImpl{}
@@ -123,7 +116,7 @@ func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
	return tn
}

func (t *TerminalNodeImpl) GetChild(i int) Tree {
func (t *TerminalNodeImpl) GetChild(_ int) Tree {
	return nil
}

@@ -131,7 +124,7 @@ func (t *TerminalNodeImpl) GetChildren() []Tree {
	return nil
}

func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
	panic("Cannot set children on terminal node")
}

@@ -151,7 +144,7 @@ func (t *TerminalNodeImpl) GetPayload() interface{} {
	return t.symbol
}

func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
func (t *TerminalNodeImpl) GetSourceInterval() Interval {
	if t.symbol == nil {
		return TreeInvalidInterval
	}
@@ -179,7 +172,7 @@ func (t *TerminalNodeImpl) String() string {
	return t.symbol.GetText()
}

func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
	return t.String()
}

@@ -214,10 +207,9 @@ func NewParseTreeWalker() *ParseTreeWalker {
	return new(ParseTreeWalker)
}

// Performs a walk on the given parse tree starting at the root and going down recursively
// with depth-first search. On each node, EnterRule is called before
// recursively walking down into child nodes, then
// ExitRule is called after the recursive call to wind up.
// Walk performs a walk on the given parse tree starting at the root and going down recursively
// with depth-first search. On each node, [EnterRule] is called before
// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	switch tt := t.(type) {
	case ErrorNode:
@@ -234,7 +226,7 @@ func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	}
}

// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule]
// then by triggering the event specific to the given parse tree node
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
@@ -242,12 +234,71 @@ func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
	ctx.EnterRule(listener)
}

// Exits a grammar rule by first triggering the event specific to the given parse tree node
// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node
// then by triggering the generic event [ParseTreeListener].ExitEveryRule
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	ctx.ExitRule(listener)
	listener.ExitEveryRule(ctx)
}

//goland:noinspection GoUnusedGlobalVariable
var ParseTreeWalkerDefault = NewParseTreeWalker()

type IterativeParseTreeWalker struct {
	*ParseTreeWalker
}

//goland:noinspection GoUnusedExportedFunction
func NewIterativeParseTreeWalker() *IterativeParseTreeWalker {
	return new(IterativeParseTreeWalker)
}

func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	var stack []Tree
	var indexStack []int
	currentNode := t
	currentIndex := 0

	for currentNode != nil {
		// pre-order visit
		switch tt := currentNode.(type) {
		case ErrorNode:
			listener.VisitErrorNode(tt)
		case TerminalNode:
			listener.VisitTerminal(tt)
		default:
			i.EnterRule(listener, currentNode.(RuleNode))
		}
		// Move down to first child, if it exists
		if currentNode.GetChildCount() > 0 {
			stack = append(stack, currentNode)
			indexStack = append(indexStack, currentIndex)
			currentIndex = 0
			currentNode = currentNode.GetChild(0)
			continue
		}

		for {
			// post-order visit
			if ruleNode, ok := currentNode.(RuleNode); ok {
				i.ExitRule(listener, ruleNode)
			}
			// No parent, so no siblings
			if len(stack) == 0 {
				currentNode = nil
				currentIndex = 0
				break
			}
			// Move to next sibling if possible
			currentIndex++
			if stack[len(stack)-1].GetChildCount() > currentIndex {
				currentNode = stack[len(stack)-1].GetChild(currentIndex)
				break
			}
			// No next sibling, so move up
			currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1]
			currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1]
		}
	}
}
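For orientation, both walkers drive a ParseTreeListener over a tree; the iterative variant added above replaces recursion with explicit node and child-index stacks, which avoids deep call stacks on very unbalanced trees. A small usage sketch against the runtime's exported API; the counting listener and the bare terminal node are illustrative stand-ins for a generated listener and a real parse tree:

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// countingListener embeds the no-op base listener and overrides only
// the one callback it cares about.
type countingListener struct {
	antlr.BaseParseTreeListener
	rules int
}

func (l *countingListener) EnterEveryRule(ctx antlr.ParserRuleContext) { l.rules++ }

func main() {
	// A bare terminal node stands in for a real parsed tree here; in
	// practice the tree would come from a generated parser.
	var tree antlr.Tree = antlr.NewTerminalNodeImpl(nil)

	l := &countingListener{}
	antlr.ParseTreeWalkerDefault.Walk(l, tree)
	fmt.Println(l.rules) // 0: a lone terminal contains no rule nodes
}
```

Swapping in `antlr.NewIterativeParseTreeWalker()` for `ParseTreeWalkerDefault` should produce the same listener callbacks in the same order.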
@@ -8,10 +8,8 @@ import "fmt"

/** A set of utility routines useful for all kinds of ANTLR trees. */

// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
//
// node payloads to get the text for the nodes. Detect
// parse trees and extract data appropriately.
// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the
// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {

	if recog != nil {
@@ -32,7 +30,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
	}
	for i := 1; i < c; i++ {
		s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
		res += (" " + s)
		res += " " + s
	}
	res += ")"
	return res
@@ -62,7 +60,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
		}
	}

	// no recog for rule names
	// no recognition for rule names
	payload := t.GetPayload()
	if p2, ok := payload.(Token); ok {
		return p2.GetText()
@@ -71,7 +69,9 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
	return fmt.Sprint(t.GetPayload())
}

// Return ordered list of all children of this node
// TreesGetChildren returns an ordered list of all children of this node
//
//goland:noinspection GoUnusedExportedFunction
func TreesGetChildren(t Tree) []Tree {
	list := make([]Tree, 0)
	for i := 0; i < t.GetChildCount(); i++ {
@@ -80,9 +80,10 @@ func TreesGetChildren(t Tree) []Tree {
	return list
}

// Return a list of all ancestors of this node. The first node of
// list is the root and the last is the parent of this node.
// TreesgetAncestors returns a list of all ancestors of this node. The first node of the list is the root
// and the last node is the parent of this node.
//
//goland:noinspection GoUnusedExportedFunction
func TreesgetAncestors(t Tree) []Tree {
	ancestors := make([]Tree, 0)
	t = t.GetParent()
@@ -94,10 +95,12 @@ func TreesgetAncestors(t Tree) []Tree {
	return ancestors
}

//goland:noinspection GoUnusedExportedFunction
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
	return TreesfindAllNodes(t, ttype, true)
}

//goland:noinspection GoUnusedExportedFunction
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
	return TreesfindAllNodes(t, ruleIndex, false)
}
@@ -129,6 +132,7 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr
	}
}

//goland:noinspection GoUnusedExportedFunction
func TreesDescendants(t ParseTree) []ParseTree {
	nodes := []ParseTree{t}
	for i := 0; i < t.GetChildCount(); i++ {
@@ -9,8 +9,10 @@ import (
	"errors"
	"fmt"
	"math/bits"
	"os"
	"strconv"
	"strings"
	"syscall"
)

func intMin(a, b int) int {
@@ -31,7 +33,7 @@ func intMax(a, b int) int {

type IntStack []int

var ErrEmptyStack = errors.New("Stack is empty")
var ErrEmptyStack = errors.New("stack is empty")

func (s *IntStack) Pop() (int, error) {
	l := len(*s) - 1
@@ -47,33 +49,13 @@ func (s *IntStack) Push(e int) {
	*s = append(*s, e)
}

type comparable interface {
	Equals(other Collectable[any]) bool
}

func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool {
	return a.Equals(b)
}

func standardHashFunction(a interface{}) int {
	if h, ok := a.(hasher); ok {
		return h.Hash()
	}

	panic("Not Hasher")
}

type hasher interface {
	Hash() int
}

const bitsPerWord = 64

func indexForBit(bit int) int {
	return bit / bitsPerWord
}

//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction
func wordForBit(data []uint64, bit int) uint64 {
	idx := indexForBit(bit)
	if idx >= len(data) {
@@ -94,6 +76,8 @@ type BitSet struct {
	data []uint64
}

// NewBitSet creates a new bitwise set
// TODO: See if we can replace with the standard library's BitSet
func NewBitSet() *BitSet {
	return &BitSet{}
}
@@ -123,7 +107,7 @@ func (b *BitSet) or(set *BitSet) {
	setLen := set.minLen()
	maxLen := intMax(bLen, setLen)
	if maxLen > len(b.data) {
		// Increase the size of len(b.data) to repesent the bits in both sets.
		// Increase the size of len(b.data) to represent the bits in both sets.
		data := make([]uint64, maxLen)
		copy(data, b.data)
		b.data = data
@@ -246,37 +230,6 @@ func (a *AltDict) values() []interface{} {
	return vs
}

type DoubleDict struct {
	data map[int]map[int]interface{}
}

func NewDoubleDict() *DoubleDict {
	dd := new(DoubleDict)
	dd.data = make(map[int]map[int]interface{})
	return dd
}

func (d *DoubleDict) Get(a, b int) interface{} {
	data := d.data[a]

	if data == nil {
		return nil
	}

	return data[b]
}

func (d *DoubleDict) set(a, b int, o interface{}) {
	data := d.data[a]

	if data == nil {
		data = make(map[int]interface{})
		d.data[a] = data
	}

	data[b] = o
}

func EscapeWhitespace(s string, escapeSpaces bool) string {

	s = strings.Replace(s, "\t", "\\t", -1)
@@ -288,6 +241,7 @@ func EscapeWhitespace(s string, escapeSpaces bool) string {
	return s
}

//goland:noinspection GoUnusedExportedFunction
func TerminalNodeToStringArray(sa []TerminalNode) []string {
	st := make([]string, len(sa))

@@ -298,6 +252,7 @@ func TerminalNodeToStringArray(sa []TerminalNode) []string {
	return st
}

//goland:noinspection GoUnusedExportedFunction
func PrintArrayJavaStyle(sa []string) string {
	var buffer bytes.Buffer

@@ -350,3 +305,24 @@ func murmurFinish(h int, numberOfWords int) int {

	return int(hash)
}

func isDirectory(dir string) (bool, error) {
	fileInfo, err := os.Stat(dir)
	if err != nil {
		switch {
		case errors.Is(err, syscall.ENOENT):
			// The given directory does not exist, so we will try to create it
			//
			err = os.MkdirAll(dir, 0755)
			if err != nil {
				return false, err
			}

			return true, nil
		case err != nil:
			return false, err
		default:
		}
	}
	return fileInfo.IsDir(), err
}
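A side note on the new isDirectory helper: it matches the raw errno with errors.Is(err, syscall.ENOENT). A self-contained sketch of the same check using the portable os.ErrNotExist sentinel instead; this is an equivalent alternative for illustration, not what the vendored file does:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// ensureDir reports whether dir is a directory, creating it when missing.
func ensureDir(dir string) (bool, error) {
	info, err := os.Stat(dir)
	if errors.Is(err, os.ErrNotExist) {
		// The directory does not exist, so try to create it.
		if err := os.MkdirAll(dir, 0755); err != nil {
			return false, err
		}
		return true, nil
	}
	if err != nil {
		return false, err
	}
	return info.IsDir(), nil
}

func main() {
	ok, err := ensureDir(os.TempDir())
	fmt.Println(ok, err) // true <nil> on any normal system
}
```

On POSIX systems os.Stat wraps ENOENT so that errors.Is matches either sentinel; os.ErrNotExist also covers Windows, where syscall.ENOENT would not apply.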
4  vendor/github.com/cenkalti/backoff/v4/README.md  (generated, vendored)
@@ -1,4 +1,4 @@
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]

This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].

@@ -21,8 +21,6 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.

[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
[travis]: https://travis-ci.org/cenkalti/backoff
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master

57  vendor/github.com/cenkalti/backoff/v4/exponential.go  (generated, vendored)
@@ -71,6 +71,9 @@ type Clock interface {
	Now() time.Time
}

// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
type ExponentialBackOffOpts func(*ExponentialBackOff)

// Default values for ExponentialBackOff.
const (
	DefaultInitialInterval = 500 * time.Millisecond
@@ -81,7 +84,7 @@ const (
)

// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
	b := &ExponentialBackOff{
		InitialInterval:     DefaultInitialInterval,
		RandomizationFactor: DefaultRandomizationFactor,
@@ -91,10 +94,62 @@ func NewExponentialBackOff() *ExponentialBackOff {
		Stop:                Stop,
		Clock:               SystemClock,
	}
	for _, fn := range opts {
		fn(b)
	}
	b.Reset()
	return b
}

// WithInitialInterval sets the initial interval between retries.
func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.InitialInterval = duration
	}
}

// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.RandomizationFactor = randomizationFactor
	}
}

// WithMultiplier sets the multiplier for increasing the interval after each retry.
func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Multiplier = multiplier
	}
}

// WithMaxInterval sets the maximum interval between retries.
func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.MaxInterval = duration
	}
}

// WithMaxElapsedTime sets the maximum total time for retries.
func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.MaxElapsedTime = duration
	}
}

// WithRetryStopDuration sets the duration after which retries should stop.
func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Stop = duration
	}
}

// WithClockProvider sets the clock used to measure time.
func WithClockProvider(clock Clock) ExponentialBackOffOpts {
	return func(ebo *ExponentialBackOff) {
		ebo.Clock = clock
	}
}
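These With* functions are standard functional options, applied in order inside NewExponentialBackOff before the final Reset. A brief usage sketch; the chosen intervals are arbitrary:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Configure the policy at construction time instead of mutating
	// the struct's fields afterwards.
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(200*time.Millisecond),
		backoff.WithMaxInterval(5*time.Second),
		backoff.WithMaxElapsedTime(time.Minute),
	)

	// NextBackOff returns the next wait duration (with jitter), or
	// backoff.Stop once MaxElapsedTime is exhausted.
	for i := 0; i < 3; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```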

type systemClock struct{}

func (t systemClock) Now() time.Time {

1  vendor/github.com/distribution/reference/.gitattributes  (generated, vendored, new file)
@@ -0,0 +1 @@
*.go text eol=lf

2  vendor/github.com/distribution/reference/.gitignore  (generated, vendored, new file)
@@ -0,0 +1,2 @@
# Cover profiles
*.out

18  vendor/github.com/distribution/reference/.golangci.yml  (generated, vendored, new file)
@@ -0,0 +1,18 @@
linters:
  enable:
    - bodyclose
    - dupword # Checks for duplicate words in the source code
    - gofmt
    - goimports
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - unconvert
    - unused
    - vet
  disable:
    - errcheck

run:
  deadline: 2m

5  vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md  (generated, vendored, new file)
@@ -0,0 +1,5 @@
# Code of Conduct

We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct.

114  vendor/github.com/distribution/reference/CONTRIBUTING.md  (generated, vendored, new file)
@@ -0,0 +1,114 @@
# Contributing to the reference library

## Community help

If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack.
[Click here for an invite to the CNCF community slack](https://slack.cncf.io/)

## Reporting security issues

The maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!

Please **DO NOT** file a public issue, instead send your report privately to
[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io).

## Reporting an issue properly

By following these simple rules you will get better and faster feedback on your issue.

- search the bugtracker for an already reported issue

### If you found an issue that describes your problem:

- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.

### If you have not found an existing issue that describes your problem:

1. create a new issue, with a succinct title that describes your issue:
   - bad title: "It doesn't work with my docker"
   - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of (or similar for other container tools):
   - `docker version`
   - `docker info`
   - `docker exec <registry-container> registry --version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry

## Contributing Code

Contributions should be made via pull requests. Pull requests will be reviewed
by one or more maintainers or reviewers and merged when acceptable.

You should follow the basic GitHub workflow:

1. Use your own [fork](https://help.github.com/en/articles/about-forks)
2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
3. Test your code
4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages)
5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)

Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
for tips on creating a successful contribution.

## Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

144  vendor/github.com/distribution/reference/GOVERNANCE.md  (generated, vendored, new file)
@@ -0,0 +1,144 @@
# distribution/reference Project Governance

Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here.

For specific guidance on practical contribution steps please
see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide.

## Maintainership

There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:

1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.

Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.

## Reviewers

A reviewer is a core role within the project.
They share in reviewing issues and pull requests and their LGTM counts towards the
required LGTM count to merge a code change into the project.

Reviewers are part of the organization but do not have write access.
Becoming a reviewer is a core aspect in the journey to becoming a maintainer.

## Adding maintainers

Maintainers are first and foremost contributors that have shown they are
committed to the long term success of a project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code, pull
request review, and triage of issues in the project for more than three months.

Just contributing does not make you a maintainer, it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project.

Periodically, the existing maintainers curate a list of contributors that have
shown regular activity on the project over the prior months. From this list,
maintainer candidates are selected and proposed in a pull request or a
maintainers communication channel.

After a candidate has been announced to the maintainers, the existing
maintainers are given five business days to discuss the candidate, raise
objections and cast their vote. Votes may take place on the communication
channel or via pull request comment. Candidates must be approved by at least 66%
of the current maintainers by adding their vote on the mailing list. The
reviewer role has the same process but only requires 33% of current maintainers.
Only maintainers of the repository that the candidate is proposed for are
allowed to vote.

If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The voting process may take place inside a pull request if a
maintainer has already discussed the candidacy with the candidate and a
maintainer is willing to be a sponsor by opening the pull request. The candidate
becomes a maintainer once the pull request is merged.

## Stepping down policy

Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.

After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.

## Removal of inactive maintainers

Similar to the procedure for adding new maintainers, existing maintainers can
be removed from the list if they do not show significant activity on the
project. Periodically, the maintainers review the list of maintainers and their
activity over the last three months.

If a maintainer has shown insufficient activity over this period, a neutral
person will contact the maintainer to ask if they want to continue being
a maintainer. If the maintainer decides to step down as a maintainer, they
open a pull request to be removed from the MAINTAINERS file.

If the maintainer wants to remain a maintainer, but is unable to perform the
required duties they can be removed with a vote of at least 66% of the current
maintainers. In this case, maintainers should first propose the change to
maintainers via the maintainers communication channel, then open a pull request
for voting. The voting period is five business days. The voting pull request
should not come as a surprise to any maintainer and any discussion related to
performance must not be discussed on the pull request.

## How are decisions made?

Docker distribution is an open-source project with an open design philosophy.
This means that the repository is the source of truth for EVERY aspect of the
project, including its philosophy, design, road map, and APIs. *If it's part of
the project, it's in the repo. If it's in the repo, it's part of the project.*

As a result, all decisions can be expressed as changes to the repository. An
implementation change is a change to the source code. An API change is a change
to the API specification. A philosophy change is a change to the philosophy
manifesto, and so on.

All decisions affecting distribution, big and small, follow the same 3 steps:

* Step 1: Open a pull request. Anyone can do this.

* Step 2: Discuss the pull request. Anyone can do this.

* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects.

## Helping contributors with the DCO

The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.

Some contributors are not as familiar with `git`, or have used a web
based editor, and thus asking them to `git commit --amend -s` is not the best
way forward.

In this case, maintainers can update the commits based on clause (c) of the DCO.
The most trivial way for a contributor to allow the maintainer to do this, is to
add a DCO signature in a pull request's comment, or a maintainer can simply
note that the change is sufficiently trivial that it does not substantially
change the existing contribution - i.e., a spelling change.

When you add someone's DCO, please also add your own to keep a log.

## I'm a maintainer. Should I make pull requests too?

Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.

## Conflict Resolution

If you have a technical dispute that you feel has reached an impasse with a
subset of the community, any contributor may open an issue, specifically
calling for a resolution vote of the current core maintainers to resolve the
dispute. The same voting quorums required (2/3) for adding and removing
maintainers will apply to conflict resolution.

26  vendor/github.com/distribution/reference/MAINTAINERS  (generated, vendored, new file)
@@ -0,0 +1,26 @@
# Distribution project maintainers & reviewers
#
# See GOVERNANCE.md for maintainer versus reviewer roles
#
# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io)
# GitHub ID, Name, Email address
"chrispat","Chris Patterson","chrispat@github.com"
"clarkbw","Bryan Clark","clarkbw@github.com"
"corhere","Cory Snider","csnider@mirantis.com"
"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com"
"heww","He Weiwei","hweiwei@vmware.com"
"joaodrp","João Pereira","jpereira@gitlab.com"
"justincormack","Justin Cormack","justin.cormack@docker.com"
"squizzi","Kyle Squizzato","ksquizzato@mirantis.com"
"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com"
"sargun","Sargun Dhillon","sargun@sargun.me"
"wy65701436","Wang Yan","wangyan@vmware.com"
"stevelasker","Steve Lasker","steve.lasker@microsoft.com"
#
# REVIEWERS
# GitHub ID, Name, Email address
"dmcgowan","Derek McGowan","derek@mcgstyle.net"
"stevvooe","Stephen Day","stevvooe@gmail.com"
"thajeztah","Sebastiaan van Stijn","github@gone.nl"
"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com"
"Jamstah", "James Hewitt", "james.hewitt@gmail.com"

25  vendor/github.com/distribution/reference/Makefile  (generated, vendored, new file)
@@ -0,0 +1,25 @@
# Project packages.
PACKAGES=$(shell go list ./...)

# Flags passed to `go test`
BUILDFLAGS ?=
TESTFLAGS ?=

.PHONY: all build test coverage
.DEFAULT: all

all: build

build: ## no binaries to build, so just check compilation succeeds
	go build ${BUILDFLAGS} ./...

test: ## run tests
	go test ${TESTFLAGS} ./...

coverage: ## generate coverprofiles from the unit tests
	rm -f coverage.txt
	go test ${TESTFLAGS} -cover -coverprofile=cover.out ./...

.PHONY: help
help:
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf "  \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

30  vendor/github.com/distribution/reference/README.md  (generated, vendored, new file)
@@ -0,0 +1,30 @@
# Distribution reference

Go library to handle references to container images.

<img src="/distribution-logo.svg" width="200px" />

[](https://github.com/distribution/reference/actions?query=workflow%3ACI)
[](https://pkg.go.dev/github.com/distribution/reference)
[](LICENSE)
[](https://codecov.io/gh/distribution/reference)
[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield)

This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details.

## Contribution

Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project.

## Communication

For async communication and long running discussions please use issues and pull requests on the github repo.
This will be the best place to discuss design and implementation.

For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/)
that everyone is welcome to join and chat about development.

## Licenses

The distribution codebase is released under the [Apache 2.0 license](LICENSE).

7  vendor/github.com/distribution/reference/SECURITY.md  (generated, vendored, new file)
@@ -0,0 +1,7 @@
# Security Policy

## Reporting a Vulnerability

The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!

Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io.

1  vendor/github.com/distribution/reference/distribution-logo.svg  (generated, vendored, new file)
File diff suppressed because one or more lines are too long (new image, 8.6 KiB)

@@ -32,7 +32,7 @@ func FamiliarString(ref Reference) string {
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
// See [path.Match] for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
	matched, err := path.Match(pattern, FamiliarString(ref))
	if namedRef, isNamed := ref.(Named); isNamed && !matched {
@@ -1,19 +1,42 @@
package reference

import (
	"errors"
	"fmt"
	"strings"

	"github.com/docker/distribution/digestset"
	"github.com/opencontainers/go-digest"
)

var (
const (
	// legacyDefaultDomain is the legacy domain for Docker Hub (which was
	// originally named "the Docker Index"). This domain is still used for
	// authentication and image search, which were part of the "v1" Docker
	// registry specification.
	//
	// This domain will continue to be supported, but there are plans to consolidate
	// legacy domains to new "canonical" domains. Once those domains are decided
	// on, we must update the normalization functions, but preserve compatibility
	// with existing installs, clients, and user configuration.
	legacyDefaultDomain = "index.docker.io"
	defaultDomain       = "docker.io"
	officialRepoName    = "library"
	defaultTag          = "latest"

	// defaultDomain is the default domain used for images on Docker Hub.
	// It is used to normalize "familiar" names to canonical names, for example,
	// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
	//
	// Note that the actual domain of Docker Hub's registry is registry-1.docker.io.
	// This domain will continue to be supported, but there are plans to consolidate
	// legacy domains to new "canonical" domains. Once those domains are decided
	// on, we must update the normalization functions, but preserve compatibility
	// with existing installs, clients, and user configuration.
	defaultDomain = "docker.io"

	// officialRepoPrefix is the namespace used for official images on Docker Hub.
	// It is used to normalize "familiar" names to canonical names, for example,
	// to convert "ubuntu" to "docker.io/library/ubuntu:latest".
	officialRepoPrefix = "library/"

	// defaultTag is the default tag if no tag is provided.
	defaultTag = "latest"
)

// normalizedNamed represents a name which has been
@@ -35,14 +58,14 @@ func ParseNormalizedNamed(s string) (Named, error) {
		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
	}
	domain, remainder := splitDockerDomain(s)
	var remoteName string
	var remote string
	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
		remoteName = remainder[:tagSep]
		remote = remainder[:tagSep]
	} else {
		remoteName = remainder
		remote = remainder
	}
	if strings.ToLower(remoteName) != remoteName {
		return nil, errors.New("invalid reference format: repository name must be lowercase")
	if strings.ToLower(remote) != remote {
		return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote)
	}

	ref, err := Parse(domain + "/" + remainder)
@@ -56,41 +79,53 @@ func ParseNormalizedNamed(s string) (Named, error) {
	return named, nil
}
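To make the normalization concrete: a bare familiar name gains the default domain and the "library/" prefix, and FamiliarName reverses that. A short sketch against the exported API of this module (github.com/distribution/reference, the path the vendored files above belong to):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	// The default domain and the "library/" prefix are filled in.
	fmt.Println(named.Name())                  // docker.io/library/ubuntu
	fmt.Println(reference.FamiliarName(named)) // ubuntu
}
```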

// ParseDockerRef normalizes the image reference following the docker convention. This is added
// mainly for backward compatibility.
// The reference returned can only be either tagged or digested. For reference contains both tag
// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
// namedTaggedDigested is a reference that has both a tag and a digest.
type namedTaggedDigested interface {
	NamedTagged
	Digested
}

// ParseDockerRef normalizes the image reference following the docker convention,
// which allows for references to contain both a tag and a digest. It returns a
// reference that is either tagged or digested. For references containing both
// a tag and a digest, it returns a digested reference. For example, the following
// reference:
//
//	docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// Is returned as a digested reference (with the ":latest" tag removed):
//
//	docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
// References that are already "tagged" or "digested" are returned unmodified:
//
//	// Already a digested reference
//	docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
//
//	// Already a named reference
//	docker.io/library/busybox:latest
func ParseDockerRef(ref string) (Named, error) {
	named, err := ParseNormalizedNamed(ref)
	if err != nil {
		return nil, err
	}
	if _, ok := named.(NamedTagged); ok {
		if canonical, ok := named.(Canonical); ok {
			// The reference is both tagged and digested, only
			// return digested.
			newNamed, err := WithName(canonical.Name())
			if err != nil {
				return nil, err
			}
			newCanonical, err := WithDigest(newNamed, canonical.Digest())
			if err != nil {
				return nil, err
			}
			return newCanonical, nil
	if canonical, ok := named.(namedTaggedDigested); ok {
		// The reference is both tagged and digested; only return digested.
		newNamed, err := WithName(canonical.Name())
		if err != nil {
			return nil, err
		}
		return WithDigest(newNamed, canonical.Digest())
	}
	return TagNameOnly(named), nil
}
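Exercising the behavior documented above: a reference carrying both a tag and a digest comes back digest-only. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref := "docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"
	named, err := reference.ParseDockerRef(ref)
	if err != nil {
		panic(err)
	}
	// The ":latest" tag is dropped in favour of the digest.
	fmt.Println(named.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
}
```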

// splitDockerDomain splits a repository name to domain and remotename string.
// splitDockerDomain splits a repository name to domain and remote-name.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) {
	i := strings.IndexRune(name, '/')
	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) {
		domain, remainder = defaultDomain, name
	} else {
		domain, remainder = name[:i], name[i+1:]
@@ -99,13 +134,13 @@ func splitDockerDomain(name string) (domain, remainder string) {
		domain = defaultDomain
	}
	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
		remainder = officialRepoName + "/" + remainder
		remainder = officialRepoPrefix + remainder
	}
	return
}

// familiarizeName returns a shortened version of the name familiar
// to to the Docker UI. Familiar names have the default domain
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
@@ -119,8 +154,15 @@ func familiarizeName(named namedRepository) repository {
	if repo.domain == defaultDomain {
		repo.domain = ""
		// Handle official repositories which have the pattern "library/<official repo name>"
		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
			repo.path = split[1]
		if strings.HasPrefix(repo.path, officialRepoPrefix) {
			// TODO(thaJeztah): this check may be too strict, as it assumes the
			// "library/" namespace does not have nested namespaces. While this
			// is true (currently), technically it would be possible for Docker
			// Hub to use those (e.g. "library/distros/ubuntu:latest").
			// See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785.
			if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') {
				repo.path = remainder
			}
		}
	}
	return repo
@@ -180,20 +222,3 @@ func ParseAnyReference(ref string) (Reference, error) {

	return ParseNormalizedNamed(ref)
}

// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
		dgst, err := ds.Lookup(ref)
		if err == nil {
			return digestReference(dgst), nil
		}
	} else {
		if dgst, err := digest.Parse(ref); err == nil {
			return digestReference(dgst), nil
		}
	}

	return ParseNormalizedNamed(ref)
}
@@ -4,11 +4,14 @@
// Grammar
//
//	reference          := name [ ":" tag ] [ "@" digest ]
//	name               := [domain '/'] path-component ['/' path-component]*
//	domain             := domain-component ['.' domain-component]* [':' port-number]
//	name               := [domain '/'] remote-name
//	domain             := host [':' port-number]
//	host               := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A
//	domain-name        := domain-component ['.' domain-component]*
//	domain-component   := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
//	port-number        := /[0-9]+/
//	path-component     := alpha-numeric [separator alpha-numeric]*
//	path (or "remote-name") := path-component ['/' path-component]*
//	alpha-numeric      := /[a-z0-9]+/
//	separator          := /[_.]|__|[-]*/
//
@@ -21,7 +24,6 @@
//	digest-hex         := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
//	identifier         := /[a-f0-9]{64}/
//	short-identifier   := /[a-f0-9]{6,64}/
package reference

import (
@@ -145,7 +147,7 @@ type namedRepository interface {
	Path() string
}

// Domain returns the domain part of the Named reference
// Domain returns the domain part of the [Named] reference.
func Domain(named Named) string {
	if r, ok := named.(namedRepository); ok {
		return r.Domain()
@@ -154,7 +156,7 @@ func Domain(named Named) string {
	return domain
}

// Path returns the name without the domain part of the Named reference
// Path returns the name without the domain part of the [Named] reference.
func Path(named Named) (name string) {
	if r, ok := named.(namedRepository); ok {
		return r.Path()
@@ -175,7 +177,8 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
//
// Deprecated: Use [Domain] or [Path].
func SplitHostname(named Named) (string, string) {
	if r, ok := named.(namedRepository); ok {
		return r.Domain(), r.Path()
@@ -185,7 +188,6 @@ func SplitHostname(named Named) (string, string) {

// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
	matches := ReferenceRegexp.FindStringSubmatch(s)
	if matches == nil {
@@ -237,7 +239,6 @@ func Parse(s string) (Reference, error) {
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
	named, err := ParseNormalizedNamed(s)
	if err != nil {
@@ -320,11 +321,13 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
|
||||
|
||||
// TrimNamed removes any tag or digest from the named reference.
|
||||
func TrimNamed(ref Named) Named {
|
||||
domain, path := SplitHostname(ref)
|
||||
return repository{
|
||||
domain: domain,
|
||||
path: path,
|
||||
repo := repository{}
|
||||
if r, ok := ref.(namedRepository); ok {
|
||||
repo.domain, repo.path = r.Domain(), r.Path()
|
||||
} else {
|
||||
repo.domain, repo.path = splitDomain(ref.Name())
|
||||
}
|
||||
return repo
|
||||
}
|
||||
|
||||
func getBestReferenceType(ref reference) Reference {
|
||||
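To make the tightened normalization rules above concrete, here is a minimal sketch against the distribution/reference API; the input strings and printed output are illustrative assumptions, not part of this diff:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// splitDockerDomain now treats a first segment containing upper-case
	// letters as a registry host, since repository paths must be lower-case.
	for _, s := range []string{"redis", "dmcgowan/myapp", "localhost/foo"} {
		named, err := reference.ParseNormalizedNamed(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		// e.g. "redis" -> "docker.io/library/redis" (familiar: "redis")
		fmt.Printf("%s -> %s (familiar: %s)\n", s, named.Name(), reference.FamiliarName(named))
	}
}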
163 vendor/github.com/distribution/reference/regexp.go generated vendored Normal file
@ -0,0 +1,163 @@
package reference

import (
	"regexp"
	"strings"
)

// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
var DigestRegexp = regexp.MustCompile(digestPat)

// DomainRegexp matches hostname or IP-addresses, optionally including a port
// number. It defines the structure of potential domain components that may be
// part of image names. This is purposely a subset of what is allowed by DNS to
// ensure backwards compatibility with Docker image names. It may be a subset of
// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
// addresses such as IPv4-Mapped).
//
// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
var DomainRegexp = regexp.MustCompile(domainAndPort)

// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
var IdentifierRegexp = regexp.MustCompile(identifier)

// NameRegexp is the format for the name component of references, including
// an optional domain and port, but without tag or digest suffix.
var NameRegexp = regexp.MustCompile(namePat)

// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
var ReferenceRegexp = regexp.MustCompile(referencePat)

// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
//
// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
var TagRegexp = regexp.MustCompile(tag)

const (
	// alphanumeric defines the alphanumeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphanumeric = `[a-z0-9]+`

	// separator defines the separators allowed to be embedded in name
	// components. This allows one period, one or two underscore and multiple
	// dashes. Repeated dashes and underscores are intentionally treated
	// differently. In order to support valid hostnames as name components,
	// supporting repeated dash was added. Additionally double underscore is
	// now allowed as a separator to loosen the restriction for previously
	// supported names.
	separator = `(?:[._]|__|[-]+)`

	// localhost is treated as a special value for domain-name. Any other
	// domain-name without a "." or a ":port" are considered a path component.
	localhost = `localhost`

	// domainNameComponent restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp.
	domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`

	// optionalPort matches an optional port-number including the port separator
	// (e.g. ":80").
	optionalPort = `(?::[0-9]+)?`

	// tag matches valid tag names. From docker/docker:graph/tags.go.
	tag = `[\w][\w.-]{0,127}`

	// digestPat matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
	//
	// TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp
	// so that go-digest defines the canonical format. Note that the go-digest is
	// more relaxed:
	//   - it allows multiple algorithms (e.g. "sha256+b64:<encoded>") to allow
	//     future expansion of supported algorithms.
	//   - it allows the "<encoded>" value to use urlsafe base64 encoding as defined
	//     in [rfc4648, section 5].
	//
	// [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5.
	digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`

	// identifier is the format for a content addressable identifier using sha256.
	// These identifiers are like digests without the algorithm, since sha256 is used.
	identifier = `([a-f0-9]{64})`

	// ipv6address are enclosed between square brackets and may be represented
	// in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format
	// are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as
	// IPv4-Mapped are deliberately excluded.
	ipv6address = `\[(?:[a-fA-F0-9:]+)\]`
)

var (
	// domainName defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names. This includes IPv4 addresses on decimal format.
	domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent)

	// host defines the structure of potential domains based on the URI
	// Host subcomponent on rfc3986. It may be a subset of DNS domain name,
	// or an IPv4 address in decimal format, or an IPv6 address between square
	// brackets (excluding zone identifiers as defined by rfc6874 or special
	// addresses such as IPv4-Mapped).
	host = `(?:` + domainName + `|` + ipv6address + `)`

	// allowed by the URI Host subcomponent on rfc3986 to ensure backwards
	// compatibility with Docker image names.
	domainAndPort = host + optionalPort

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = regexp.MustCompile(anchored(tag))

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat))

	// pathComponent restricts path-components to start with an alphanumeric
	// character, with following parts able to be separated by a separator
	// (one period, one or two underscore and multiple dashes).
	pathComponent = alphanumeric + anyTimes(separator+alphanumeric)

	// remoteName matches the remote-name of a repository. It consists of one
	// or more forward slash (/) delimited path-components:
	//
	//	pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu"
	remoteName = pathComponent + anyTimes(`/`+pathComponent)
	namePat    = optional(domainAndPort+`/`) + remoteName

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName)))

	referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat)))

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier))
)

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...string) string {
	return `(?:` + strings.Join(res, "") + `)?`
}

// anyTimes wraps the expression in a non-capturing group that can occur
// any number of times.
func anyTimes(res ...string) string {
	return `(?:` + strings.Join(res, "") + `)*`
}

// capture wraps the expression in a capturing group.
func capture(res ...string) string {
	return `(` + strings.Join(res, "") + `)`
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...string) string {
	return `^` + strings.Join(res, "") + `$`
}
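A short sketch of how the exported patterns compose in practice; the sample reference string below is made up for illustration:

package main

import (
	"fmt"
	"strings"

	"github.com/distribution/reference"
)

func main() {
	ref := "registry.example.com:5000/team/app:v1.2.3@sha256:" + strings.Repeat("ab", 32)
	// ReferenceRegexp is anchored; its capturing groups are name, tag, digest.
	if m := reference.ReferenceRegexp.FindStringSubmatch(ref); m != nil {
		fmt.Println("name:  ", m[1]) // registry.example.com:5000/team/app
		fmt.Println("tag:   ", m[2]) // v1.2.3
		fmt.Println("digest:", m[3]) // sha256:abab...
	}
}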
75 vendor/github.com/distribution/reference/sort.go generated vendored Normal file
@ -0,0 +1,75 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reference

import (
	"sort"
)

// Sort sorts string references preferring higher information references.
//
// The precedence is as follows:
//
//  1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:<digest>")
//  2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest")
//  3. [Named] + [Digested] (e.g., "docker.io/library/busybox@sha256:<digest>")
//  4. [Named] (e.g., "docker.io/library/busybox")
//  5. [Digested] (e.g., "docker.io@sha256:<digest>")
//  6. Parse error
func Sort(references []string) []string {
	var prefs []Reference
	var bad []string

	for _, ref := range references {
		pref, err := ParseAnyReference(ref)
		if err != nil {
			bad = append(bad, ref)
		} else {
			prefs = append(prefs, pref)
		}
	}
	sort.Slice(prefs, func(a, b int) bool {
		ar := refRank(prefs[a])
		br := refRank(prefs[b])
		if ar == br {
			return prefs[a].String() < prefs[b].String()
		}
		return ar < br
	})
	sort.Strings(bad)
	var refs []string
	for _, pref := range prefs {
		refs = append(refs, pref.String())
	}
	return append(refs, bad...)
}

func refRank(ref Reference) uint8 {
	if _, ok := ref.(Named); ok {
		if _, ok = ref.(Tagged); ok {
			if _, ok = ref.(Digested); ok {
				return 1
			}
			return 2
		}
		if _, ok = ref.(Digested); ok {
			return 3
		}
		return 4
	}
	return 5
}
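For illustration, a minimal sketch of Sort on mixed input; the digest value is a dummy:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	dummy := "sha256:e6d9e8563fec0c702f9befb2b1de4c85e5b8bf5b0a0e8b3f6b468b4c2b9a6a11"
	sorted := reference.Sort([]string{
		"busybox",
		"not//parseable",
		"busybox:latest",
		"busybox@" + dummy,
	})
	// Tagged names sort before digested ones, digested before bare names;
	// strings that fail to parse are appended at the end.
	for _, r := range sorted {
		fmt.Println(r)
	}
}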
247 vendor/github.com/docker/distribution/digestset/set.go generated vendored
@ -1,247 +0,0 @@
package digestset

import (
	"errors"
	"sort"
	"strings"
	"sync"

	digest "github.com/opencontainers/go-digest"
)

var (
	// ErrDigestNotFound is used when a matching digest
	// could not be found in a set.
	ErrDigestNotFound = errors.New("digest not found")

	// ErrDigestAmbiguous is used when multiple digests
	// are found in a set. None of the matching digests
	// should be considered valid matches.
	ErrDigestAmbiguous = errors.New("ambiguous digest string")
)

// Set is used to hold a unique set of digests which
// may be easily referenced by a string
// representation of the digest as well as short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
	mutex   sync.RWMutex
	entries digestEntries
}

// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
	return &Set{
		entries: digestEntries{},
	}
}

// checkShortMatch checks whether two digests match as either whole
// values or short values. This function does not test equality,
// rather whether the second value could match against the first
// value.
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
	if len(hex) == len(shortHex) {
		if hex != shortHex {
			return false
		}
		if len(shortAlg) > 0 && string(alg) != shortAlg {
			return false
		}
	} else if !strings.HasPrefix(hex, shortHex) {
		return false
	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
		return false
	}
	return true
}

// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	if len(dst.entries) == 0 {
		return "", ErrDigestNotFound
	}
	var (
		searchFunc func(int) bool
		alg        digest.Algorithm
		hex        string
	)
	dgst, err := digest.Parse(d)
	if err == digest.ErrDigestInvalidFormat {
		hex = d
		searchFunc = func(i int) bool {
			return dst.entries[i].val >= d
		}
	} else {
		hex = dgst.Hex()
		alg = dgst.Algorithm()
		searchFunc = func(i int) bool {
			if dst.entries[i].val == hex {
				return dst.entries[i].alg >= alg
			}
			return dst.entries[i].val >= hex
		}
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
		return "", ErrDigestNotFound
	}
	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
		return dst.entries[idx].digest, nil
	}
	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
		return "", ErrDigestAmbiguous
	}

	return dst.entries[idx].digest, nil
}

// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) {
		dst.entries = append(dst.entries, entry)
		return nil
	} else if dst.entries[idx].digest == d {
		return nil
	}

	entries := append(dst.entries, nil)
	copy(entries[idx+1:], entries[idx:len(entries)-1])
	entries[idx] = entry
	dst.entries = entries
	return nil
}

// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	// Not found if idx is after or value at idx is not digest
	if idx == len(dst.entries) || dst.entries[idx].digest != d {
		return nil
	}

	entries := dst.entries
	copy(entries[idx:], entries[idx+1:])
	entries = entries[:len(entries)-1]
	dst.entries = entries

	return nil
}

// All returns all the digests in the set
func (dst *Set) All() []digest.Digest {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	retValues := make([]digest.Digest, len(dst.entries))
	for i := range dst.entries {
		retValues[i] = dst.entries[i].digest
	}

	return retValues
}

// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	m := make(map[digest.Digest]string, len(dst.entries))
	l := length
	resetIdx := 0
	for i := 0; i < len(dst.entries); i++ {
		var short string
		extended := true
		for extended {
			extended = false
			if len(dst.entries[i].val) <= l {
				short = dst.entries[i].digest.String()
			} else {
				short = dst.entries[i].val[:l]
				for j := i + 1; j < len(dst.entries); j++ {
					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
						if j > resetIdx {
							resetIdx = j
						}
						extended = true
					} else {
						break
					}
				}
				if extended {
					l++
				}
			}
		}
		m[dst.entries[i].digest] = short
		if i >= resetIdx {
			l = length
		}
	}
	return m
}

type digestEntry struct {
	alg    digest.Algorithm
	val    string
	digest digest.Digest
}

type digestEntries []*digestEntry

func (d digestEntries) Len() int {
	return len(d)
}

func (d digestEntries) Less(i, j int) bool {
	if d[i].val != d[j].val {
		return d[i].val < d[j].val
	}
	return d[i].alg < d[j].alg
}

func (d digestEntries) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}
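For context, a sketch of how this now-removed digestset API was typically used for short-prefix lookup; the input value is illustrative:

package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	ds := digestset.NewSet()
	d := digest.FromString("hello world")
	if err := ds.Add(d); err != nil {
		panic(err)
	}
	// A short hex prefix resolves to the full digest while it stays
	// unambiguous within the set.
	full, err := ds.Lookup(d.Hex()[:8])
	fmt.Println(full, err)
}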
143 vendor/github.com/docker/distribution/reference/regexp.go generated vendored
@ -1,143 +0,0 @@
package reference

import "regexp"

var (
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allows one period, one or two underscore and multiple
	// dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscore and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// IdentifierRegexp is the format for string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	IdentifierRegexp = match(`([a-f0-9]{64})`)

	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = anchored(IdentifierRegexp)

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
	re := match(regexp.QuoteMeta(s))

	if _, complete := re.LiteralPrefix(); !complete {
		panic("must be a literal")
	}

	return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	var s string
	for _, re := range res {
		s += re.String()
	}

	return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}
24 vendor/github.com/emicklei/go-restful/v3/CHANGES.md generated vendored
@ -1,10 +1,30 @@
# Change history of go-restful

## [v3.9.0] - 20221-07-21
## [v3.11.0] - 2023-08-19

- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.

## [v3.10.2] - 2023-03-09 - DO NOT USE

- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0
  see comment in Readme how to customize this behaviour.

## [v3.10.1] - 2022-11-19 - DO NOT USE

- fix broken 3.10.0 by using path package for joining paths

## [v3.10.0] - 2022-10-11 - BROKEN

- changed tokenizer to match std route match behavior; do not trimright the path (#511)
- Add MIME_ZIP (#512)
- Add MIME_ZIP and HEADER_ContentDisposition (#513)
- Changed how to get query parameter issue #510

## [v3.9.0] - 2022-07-21

- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)

## [v3.8.0] - 20221-06-06
## [v3.8.0] - 2022-06-06

- use exact matching of allowed domain entries, issue #489 (#493)
- this change fixes [security] Authorization Bypass Through User-Controlled Key

5 vendor/github.com/emicklei/go-restful/v3/README.md generated vendored
@ -79,7 +79,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
@ -96,6 +96,7 @@ There are several hooks to customize the behavior of the go-restful package.
- Compression
- Encoders for other serializers
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`

## Resources

@ -108,4 +109,4 @@ There are several hooks to customize the behavior of the go-restful package.

Type ```git shortlog -s``` for a full list of contributors.

© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome.
© 2012 - 2023, http://ernestmicklei.com. MIT License. Contributions are welcome.

2 vendor/github.com/emicklei/go-restful/v3/constants.go generated vendored
@ -7,12 +7,14 @@ package restful
const (
	MIME_XML   = "application/xml"          // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_JSON  = "application/json"         // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_ZIP   = "application/zip"          // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default

	HEADER_Allow              = "Allow"
	HEADER_Accept             = "Accept"
	HEADER_Origin             = "Origin"
	HEADER_ContentType        = "Content-Type"
	HEADER_ContentDisposition = "Content-Disposition"
	HEADER_LastModified       = "Last-Modified"
	HEADER_AcceptEncoding     = "Accept-Encoding"
	HEADER_ContentEncoding    = "Content-Encoding"

5 vendor/github.com/emicklei/go-restful/v3/request.go generated vendored
@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request {
// a "Unable to unmarshal content of type:" response is returned.
|
||||
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
||||
// Example:
|
||||
// restful.DefaultRequestContentType(restful.MIME_JSON)
|
||||
//
|
||||
// restful.DefaultRequestContentType(restful.MIME_JSON)
|
||||
func DefaultRequestContentType(mime string) {
|
||||
defaultRequestContentType = mime
|
||||
}
|
||||
@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string {
|
||||
|
||||
// QueryParameter returns the (first) Query parameter value by its name
|
||||
func (r *Request) QueryParameter(name string) string {
|
||||
return r.Request.FormValue(name)
|
||||
return r.Request.URL.Query().Get(name)
|
||||
}
|
||||
|
||||
// QueryParameters returns the all the query parameters values by name
|
||||
|
||||
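The QueryParameter change above is behavioral, not cosmetic: FormValue falls back to the POST form body, while URL.Query().Get reads the URL query string only. A stdlib-only sketch of the difference (the request values are made up):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	body := strings.NewReader("name=from-body")
	req := httptest.NewRequest(http.MethodPost, "/users?id=42", body)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	fmt.Println(req.FormValue("name"))       // "from-body" — old lookup also read the body
	fmt.Println(req.URL.Query().Get("name")) // "" — new lookup reads the URL only
}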
3 vendor/github.com/emicklei/go-restful/v3/response.go generated vendored
@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
	if DefaultResponseMimeType == MIME_XML {
		return entityAccessRegistry.accessorAt(MIME_XML)
	}
	if DefaultResponseMimeType == MIME_ZIP {
		return entityAccessRegistry.accessorAt(MIME_ZIP)
	}
	// Fallback to whatever the route says it can produce.
	// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
	for _, each := range r.routeProduces {

17 vendor/github.com/emicklei/go-restful/v3/route.go generated vendored
@ -40,7 +40,8 @@ type Route struct {
	ParameterDocs   []*Parameter
	ResponseErrors  map[int]ResponseError
	DefaultResponse *ResponseError
	ReadSample, WriteSample interface{} // structs that model an example request or response payload
	ReadSample, WriteSample interface{}   // structs that model an example request or response payload
	WriteSamples            []interface{} // if more than one return types is possible (oneof) then this will contain multiple values

	// Extra information used to store custom information about the route.
	Metadata map[string]interface{}
@ -164,7 +165,13 @@ func tokenizePath(path string) []string {
	if "/" == path {
		return nil
	}
	return strings.Split(strings.Trim(path, "/"), "/")
	if TrimRightSlashEnabled {
		// 3.9.0
		return strings.Split(strings.Trim(path, "/"), "/")
	} else {
		// 3.10.2
		return strings.Split(strings.TrimLeft(path, "/"), "/")
	}
}

// for debugging
@ -176,3 +183,9 @@ func (r *Route) String() string {
func (r *Route) EnableContentEncoding(enabled bool) {
	r.contentEncodingEnabled = &enabled
}

// TrimRightSlashEnabled controls whether
// - path on route building is using path.Join
// - the path of the incoming request is trimmed of its slash suffix.
// Value of true matches the behavior of <= 3.9.0
var TrimRightSlashEnabled = true

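To see what the two strategies selected by TrimRightSlashEnabled do to an incoming path, here is a standalone sketch that mirrors the unexported tokenizePath above (the helper name and sample paths are ours, for illustration):

package main

import (
	"fmt"
	"strings"
)

// tokenize reproduces the branch in tokenizePath for demonstration.
func tokenize(path string, trimRight bool) []string {
	if path == "/" {
		return nil
	}
	if trimRight { // <= v3.9.0 strategy: trailing slash ignored
		return strings.Split(strings.Trim(path, "/"), "/")
	}
	return strings.Split(strings.TrimLeft(path, "/"), "/") // v3.10.2 strategy
}

func main() {
	fmt.Printf("%q\n", tokenize("/users/", true))  // ["users"]
	fmt.Printf("%q\n", tokenize("/users/", false)) // ["users" ""] — extra empty token
}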
55 vendor/github.com/emicklei/go-restful/v3/route_builder.go generated vendored
@ -7,6 +7,7 @@ package restful
import (
	"fmt"
	"os"
	"path"
	"reflect"
	"runtime"
	"strings"
@ -30,27 +31,29 @@ type RouteBuilder struct {
	typeNameHandleFunc TypeNameHandleFunction // required

	// documentation
	doc                     string
	notes                   string
	operation               string
	readSample, writeSample interface{}
	parameters              []*Parameter
	errorMap                map[int]ResponseError
	defaultResponse         *ResponseError
	metadata                map[string]interface{}
	extensions              map[string]interface{}
	deprecated              bool
	contentEncodingEnabled  *bool
	doc                    string
	notes                  string
	operation              string
	readSample             interface{}
	writeSamples           []interface{}
	parameters             []*Parameter
	errorMap               map[int]ResponseError
	defaultResponse        *ResponseError
	metadata               map[string]interface{}
	extensions             map[string]interface{}
	deprecated             bool
	contentEncodingEnabled *bool
}

// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// func Returns500(b *RouteBuilder) {
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
// }
//	ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
//	func Returns500(b *RouteBuilder) {
//		b.Returns(500, "Internal Server Error", restful.ServiceError{})
//	}
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
	for _, each := range oneArgBlocks {
		each(b)
@ -133,9 +136,9 @@ func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
	return p
}

// Writes tells what resource type will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
	b.writeSample = sample
// Writes tells which one of the resource types will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(samples ...interface{}) *RouteBuilder {
	b.writeSamples = samples // oneof
	return b
}

@ -340,19 +343,29 @@ func (b *RouteBuilder) Build() Route {
		ResponseErrors:  b.errorMap,
		DefaultResponse: b.defaultResponse,
		ReadSample:      b.readSample,
		WriteSample:     b.writeSample,
		WriteSamples:    b.writeSamples,
		Metadata:        b.metadata,
		Deprecated:      b.deprecated,
		contentEncodingEnabled: b.contentEncodingEnabled,
		allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType,
	}
	// set WriteSample if one specified
	if len(b.writeSamples) == 1 {
		route.WriteSample = b.writeSamples[0]
	}
	route.Extensions = b.extensions
	route.postBuild()
	return route
}

func concatPath(path1, path2 string) string {
	return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
// merge two paths using the current (package global) merge path strategy.
func concatPath(rootPath, routePath string) string {

	if TrimRightSlashEnabled {
		return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/")
	} else {
		return path.Join(rootPath, routePath)
	}
}

var anonymousFuncCount int32

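A sketch of the new variadic Writes in use; the payload types, handler, and route are hypothetical, not taken from this diff:

package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// Hypothetical payload types, for illustration only.
type User struct{ Name string }
type ErrorBody struct{ Message string }

func findUser(req *restful.Request, resp *restful.Response) {
	resp.WriteHeaderAndEntity(http.StatusOK, User{Name: req.PathParameter("id")})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users")
	// Writes is now variadic ("oneof"); when exactly one sample is given,
	// Build() still fills Route.WriteSample for backward compatibility.
	ws.Route(ws.GET("/{id}").To(findUser).Writes(User{}, ErrorBody{}))
	restful.Add(ws)
	_ = http.ListenAndServe(":8080", nil)
}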
12 vendor/github.com/fxamacker/cbor/v2/.gitignore generated vendored Normal file
@ -0,0 +1,12 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
104 vendor/github.com/fxamacker/cbor/v2/.golangci.yml generated vendored Normal file
@ -0,0 +1,104 @@
# Do not delete linter settings. Linters like gocritic can be enabled on the command line.

linters-settings:
  depguard:
    rules:
      prevent_unmaintained_packages:
        list-mode: strict
        files:
          - $all
          - "!$test"
        allow:
          - $gostd
          - github.com/x448/float16
        deny:
          - pkg: io/ioutil
            desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
  dupl:
    threshold: 100
  funlen:
    lines: 100
    statements: 50
  goconst:
    ignore-tests: true
    min-len: 2
    min-occurrences: 3
  gocritic:
    enabled-tags:
      - diagnostic
      - experimental
      - opinionated
      - performance
      - style
    disabled-checks:
      - commentedOutCode
      - dupImport # https://github.com/go-critic/go-critic/issues/845
      - ifElseChain
      - octalLiteral
      - paramTypeCombine
      - whyNoLint
  gofmt:
    simplify: false
  goimports:
    local-prefixes: github.com/fxamacker/cbor
  golint:
    min-confidence: 0
  govet:
    check-shadowing: true
  lll:
    line-length: 140
  maligned:
    suggest-new: true
  misspell:
    locale: US
  staticcheck:
    checks: ["all"]

linters:
  disable-all: true
  enable:
    - asciicheck
    - bidichk
    - depguard
    - errcheck
    - exportloopref
    - goconst
    - gocritic
    - gocyclo
    - gofmt
    - goimports
    - goprintffuncname
    - gosec
    - gosimple
    - govet
    - ineffassign
    - misspell
    - nilerr
    - revive
    - staticcheck
    - stylecheck
    - typecheck
    - unconvert
    - unused

issues:
  # max-issues-per-linter default is 50. Set to 0 to disable limit.
  max-issues-per-linter: 0
  # max-same-issues default is 3. Set to 0 to disable limit.
  max-same-issues: 0

  exclude-rules:
    - path: decode.go
      text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
    - path: decode.go
      text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
    - path: decode.go
      text: "string `, ` has (\\d+) occurrences, make it a constant"
    - path: decode.go
      text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
    - path: decode.go
      text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
    - path: valid.go
      text: "string ` for type ` has (\\d+) occurrences, make it a constant"
    - path: valid.go
      text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
133 vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md generated vendored Normal file
@ -0,0 +1,133 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
  community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of
  any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
  without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
faye.github@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of
actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the
community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
41 vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md generated vendored Normal file
@ -0,0 +1,41 @@
# How to contribute

You can contribute by using the library, opening issues, or opening pull requests.

## Bug reports and security vulnerabilities

Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).

To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).

Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.

## Pull requests

Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.

Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.

See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.

Pull requests have a greater chance of being approved if:
- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
- it has > 97% code coverage.

## Describe your issue

Clearly describe the issue:
* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
* If you propose a change or addition, try to give an example how the improved code could look like or how to use it.
* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.

## Please don't

Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.

Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.

## Credits

- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
Some files were not shown because too many files have changed in this diff.