From 767f6a7e77b8608ca8f3c2401f0feb9ac8ea5c1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 11:24:36 +0000 Subject: [PATCH] build(deps): bump google.golang.org/grpc from 1.68.1 to 1.69.0 Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.68.1 to 1.69.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.68.1...v1.69.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 8 +- go.sum | 28 +- vendor/github.com/go-logr/logr/README.md | 1 + .../grpc/balancer/balancer.go | 93 --- .../balancer/pickfirst/internal/internal.go | 17 +- .../grpc/balancer/pickfirst/pickfirst.go | 2 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 472 ++++++++--- .../grpc/balancer/roundrobin/roundrobin.go | 4 +- .../grpc/balancer/subconn.go | 134 ++++ .../grpc/balancer_wrapper.go | 77 ++ .../grpc_binarylog_v1/binarylog.pb.go | 180 +---- vendor/google.golang.org/grpc/clientconn.go | 11 +- vendor/google.golang.org/grpc/codec.go | 2 +- vendor/google.golang.org/grpc/dialoptions.go | 16 - .../grpc/experimental/stats/metricregistry.go | 27 +- .../grpc/experimental/stats/metrics.go | 75 -- .../grpc/grpclog/internal/loggerv2.go | 107 ++- .../grpc/internal/backoff/backoff.go | 2 +- .../grpc/internal/internal.go | 22 +- .../internal/resolver/dns/dns_resolver.go | 41 +- .../grpc/internal/transport/client_stream.go | 144 ++++ .../grpc/internal/transport/flowcontrol.go | 9 +- .../grpc/internal/transport/handler_server.go | 36 +- .../grpc/internal/transport/http2_client.go | 91 +-- .../grpc/internal/transport/http2_server.go | 69 +- .../grpc/internal/transport/server_stream.go | 178 +++++ .../grpc/internal/transport/transport.go | 321 +------- .../grpc/mem/buffer_slice.go | 59 +- vendor/google.golang.org/grpc/preloader.go | 4 +- .../grpc/resolver/resolver.go | 22 +- vendor/google.golang.org/grpc/rpc_util.go | 42 +- vendor/google.golang.org/grpc/server.go | 87 +- .../google.golang.org/grpc/service_config.go | 5 +- .../google.golang.org/grpc/stats/metrics.go | 81 ++ vendor/google.golang.org/grpc/stats/stats.go | 74 +- vendor/google.golang.org/grpc/stream.go | 62 +- vendor/google.golang.org/grpc/version.go | 2 +- .../protobuf/encoding/protojson/decode.go | 2 +- .../protobuf/encoding/protojson/encode.go | 4 +- .../protobuf/internal/descopts/options.go | 20 +- .../internal/editionssupport/editions.go | 2 +- .../protobuf/internal/filedesc/desc.go | 4 + .../protobuf/internal/filedesc/desc_init.go | 2 + .../protobuf/internal/filedesc/desc_lazy.go | 2 + .../protobuf/internal/filedesc/editions.go | 2 +- .../protobuf/internal/genid/doc.go | 2 +- .../internal/genid/go_features_gen.go | 15 +- .../protobuf/internal/genid/map_entry.go | 2 +- .../protobuf/internal/genid/wrappers.go | 2 +- .../protobuf/internal/impl/codec_extension.go | 11 +- .../protobuf/internal/impl/codec_field.go | 3 + .../protobuf/internal/impl/codec_message.go | 3 + .../protobuf/internal/impl/codec_reflect.go | 210 ----- .../protobuf/internal/impl/codec_unsafe.go | 3 - .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/encode.go | 2 +- .../protobuf/internal/impl/equal.go | 224 ++++++ .../internal/impl/legacy_extension.go | 1 + .../protobuf/internal/impl/message.go | 4 +- .../protobuf/internal/impl/pointer_reflect.go | 215 ----- 
.../protobuf/internal/impl/pointer_unsafe.go | 3 - .../protobuf/internal/strs/strings_pure.go | 28 - .../internal/strs/strings_unsafe_go120.go | 3 +- .../internal/strs/strings_unsafe_go121.go | 3 +- .../protobuf/internal/version/version.go | 4 +- .../google.golang.org/protobuf/proto/equal.go | 9 + .../protobuf/proto/extension.go | 71 ++ .../protobuf/reflect/protodesc/desc_init.go | 4 + .../protobuf/reflect/protodesc/editions.go | 2 +- .../protobuf/reflect/protoreflect/methods.go | 10 + .../reflect/protoreflect/value_pure.go | 60 -- .../protoreflect/value_unsafe_go120.go | 3 +- .../protoreflect/value_unsafe_go121.go | 3 +- .../protobuf/runtime/protoiface/methods.go | 18 + .../types/descriptorpb/descriptor.pb.go | 748 ++++-------------- .../types/gofeaturespb/go_features.pb.go | 24 +- .../protobuf/types/known/anypb/any.pb.go | 24 +- .../types/known/durationpb/duration.pb.go | 24 +- .../types/known/timestamppb/timestamp.pb.go | 24 +- .../types/known/wrapperspb/wrappers.pb.go | 200 +---- vendor/modules.txt | 10 +- 81 files changed, 2097 insertions(+), 2520 deletions(-) create mode 100644 vendor/google.golang.org/grpc/balancer/subconn.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/client_stream.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/server_stream.go create mode 100644 vendor/google.golang.org/grpc/stats/metrics.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/equal.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go diff --git a/go.mod b/go.mod index 03ad9e1e..25f85eb1 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/container-storage-interface/spec v1.11.0 github.com/kubernetes-csi/csi-lib-utils v0.14.1 golang.org/x/net v0.32.0 - google.golang.org/grpc v1.68.1 + google.golang.org/grpc v1.69.0 k8s.io/klog/v2 v2.130.1 k8s.io/kubernetes v1.28.12 k8s.io/mount-utils v0.28.12 // indirect @@ -18,7 +18,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect @@ -48,8 +48,8 @@ require ( golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index a72e2aed..a9c74b62 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,10 @@ github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKf github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= 
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -107,6 +109,16 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -149,12 +161,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= +google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index 8969526a..7c7f0c69 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 3a2092f1..382ad694 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,17 +73,6 @@ func unregisterForTesting(name string) { delete(m, name) } -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -106,57 +95,6 @@ func Get(name string) Builder { return nil } -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. 
For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully close, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which may be - // called when the Producer is no longer needed. Otherwise the producer - // will automatically be closed upon connection loss or subchannel close. - // Should only be called on a SubConn in state Ready. Otherwise the - // producer will be unable to create streams. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -424,18 +362,6 @@ type ExitIdler interface { ExitIdle() } -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address -} - // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -448,22 +374,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Build also returns a close function that will be called when all - // references to the Producer have been given up for a SubConn, or when a - // connectivity state change occurs on the SubConn. The close function - // should always block until all asynchronous cleanup work is completed. 
- Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go index c5197894..7d66cb49 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -18,7 +18,18 @@ // Package internal contains code internal to the pickfirst package. package internal -import "math/rand" +import ( + rand "math/rand/v2" + "time" +) -// RandShuffle pseudo-randomizes the order of addresses. -var RandShuffle = rand.Shuffle +var ( + // RandShuffle pseudo-randomizes the order of addresses. + RandShuffle = rand.Shuffle + // TimeAfterFunc allows mocking the timer for testing connection delay + // related functionality. + TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } + } +) diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index e069346a..ea889981 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,7 +23,7 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 985b6edc..2fc0a71f 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -29,11 +29,15 @@ import ( "encoding/json" "errors" "fmt" + "net" + "net/netip" "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -50,26 +54,68 @@ func init() { balancer.Register(pickfirstBuilder{}) } +// enableHealthListenerKeyType is a unique key type used in resolver attributes +// to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + var ( logger = grpclog.Component("pick-first-leaf-lb") // Name is the name of the pick_first_leaf balancer. // It is changed to "pick_first" in init() if this balancer is to be // registered as the default pickfirst. - Name = "pick_first_leaf" + Name = "pick_first_leaf" + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. 
Number of times the selected subchannel becomes disconnected.", + Unit: "disconnection", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) ) -// TODO: change to pick-first when this becomes the default pick_first policy. -const logPrefix = "[pick-first-leaf-lb %p] " +const ( + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 +) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{ - cc: cc, - addressList: addressList{}, - subConns: resolver.NewAddressMap(), - state: connectivity.Connecting, - mu: sync.Mutex{}, + cc: cc, + target: bo.Target.String(), + metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder. + + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -87,6 +133,13 @@ func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalan return cfg, nil } +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -104,14 +157,19 @@ type scData struct { subConn balancer.SubConn addr resolver.Address - state connectivity.State - lastErr error + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. 
+ effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { sd := &scData{ - state: connectivity.Idle, - addr: addr, + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ StateListener: func(state balancer.SubConnState) { @@ -128,19 +186,25 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { type pickfirstBalancer struct { // The following fields are initialized at build time and read-only after // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil // The mutex is used to ensure synchronization of updates triggered // from the idle picker and the already serialized resolver, // SubConn state updates. - mu sync.Mutex + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. state connectivity.State // scData for active subonns mapped by address. - subConns *resolver.AddressMap - addressList addressList - firstPass bool - numTF int + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } // ResolverError is called by the ClientConn when the name resolver produces @@ -166,7 +230,7 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { return } - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) @@ -175,15 +239,16 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { b.mu.Lock() defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // Cleanup state pertaining to the previous resolver state. // Treat an empty address list like an error by calling b.ResolverError. - b.state = connectivity.TransientFailure b.closeSubConnsLocked() b.addressList.updateAddrs(nil) b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) @@ -206,9 +271,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // "Flatten the list by concatenating the ordered list of addresses for // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8305 section 4." - A61 - // TODO: support the above language. newAddrs = append(newAddrs, endpoint.Addresses...) 
} } else { @@ -231,16 +293,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Not de-duplicating would result in attempting to connect to the same // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) - // Since we have a new set of addresses, we are again at first pass. - b.firstPass = true - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready b.addressList.updateAddrs(newAddrs) - if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } @@ -252,18 +315,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // we should still enter CONNECTING because the sticky TF behaviour // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported // due to connectivity failures. - if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { // Start connection attempt at first address. - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ + b.forceUpdateConcludedStateLocked(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - b.requestConnectionLocked() + b.startFirstPassLocked() } else if b.state == connectivity.TransientFailure { // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until // we're READY. See A62. - b.requestConnectionLocked() + b.startFirstPassLocked() } return nil } @@ -278,6 +340,7 @@ func (b *pickfirstBalancer) Close() { b.mu.Lock() defer b.mu.Unlock() b.closeSubConnsLocked() + b.cancelConnectionTimer() b.state = connectivity.Shutdown } @@ -287,10 +350,19 @@ func (b *pickfirstBalancer) Close() { func (b *pickfirstBalancer) ExitIdle() { b.mu.Lock() defer b.mu.Unlock() - if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { - b.firstPass = true - b.requestConnectionLocked() + if b.state == connectivity.Idle { + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.(*scData).connectionFailedInFirstPass = false } + b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { @@ -314,6 +386,70 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. 
+// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + // reconcileSubConnsLocked updates the active subchannels based on a new address // list from the resolver. It does this by: // - closing subchannels: any existing subchannels associated with addresses @@ -342,6 +478,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() for _, v := range b.subConns.Values() { sd := v.(*scData) if sd.subConn != selected.subConn { @@ -382,46 +519,89 @@ func (b *pickfirstBalancer) requestConnectionLocked() { } scd := sd.(*scData) - switch scd.state { + switch scd.rawConnectivityState { case connectivity.Idle: scd.subConn.Connect() + b.scheduleNextConnectionLocked() + return case connectivity.TransientFailure: - // Try the next address. + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + scd.connectionFailedInFirstPass = true lastErr = scd.lastErr continue - case connectivity.Ready: - // Should never happen. - b.logger.Errorf("Requesting a connection even though we have a READY SubConn") - case connectivity.Shutdown: - // Should never happen. 
- b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") case connectivity.Connecting: - // Wait for the SubConn to report success or failure. + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + return + } - return } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass. - b.endFirstPassLocked(lastErr) + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) } func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() - oldState := sd.state - sd.state = newState.ConnectivityState + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState // Previously relevant SubConns can still callback with state updates. // To prevent pickers from returning these obsolete SubConns, this logic // is included to check if the current list of active SubConns includes this // SubConn. - if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + if !b.isActiveSCData(sd) { return } if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown return } + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) b.shutdownRemainingLocked(sd) if !b.addressList.seekTo(sd.addr) { // This should not fail as we should have only one SubConn after @@ -429,10 +609,30 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) return } - b.state = connectivity.Ready - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. 
Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) }) return } @@ -443,13 +643,24 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // a transport is successfully created, but the connection fails // before the SubConn can send the notification for READY. We treat // this as a successful connection and transition to IDLE. - if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { // Once a transport fails, the balancer enters IDLE and starts from // the first address when the picker is used. b.shutdownRemainingLocked(sd) - b.state = connectivity.Idle + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) b.addressList.reset() - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.Idle, Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, }) @@ -459,32 +670,35 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if b.firstPass { switch newState.ConnectivityState { case connectivity.Connecting: - // The balancer can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in // TRANSIENT_FAILURE until it's READY. See A62. - // If the balancer is already in CONNECTING, no update is needed. 
- if b.state == connectivity.Idle { - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) } case connectivity.TransientFailure: sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. We ignore such updates. - - if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - return - } - if b.addressList.increment() { - b.requestConnectionLocked() - return + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } } - // End of the first pass. - b.endFirstPassLocked(newState.ConnectionError) + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) } return } @@ -495,7 +709,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.numTF = (b.numTF + 1) % b.subConns.Len() sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: newState.ConnectionError}, }) @@ -509,24 +723,95 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub } } -func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if !sd.connectionFailedInFirstPass { + return + } + } b.firstPass = false - b.numTF = 0 - b.state = connectivity.TransientFailure - - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. for _, v := range b.subConns.Values() { sd := v.(*scData) - if sd.state == connectivity.Idle { + if sd.rawConnectivityState == connectivity.Idle { sd.subConn.Connect() } } } +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. 
+ // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. + if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + case connectivity.TransientFailure: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, + }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) + } +} + +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailures allow the picker to be updated to update + // the connectivity error, in all other cases don't send duplicate state + // updates. + if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) +} + +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). +// A separate function is defined to force update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) +} + type picker struct { result balancer.PickResult err error @@ -583,15 +868,6 @@ func (al *addressList) currentAddress() resolver.Address { return al.addresses[al.idx] } -// first returns the first address in the list. If the list is empty, it returns -// an empty address instead. -func (al *addressList) first() resolver.Address { - if len(al.addresses) == 0 { - return resolver.Address{} - } - return al.addresses[0] -} - func (al *addressList) reset() { al.idx = 0 } @@ -614,6 +890,16 @@ func (al *addressList) seekTo(needle resolver.Address) bool { return false } +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + // equalAddressIgnoringBalAttributes returns true is a and b are considered // equal. This is different from the Equal method on the resolver.Address type // which considers all fields to determine equality. 
Here, we only consider diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 260255d3..80a42d22 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "math/rand" + rand "math/rand/v2" "sync/atomic" "google.golang.org/grpc/balancer" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(rand.Intn(len(scs))), + next: uint32(rand.IntN(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go new file mode 100644 index 00000000..ea27c4fa --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/subconn.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import ( + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/resolver" +) + +// A SubConn represents a single connection to a gRPC backend service. +// +// All SubConns start in IDLE, and will not try to connect. To trigger a +// connection attempt, Balancers must call Connect. +// +// If the connection attempt fails, the SubConn will transition to +// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the +// connection attempt succeeds, it will transition to READY. +// +// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE. +// +// If a connection re-enters IDLE, Balancers must call Connect again to trigger +// a new connection attempt. +// +// Each SubConn contains a list of addresses. gRPC will try to connect to the +// addresses in sequence, and stop trying the remainder once the first +// connection is successful. However, this behavior is deprecated. SubConns +// should only use a single address. +// +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted +// by custom load balancing poilices. Users should not need their own complete +// implementation of this interface -- they should always delegate to a SubConn +// returned by ClientConn.NewSubConn() by embedding it in their implementations. +// An embedded SubConn must never be nil, or runtime panics will occur. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully close, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. 
+ // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() + // RegisterHealthListener registers a health listener that receives health + // updates for a Ready SubConn. Only one health listener can be registered + // at a time. A health listener should be registered each time the SubConn's + // connectivity state changes to READY. Registering a health listener when + // the connectivity state is not READY may result in undefined behaviour. + // This method must not be called synchronously while handling an update + // from a previously registered health listener. + RegisterHealthListener(func(SubConnState)) + // EnforceSubConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceSubConnEmbedding +} + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. 
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 2a4f2878..905817b5 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -189,6 +189,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, + healthData: newHealthData(connectivity.Idle), } ac.acbw = acbw return acbw, nil @@ -254,12 +255,32 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) producersMu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer + + // Access to healthData is protected by healthMu. + healthMu sync.Mutex + // healthData is stored as a pointer to detect when the health listener is + // dropped or updated. This is required as closures can't be compared for + // equality. + healthData *healthData +} + +// healthData holds data related to health state reporting. +type healthData struct { + // connectivityState stores the most recent connectivity state delivered + // to the LB policy. This is stored to avoid sending updates when the + // SubConn has already exited connectivity state READY. + connectivityState connectivity.State +} + +func newHealthData(s connectivity.State) *healthData { + return &healthData{connectivityState: s} } // updateState is invoked by grpc to push a subConn state update to the @@ -279,6 +300,24 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } + // Invalidate the health listener by updating the healthData. + acbw.healthMu.Lock() + // A race may occur if a health listener is registered soon after the + // connectivity state is set but before the stateListener is called. + // Two cases may arise: + // 1. The new state is not READY: RegisterHealthListener has checks to + // ensure no updates are sent when the connectivity state is not + // READY. + // 2. The new state is READY: This means that the old state wasn't Ready. + // The RegisterHealthListener API mentions that a health listener + // must not be registered when a SubConn is not ready to avoid such + // races. When this happens, the LB policy would get health updates + // on the old listener. When the LB policy registers a new listener + // on receiving the connectivity update, the health updates will be + // sent to the new health listener. + acbw.healthData = newHealthData(scs.ConnectivityState) + acbw.healthMu.Unlock() + acbw.stateListener(scs) }) } @@ -373,3 +412,41 @@ func (acbw *acBalancerWrapper) closeProducers() { delete(acbw.producers, pb) } } + +// RegisterHealthListener accepts a health listener from the LB policy. 
It sends +// updates to the health listener as long as the SubConn's connectivity state +// doesn't change and a new health listener is not registered. To invalidate +// the currently registered health listener, acbw updates the healthData. If a +// nil listener is registered, the active health listener is dropped. +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + // listeners should not be registered when the connectivity state + // isn't Ready. This may happen when the balancer registers a listener + // after the connectivityState is updated, but before it is notified + // of the update. + if acbw.healthData.connectivityState != connectivity.Ready { + return + } + // Replace the health data to stop sending updates to any previously + // registered health listeners. + hd := newHealthData(connectivity.Ready) + acbw.healthData = hd + if listener == nil { + return + } + + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Don't send updates if a new listener is registered. + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + curHD := acbw.healthData + if curHD != hd { + return + } + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + }) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 55bffaa7..9e9d0806 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -274,11 +274,9 @@ type GrpcLogEntry struct { func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcLogEntry) String() string { @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -440,11 +438,9 @@ type ClientHeader struct { func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +451,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -509,11 +505,9 @@ type ServerHeader struct { func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +518,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -565,11 +559,9 @@ type Trailer struct { func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +572,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -638,11 +630,9 @@ type Message struct { func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +643,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -713,11 +703,9 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +716,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -762,11 +750,9 @@ type MetadataEntry struct { func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +763,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -820,11 +806,9 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +819,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1057,104 +1041,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 19763f8e..4f57b554 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -775,10 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() @@ -1374,7 +1371,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1448,7 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1480,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. 
go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index e840858b..959c2f99 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } - return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil + return mem.BufferSlice{mem.SliceBuffer(data)}, nil } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 518692c3..7494ae59 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -87,7 +87,6 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -445,10 +444,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -662,16 +657,6 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. -func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ @@ -682,7 +667,6 @@ func defaultDialOptions() dialOptions { BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index 1d827dd5..ad75313a 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" ) func init() { @@ -34,7 +35,7 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. -var DefaultMetrics = NewMetrics() +var DefaultMetrics = stats.NewMetricSet() // MetricDescriptor is the data for a registered metric. 
type MetricDescriptor struct { @@ -42,7 +43,7 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. - Name Metric + Name string // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. @@ -154,27 +155,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. -var registeredMetrics = make(map[Metric]bool) +var registeredMetrics = make(map[string]bool) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. -var metricsRegistry = make(map[Metric]*MetricDescriptor) +var metricsRegistry = make(map[string]*MetricDescriptor) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. -func DescriptorForMetric(metric Metric) *MetricDescriptor { - return metricsRegistry[metric] +func DescriptorForMetric(metricName string) *MetricDescriptor { + return metricsRegistry[metricName] } -func registerMetric(name Metric, def bool) { - if registeredMetrics[name] { - logger.Fatalf("metric %v already registered", name) +func registerMetric(metricName string, def bool) { + if registeredMetrics[metricName] { + logger.Fatalf("metric %v already registered", metricName) } - registeredMetrics[name] = true + registeredMetrics[metricName] = true if def { - DefaultMetrics = DefaultMetrics.Add(name) + DefaultMetrics = DefaultMetrics.Add(metricName) } } @@ -256,8 +257,8 @@ func snapshotMetricsRegistryForTesting() func() { oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry - registeredMetrics = make(map[Metric]bool) - metricsRegistry = make(map[Metric]*MetricDescriptor) + registeredMetrics = make(map[string]bool) + metricsRegistry = make(map[string]*MetricDescriptor) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index 3221f7a6..bf9e7f98 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,8 +19,6 @@ // Package stats contains experimental metrics/stats API's. package stats -import "maps" - // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { // RecordInt64Count records the measurement alongside labels on the int @@ -39,76 +37,3 @@ type MetricsRecorder interface { // gauge associated with the provided handle. RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } - -// Metric is an identifier for a metric. -type Metric string - -// Metrics is a set of metrics to record. Once created, Metrics is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetrics instead. -type Metrics struct { - // metrics are the set of metrics to initialize. - metrics map[Metric]bool -} - -// NewMetrics returns a Metrics containing Metrics. 
-func NewMetrics(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. -func (m *Metrics) Metrics() map[Metric]bool { - return m.metrics -} - -// Add adds the metrics to the metrics set and returns a new copy with the -// additional metrics. -func (m *Metrics) Add(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. -func (m *Metrics) Join(metrics *Metrics) *Metrics { - newMetrics := make(map[Metric]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &Metrics{ - metrics: newMetrics, - } -} - -// Remove removes the metrics from the metrics set and returns a new copy with -// the metrics removed. -func (m *Metrics) Remove(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - delete(newMetrics, metric) - } - return &Metrics{ - metrics: newMetrics, - } -} diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index 07df71e9..ed90060c 100644 --- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,6 +101,22 @@ var severityName = []string{ fatalLog: "FATAL", } +// sprintf is fmt.Sprintf. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintf = fmt.Sprintf + +// sprint is fmt.Sprint. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprint = fmt.Sprint + +// sprintln is fmt.Sprintln. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintln = fmt.Sprintln + +// exit is os.Exit. +// This var exists to make it possible to test functions calling os.Exit. +var exit = os.Exit + // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -111,7 +127,7 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + g.m[severity].Output(2, sevStr+": "+s) return } // TODO: we can also include the logging component, but that needs more @@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } +func (g *loggerT) printf(severity int, format string, args ...any) { + // Note the discard check is duplicated in each print func, rather than in + // output, to avoid the expensive Sprint calls. + // De-duplicating this by moving to output would be a significant performance regression! 
+ if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintf(format, args...)) +} + +func (g *loggerT) print(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprint(v...)) +} + +func (g *loggerT) println(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintln(v...)) +} + func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) + g.print(infoLog, args...) } func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) + g.println(infoLog, args...) } func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) + g.printf(infoLog, format, args...) } func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) + g.print(warningLog, args...) } func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) + g.println(warningLog, args...) } func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) + g.printf(warningLog, format, args...) } func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) + g.print(errorLog, args...) } func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) + g.println(errorLog, args...) } func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) + g.printf(errorLog, format, args...) } func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) + g.print(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) + g.println(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) + g.printf(fatalLog, format, args...) + exit(1) } func (g *loggerT) V(l int) bool { @@ -186,19 +226,42 @@ type LoggerV2Config struct { FormatJSON bool } +// combineLoggers returns a combined logger for both higher & lower severity logs, +// or only one if the other is io.Discard. +// +// This uses io.Discard instead of io.MultiWriter when all loggers +// are set to io.Discard. Both this package and the standard log package have +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of +// this writing). +func combineLoggers(lower, higher io.Writer) io.Writer { + if lower == io.Discard { + return higher + } + if higher == io.Discard { + return lower + } + return io.MultiWriter(lower, higher) +} + // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { - var m []*log.Logger flag := log.LstdFlags if c.FormatJSON { flag = 0 } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. 
- m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) + + warningW = combineLoggers(infoW, warningW) + errorW = combineLoggers(errorW, warningW) + + fatalW := errorW + + m := []*log.Logger{ + log.New(infoW, "", flag), + log.New(warningW, "", flag), + log.New(errorW, "", flag), + log.New(fatalW, "", flag), + } return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b15cf482..b6ae7f25 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" - "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 20b4dc3d..3afc1813 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -29,8 +29,6 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -149,6 +147,20 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. @@ -255,3 +267,9 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" + +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation +// embedding. +type EnforceSubConnEmbedding interface { + enforceSubConnEmbedding() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 374c12fb..ba5c5a95 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,8 +24,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" + "net/netip" "os" "strconv" "strings" @@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. 
- if ipAddr, ok := formatIP(host); ok { + if ipAddr, err := formatIP(host); err == nil { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -260,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -322,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -351,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// formatIP returns an error if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and error = nil. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false +// error = nil. +func formatIP(addr string) (string, error) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", err } - if ip.To4() != nil { - return addr, true + if ip.Is4() { + return addr, nil } - return "[" + addr + "]", true + return "[" + addr + "]", nil } // parseTarget takes the user input target string and default port, returns @@ -379,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if ip := net.ParseIP(target); ip != nil { + if _, err := netip.ParseAddr(target); err == nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -427,7 +428,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go new file mode 100644 index 00000000..8ed347c5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync/atomic" + + "golang.org/x/net/http2" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ClientStream implements streaming functionality for a gRPC client. +type ClientStream struct { + *Stream // Embed for common stream functionality. + + ct *http2Client + done chan struct{} // closed at the end of stream to unblock writers. + doneFunc func() // invoked at the end of stream. + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). + headerValid bool + header metadata.MD // the received header metadata + noHeaders bool // set if the client never received headers (set only after the stream is done). + + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream + + status *status.Status // the status error received from the server +} + +// Read reads an n byte message from the input stream. +func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.ct.incrMsgRecv() + } + return b, err +} + +// Close closes the stream and popagates err to any readers. +func (s *ClientStream) Close(err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.ct.write(s, hdr, data, opts) +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *ClientStream) BytesReceived() bool { + return s.bytesReceived.Load() +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *ClientStream) Unprocessed() bool { + return s.unprocessed.Load() +} + +func (s *ClientStream) waitOnHeader() { + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.Close(ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ClientStream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *ClientStream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. Acquires the key-value +// pairs of header metadata once it is available. It blocks until i) the +// metadata is ready or ii) there is no header metadata or iii) the stream is +// canceled/expired. 
+func (s *ClientStream) Header() (metadata.MD, error) { + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. +func (s *ClientStream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. +func (s *ClientStream) Status() *status.Status { + return s.status +} diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index 97198c51..dfc0f224 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 { func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 + if f.unacked < f.limit/4 { f.updateEffectiveWindowSize() - return w + return 0 } - f.updateEffectiveWindowSize() - return 0 + return f.reset() } func (f *trInFlow) reset() uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index ce878693..d9305a65 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error { } } -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { +func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() @@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { +func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). 
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -408,16 +408,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ @@ -471,9 +473,7 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) IncrMsgSent() {} - -func (ht *serverHandlerTransport) IncrMsgRecv() {} +func (ht *serverHandlerTransport) incrMsgRecv() {} func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 62b81885..f323ab7f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -123,7 +123,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -199,10 +199,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -339,7 +339,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -480,17 +480,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
- s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -506,7 +508,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - t.CloseStream(s, err) + s.Close(err) }, }, windowHandler: func(n int) { @@ -597,12 +599,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -738,7 +734,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -763,7 +759,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return } // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. @@ -908,21 +904,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return s, nil } -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1085,7 +1067,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
-func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { reader := data.Reader() if opts.Last { @@ -1114,10 +1096,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O _ = reader.Close() return err } + t.incrMsgSent() return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1127,7 +1110,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1136,7 +1119,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1242,7 +1225,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1383,11 +1366,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. 
- atomic.StoreUint32(&stream.unprocessed, 1) + stream.unprocessed.Store(true) streamsToClose = append(streamsToClose, stream) } } @@ -1439,7 +1422,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) + s.bytesReceived.Store(true) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1809,14 +1792,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) + } } -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) + } } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 584b50fe..0055fddd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -111,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -537,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } } t.mu.Lock() if t.state != reachable { @@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -963,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -973,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. 
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1006,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1046,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1117,11 +1113,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { + if err := t.writeHeader(s, nil); err != nil { _ = reader.Close() return err } @@ -1147,6 +1143,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Opti _ = reader.Close() return err } + t.incrMsgSent() return nil } @@ -1276,7 +1273,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1297,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1321,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. 
@@ -1415,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Server) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +func (t *http2Server) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) + } } -func (t *http2Server) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +func (t *http2Server) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) + } } func (t *http2Server) getOutFlowWindow() int64 { @@ -1455,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rand.Int63n(2*r) - r + j := rand.Int64N(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go new file mode 100644 index 00000000..a22a9015 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st internalServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. + clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent atomic.Bool // atomically set when the headers are sent out. +} + +// Read reads an n byte message from the input stream. +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.st.incrMsgRecv() + } + return b, err +} + +// SendHeader sends the header metadata for the given stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.writeHeader(s, md) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.st.write(s, hdr, data, opts) +} + +// WriteStatus sends the status of a stream to the client. 
WriteStatus is +// the final call made on a stream and always occurs. +func (s *ServerStream) WriteStatus(st *status.Status) error { + return s.st.writeStatus(s, st) +} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return s.headerSent.Load() +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. +func (s *ServerStream) updateHeaderSent() bool { + return s.headerSent.Swap(true) +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. +func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. 
+func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index e12cb0bc..2859b877 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -39,7 +38,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -133,7 +131,7 @@ type recvBufferReader struct { err error } -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } @@ -142,9 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { - n, r.err = r.readHeaderClient(header) + n, r.err = r.readMessageHeaderClient(header) } else { - n, r.err = r.readHeader(header) + n, r.err = r.readMessageHeader(header) } return n, r.err } @@ -174,12 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -192,7 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -213,9 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -246,7 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { if m.buffer != nil { @@ -288,14 +286,8 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -303,58 +295,17 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -369,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. 
- return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. 
@@ -480,90 +333,31 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// ReadHeader reads data into the provided header slice from the stream. It -// first checks if there was an error during a previous read operation and +// ReadMessageHeader reads data into the provided header slice from the stream. +// It first checks if there was an error during a previous read operation and // returns it if present. It then requests a read operation for the length of // the header. It continues to read from the stream until the entire header -// slice is filled or an error occurs. If an `io.EOF` error is encountered -// with partially read data, it is converted to `io.ErrUnexpectedEOF` to -// indicate an unexpected end of the stream. The method returns any error -// encountered during the read process or nil if the header was successfully -// read. -func (s *Stream) ReadHeader(header []byte) (err error) { +// slice is filled or an error occurs. If an `io.EOF` error is encountered with +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an +// unexpected end of the stream. The method returns any error encountered during +// the read process or nil if the header was successfully read. 
+func (s *Stream) ReadMessageHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { - n, err := s.trReader.ReadHeader(header) + n, err := s.trReader.ReadMessageHeader(header) header = header[n:] if len(header) == 0 { err = nil @@ -579,7 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. -func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { +func (s *Stream) read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -619,8 +413,8 @@ type transportReader struct { er error } -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { + n, err := t.reader.ReadMessageHeader(header) if err != nil { t.er = err return 0, err @@ -639,17 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -725,15 +508,9 @@ type ConnectOptions struct { BufferPool mem.BufferPool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message +// WriteOptions provides additional hints and information for message // transmission. -type Options struct { +type WriteOptions struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -782,18 +559,8 @@ type ClientTransport interface { // It does not block. GracefulClose() - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -813,12 +580,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. 
- IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -828,19 +589,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error + HandleStreams(context.Context, func(*ServerStream)) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -852,12 +601,14 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +} - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() +type internalServerTransport interface { + ServerTransport + writeHeader(s *ServerStream, md metadata.MD) error + write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error + writeStatus(s *ServerStream, st *status.Status) error + incrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index 228e9c2f..65002e2c 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -22,6 +22,11 @@ import ( "io" ) +const ( + // 32 KiB is what io.Copy uses. + readAllBufSize = 32 * 1024 +) + // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -219,8 +224,58 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. 
+func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. + // Use all of it if that's the case. + *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index e87a17f3..ee0ff969 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 20285451..8eb1cf3b 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -237,8 +238,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -330,3 +331,20 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. 
+func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index aba1ae3e..9fac2b08 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -622,7 +622,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { - ReadHeader(header []byte) error + ReadMessageHeader(header []byte) error Read(n int) (mem.BufferSlice, error) } @@ -656,7 +656,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadHeader(p.header[:]) + err := p.r.ReadMessageHeader(p.header[:]) if err != nil { return 0, nil, err } @@ -664,9 +664,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) - if length == 0 { - return pf, nil, nil - } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -817,7 +814,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -841,7 +838,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei var uncompressedBuf []byte uncompressedBuf, err = dc.Do(compressed.Reader()) if err == nil { - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} } size = len(uncompressedBuf) } else { @@ -877,30 +874,7 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return nil, 0, err } - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? - //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. 
- // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} - - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) if err != nil { out.Free() return nil, 0, err @@ -908,10 +882,14 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return out, out.Len(), nil } +type recvCompressor interface { + RecvCompress() string +} + // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index d1e1415a..16065a02 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -87,12 +87,13 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. @@ -621,8 +622,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). 
// @@ -1020,7 +1021,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1136,7 +1137,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1165,7 +1166,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, payload, opts) + err = stream.Write(hdr, payload, opts) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1212,7 +1213,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1320,7 +1321,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) + stream.WriteStatus(st) return st.Err() } } @@ -1354,15 +1355,12 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + if e := stream.WriteStatus(status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } defer d.Free() - if channelz.IsOn() { - t.IncrMsgRecv() - } df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) @@ -1404,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := t.WriteStatus(stream, appStatus); e != nil { + if e := stream.WriteStatus(appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1431,20 +1429,20 @@ func (s *Server) processUnaryRPC(ctx 
context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.WriteOptions{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { + if e := stream.WriteStatus(sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1484,9 +1482,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } - if channelz.IsOn() { - t.IncrMsgSent() - } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1502,7 +1497,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } - return t.WriteStatus(stream, statusOK) + return stream.WriteStatus(statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1541,7 +1536,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1561,7 +1556,6 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, - t: t, s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1648,7 +1642,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ss.decomp = encoding.GetCompressor(rc) if ss.decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) return st.Err() } } @@ -1717,7 +1711,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - t.WriteStatus(ss.s, appStatus) + ss.s.WriteStatus(appStatus) // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1735,10 +1729,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - return t.WriteStatus(ss.s, statusOK) + return ss.s.WriteStatus(statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1768,7 +1762,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1783,17 +1777,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1802,17 +1799,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(ctx, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1825,7 +1822,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -2100,7 +2097,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2122,7 +2119,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 2671c5ef..7e83027d 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -168,6 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } + func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -297,7 +298,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func min(a, b *int) *int { +func minPointers(a, b *int) *int { if *a < *b { return a } @@ -309,7 +310,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) + return minPointers(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go new file mode 100644 index 00000000..641c8e97 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/metrics.go @@ -0,0 +1,81 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import "maps" + +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetricSet instead. +type MetricSet struct { + // metrics are the set of metrics to initialize. + metrics map[string]bool +} + +// NewMetricSet returns a MetricSet containing metricNames. +func NewMetricSet(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. 
+func (m *MetricSet) Metrics() map[string]bool { + return m.metrics +} + +// Add adds the metricNames to the metrics set and returns a new copy with the +// additional metrics. +func (m *MetricSet) Add(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { + newMetrics := make(map[string]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &MetricSet{metrics: newMetrics} +} + +// Remove removes the metricNames from the metrics set and returns a new copy +// with the metrics removed. +func (m *MetricSet) Remove(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + delete(newMetrics, metric) + } + return &MetricSet{metrics: newMetrics} +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 71195c49..6f20d2d5 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -260,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) } // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. 
-func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-trace-bin` header in the metadata instead. func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) } // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb2b2a21..17e2267b 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -113,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. 
// // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -216,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) } @@ -584,7 +586,7 @@ type csAttempt struct { ctx context.Context cs *clientStream t transport.ClientTransport - s *transport.Stream + s *transport.ClientStream p *parser pickResult balancer.PickResult @@ -706,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) + cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) + // Apply jitter by multiplying with a random factor between 0.8 and 1.2 + cur *= 0.8 + 0.4*rand.Float64() + dur = time.Duration(int64(cur)) cs.numRetriesSincePushback++ } @@ -991,7 +992,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + a.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1083,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1097,9 +1098,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } - if channelz.IsOn() { - a.t.IncrMsgSent() - } return nil } @@ -1153,9 +1151,6 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { Length: payInfo.uncompressedBytes.Len(), }) } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. 
return nil @@ -1183,7 +1178,7 @@ func (a *csAttempt) finish(err error) { } var tr metadata.MD if a.s != nil { - a.t.CloseStream(a.s, err) + a.s.Close(err) tr = a.s.Trailer() } @@ -1340,7 +1335,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream + s *transport.ClientStream ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc @@ -1380,7 +1375,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + as.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1430,7 +1425,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1440,9 +1435,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } - if channelz.IsOn() { - as.t.IncrMsgSent() - } return nil } @@ -1480,9 +1472,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } - if channelz.IsOn() { - as.t.IncrMsgRecv() - } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1510,7 +1499,7 @@ func (as *addrConnStream) finish(err error) { err = nil } if as.s != nil { - as.t.CloseStream(as.s, err) + as.s.Close(err) } if err != nil { @@ -1577,8 +1566,7 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { ctx context.Context - t transport.ServerTransport - s *transport.Stream + s *transport.ServerStream p *parser codec baseCodec @@ -1628,7 +1616,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } - err = ss.t.WriteHeader(ss.s, md) + err = ss.s.SendHeader(md) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1668,7 +1656,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1676,9 +1664,6 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } }() // Server handler could have set new compressor by calling SetSendCompressor. @@ -1710,7 +1695,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { return toRPCErr(err) } @@ -1756,7 +1741,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1764,9 +1749,6 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 5a47094a..a5b03882 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.68.1" +const Version = "1.69.0-dev" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3..8f9e592f 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df2..0e72d853 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c..024ffebd 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. 
In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12..08dad769 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,7 +5,7 @@ // Package editionssupport defines constants for editions that are supported. package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40..fa790e0f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -258,6 +258,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +427,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b..d2f54949 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ 
-495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8..67a51b32 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356..fd4d0c83 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd012..d9b9d916 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b..7f67cbb6 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,25 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. 
const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 ) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02f..bef5a25f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b8..9404270d 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a2..0d5b546e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. 
func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e4..7c1f66c8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb7..78be9df3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577b..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} 
- -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go 
b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e2..077712c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55..f72ddd88 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd2122..6254f5de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000..9f6c32a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. 
+ for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. + emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. 
+func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee..b6849d66 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d4..741b5ed2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. 
-func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
-// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
- in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d..79e18666 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333..00000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0..832a7988 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2b..1ffddf68 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f68..fb8e15e8 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 35 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b0..c36d4a9c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f292..78445d11 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. 
+// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 85617554..ebcb4a8a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830ed..002e0047 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6e..742cb518 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2a..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. 
This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ea..0015fcb3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d38699..479527b5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d..24615656 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb07..6dea75cd 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1217,11 +1217,9 @@ type FileDescriptorSet struct { func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1291,11 +1289,9 @@ type FileDescriptorProto struct { func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1434,11 +1430,9 @@ type DescriptorProto struct { func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1555,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct { func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct { func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct { func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct { func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct { func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2082,11 +2064,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2267,11 +2247,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2534,11 +2512,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2707,11 +2683,9 @@ const ( func (x *FieldOptions) Reset() { *x = 
FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2849,11 +2823,9 @@ type OneofOptions struct { func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2929,11 +2901,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3026,11 +2996,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3115,11 +3083,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3192,11 +3158,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3238,9 @@ type UninterpretedOption struct { func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3375,11 +3337,9 @@ type FeatureSet struct { func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct { func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3578,11 +3536,9 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct { func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct { func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) 
String() string { @@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c..c7e860fc 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -29,11 +29,9 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d..87da199a 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd9..b99d4d24 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -245,11 +245,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645..0d20722d 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826..006060e5 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -69,11 +69,9 @@ func Double(v float64) *DoubleValue { func (x *DoubleValue) Reset() { *x = DoubleValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleValue) String() string { @@ -84,7 +82,7 @@ func (*DoubleValue) ProtoMessage() {} func (x *DoubleValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -125,11 +123,9 @@ func Float(v float32) *FloatValue { func (x *FloatValue) Reset() { *x = FloatValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatValue) String() string { @@ -140,7 +136,7 @@ func (*FloatValue) ProtoMessage() {} func (x *FloatValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -181,11 +177,9 @@ func Int64(v int64) *Int64Value { func (x *Int64Value) Reset() { *x = Int64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Value) String() string { @@ -196,7 +190,7 @@ func (*Int64Value) ProtoMessage() {} func (x *Int64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -237,11 +231,9 @@ func UInt64(v uint64) *UInt64Value { func (x *UInt64Value) Reset() { *x = UInt64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Value) String() string { @@ -252,7 +244,7 @@ func (*UInt64Value) ProtoMessage() {} func (x *UInt64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -293,11 +285,9 @@ func Int32(v int32) *Int32Value { func (x *Int32Value) Reset() { *x = Int32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Value) String() string { @@ -308,7 +298,7 @@ func (*Int32Value) ProtoMessage() {} func (x *Int32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,11 +339,9 @@ func UInt32(v uint32) *UInt32Value { func (x *UInt32Value) Reset() { *x = UInt32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Value) String() string { @@ -364,7 +352,7 @@ func (*UInt32Value) ProtoMessage() {} func (x 
*UInt32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -405,11 +393,9 @@ func Bool(v bool) *BoolValue { func (x *BoolValue) Reset() { *x = BoolValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -420,7 +406,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,11 +447,9 @@ func String(v string) *StringValue { func (x *StringValue) Reset() { *x = StringValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringValue) String() string { @@ -476,7 +460,7 @@ func (*StringValue) ProtoMessage() {} func (x *StringValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,11 +501,9 @@ func Bytes(v []byte) *BytesValue { func (x *BytesValue) Reset() { *x = BytesValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesValue) String() string { @@ -532,7 +514,7 @@ func (*BytesValue) ProtoMessage() {} func (x *BytesValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -629,116 +611,6 @@ func file_google_protobuf_wrappers_proto_init() { if File_google_protobuf_wrappers_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*DoubleValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Int64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Int32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*StringValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*BytesValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/modules.txt b/vendor/modules.txt index e1738445..275089dc 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,7 +17,7 @@ github.com/davecgh/go-spew/spew ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log -# github.com/go-logr/logr v1.4.1 +# github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr # github.com/go-openapi/jsonpointer v0.19.6 @@ -154,10 +154,10 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.68.1 +# google.golang.org/grpc v1.69.0 ## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes @@ -216,8 +216,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.35.1 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire
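Reviewer note (sketch, not part of the patch): the protoiface hunk earlier in this diff adds an optional Equal fast path (Methods.Equal plus the new EqualInput/EqualOutput types), and the generated *.pb.go hunks drop the protoimpl.UnsafeEnabled branches and the purego/appengine build tags, whose fallback files (value_pure.go, strings_pure.go, codec_reflect.go, pointer_reflect.go) are deleted above. Callers in this repository are unaffected: proto.Equal keeps its signature and, with this protobuf release, may simply dispatch to a generated fast path when one is present. A minimal, hedged usage sketch, assuming only the public proto and wrapperspb APIs already vendored here:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        a := wrapperspb.String("csi")
        b := wrapperspb.String("csi")

        // proto.Equal remains the stable entry point; under protobuf v1.35 it
        // may consult a generated Equal fast path (protoiface.Methods.Equal)
        // rather than falling back to reflection-based comparison.
        fmt.Println(proto.Equal(a, b)) // true
    }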